From c0aec61f100d60b87765ba649dc8fd542c5d56a2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Pereira?=
Date: Thu, 14 Mar 2024 13:25:43 +0100
Subject: [PATCH 01/57] Undo workaround with short-circuiting operations (#269)

* redo short-circuiting operations

* Undo change

* Update router/dataplane.go

---
 router/dataplane.go | 26 ++++++++++----------------
 1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/router/dataplane.go b/router/dataplane.go
index 3dad831a0..f9fb9fa94 100644
--- a/router/dataplane.go
+++ b/router/dataplane.go
@@ -509,15 +509,15 @@ func (d *DataPlane) getInterfaceState(interfaceID uint16) control.InterfaceState
 // @ defer fold acc(d.Mem(), R5)
 bfdSessions := d.bfdSessions
 // @ ghost if bfdSessions != nil {
- // @ unfold acc(accBfdSession(d.bfdSessions), R20)
- // @ defer fold acc(accBfdSession(d.bfdSessions), R20)
+ // @ unfold acc(accBfdSession(d.bfdSessions), R20)
+ // @ defer fold acc(accBfdSession(d.bfdSessions), R20)
 // @ }
- // (VerifiedSCION) had to rewrite this, as Gobra does not correctly
- // implement short-circuiting.
 if bfdSession, ok := bfdSessions[interfaceID]; ok {
 // @ assert interfaceID in domain(d.bfdSessions)
 // @ assert bfdSession in range(d.bfdSessions)
 // @ assert bfdSession != nil
+ // (VerifiedSCION) This check used to be conjoined with 'ok' in the condition
+ // of the if stmt above. We broke it down to perform intermediate asserts.
 if !bfdSession.IsUp() {
 return control.InterfaceDown
 }
@@ -1487,12 +1487,9 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte,
 return processResult{}, p.processInterBFD(ohp, pld) /*@, false @*/
 }
 // @ sl.CombineRange_Bytes(ub, start, end, writePerm)
- // (VerifiedSCION) Nested if because short-circuiting && is not working
- // @ ghost if lastLayerIdx >= 0 {
- // @ if !offsets[lastLayerIdx].isNil {
- // @ o := offsets[lastLayerIdx]
- // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm)
- // @ }
+ // @ ghost if lastLayerIdx >= 0 && !offsets[lastLayerIdx].isNil {
+ // @ o := offsets[lastLayerIdx]
+ // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm)
 // @ }
 // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt))
 // @ unfold acc(p.d.Mem(), _)
@@ -1502,12 +1499,9 @@
 return v1, v2 /*@, aliasesPkt @*/
 case scion.PathType:
 // @ sl.CombineRange_Bytes(ub, start, end, writePerm)
- // (VerifiedSCION) Nested if because short-circuiting && is not working
- // @ ghost if lastLayerIdx >= 0 {
- // @ ghost if !offsets[lastLayerIdx].isNil {
- // @ o := offsets[lastLayerIdx]
- // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm)
- // @ }
+ // @ ghost if lastLayerIdx >= 0 && !offsets[lastLayerIdx].isNil {
+ // @ o := offsets[lastLayerIdx]
+ // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm)
 // @ }
 // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt))
 v1, v2 /*@ , addrAliasesPkt @*/ := p.processSCION( /*@ p.rawPkt, ub == nil, llStart, llEnd @*/ )

From e1465b5467db620894b2cddb624197c9da99d312 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Pereira?=
Date: Sun, 17 Mar 2024 11:56:10 +0100
Subject: [PATCH 02/57] increase timeout for epic (#278)

---
 .github/workflows/gobra.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml
index 277205413..a428649f3 100644
--- a/.github/workflows/gobra.yml
+++ b/.github/workflows/gobra.yml
@@ -81,7 +81,7 @@ jobs:
 uses: viperproject/gobra-action@main
 with:
 packages: 'pkg/experimental/epic'
- 
timeout: 5m + timeout: 7m headerOnly: ${{ env.headerOnly }} module: ${{ env.module }} includePaths: ${{ env.includePaths }} From f89f635ea3dfbc1f6a290913f1a9d875ddba77e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Mon, 18 Mar 2024 13:51:31 +0100 Subject: [PATCH 03/57] experiment with disabling NL (#265) * experiment with disabling NL * enable the wildcard optimization for when NL is disabled * Apply suggestions from code review --- .github/workflows/gobra.yml | 60 +++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml index a428649f3..ed10bd5ca 100644 --- a/.github/workflows/gobra.yml +++ b/.github/workflows/gobra.yml @@ -23,6 +23,9 @@ env: mceMode: 'od' requireTriggers: '1' useZ3API: '0' + backend: 'SILICON' + disableNL: '0' + unsafeWildcardOptimization: '1' jobs: verify-deps: @@ -61,6 +64,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/addr' uses: viperproject/gobra-action@main with: @@ -77,6 +83,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/experimental/epic' uses: viperproject/gobra-action@main with: @@ -92,6 +101,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/log' uses: viperproject/gobra-action@main with: @@ -107,6 +119,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/private/serrors' uses: viperproject/gobra-action@main with: @@ -122,6 +137,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/scrypto' uses: viperproject/gobra-action@main with: @@ -137,6 +155,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers' uses: viperproject/gobra-action@main with: @@ -152,6 +173,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path' uses: viperproject/gobra-action@main with: @@ -167,6 +191,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - 
name: Verify package 'pkg/slayers/path/empty' uses: viperproject/gobra-action@main with: @@ -182,6 +209,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/epic' uses: viperproject/gobra-action@main with: @@ -198,6 +228,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/onehop' uses: viperproject/gobra-action@main with: @@ -213,6 +246,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/scion' uses: viperproject/gobra-action@main with: @@ -228,6 +264,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/topology' uses: viperproject/gobra-action@main with: @@ -243,6 +282,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/topology/underlay' uses: viperproject/gobra-action@main with: @@ -258,6 +300,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/underlay/conn' uses: viperproject/gobra-action@main with: @@ -273,6 +318,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/underlay/sockctrl' uses: viperproject/gobra-action@main with: @@ -288,6 +336,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'router/bfd' uses: viperproject/gobra-action@main with: @@ -303,6 +354,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'router/control' uses: viperproject/gobra-action@main with: @@ -318,6 +372,9 @@ jobs: mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Upload the verification report uses: 
actions/upload-artifact@v2 with: @@ -347,4 +404,7 @@ jobs: mceMode: 'on' requireTriggers: ${{ env.requireTriggers }} useZ3API: ${{ env.useZ3API }} + disableNL: '1' + backend: ${{ env.backend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} From 99637501ee3cb112d33f4186555daac64918ee4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Mon, 18 Mar 2024 16:57:34 +0100 Subject: [PATCH 04/57] Drop unnecessary annotations in `Run` (#279) * drop unnecessary annotations * fix precond error * fix verification error * cleanup * fix tiny error --- router/dataplane.go | 66 +++++++++---------------------------- router/dataplane_spec.gobra | 26 +++++++++++++-- 2 files changed, 40 insertions(+), 52 deletions(-) diff --git a/router/dataplane.go b/router/dataplane.go index f9fb9fa94..c2e1589d9 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -778,24 +778,18 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // dPtr as an helper parameter. It always receives the value &d. // @ requires acc(dPtr, _) // @ requires let d := *dPtr in - // @ acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) - // @ requires let d := *dPtr in - // @ acc(d.Mem(), _) && d.WellConfigured() - // @ requires let d := *dPtr in d.getValSvc() != nil - // @ requires let d := *dPtr in d.getValForwardingMetrics() != nil - // @ requires let d := *dPtr in (0 in d.getDomForwardingMetrics()) - // @ requires let d := *dPtr in (ingressID in d.getDomForwardingMetrics()) - // @ requires let d := *dPtr in d.macFactory != nil + // @ acc(d.Mem(), _) && + // @ d.WellConfigured() && + // @ d.getValSvc() != nil && + // @ d.getValForwardingMetrics() != nil && + // @ (0 in d.getDomForwardingMetrics()) && + // @ (ingressID in d.getDomForwardingMetrics()) && + // @ d.getMacFactory() != nil // @ requires rd != nil && acc(rd.Mem(), _) // contracts for IO-spec // @ requires dp.Valid() // @ requires let d := *dPtr in - // @ acc(d.Mem(), _) && d.DpAgreesWithSpec(dp) + // @ d.DpAgreesWithSpec(dp) // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; func /*@ rc @*/ (ingressID uint16, rd BatchConn, dPtr **DataPlane /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { // @ ghost ioIngressID := ifsToIO_ifs(ingressID) @@ -846,17 +840,14 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta processor := newPacketProcessor(d, ingressID) var scmpErr /*@@@*/ scmpError + // @ d.getRunningMem() + // @ invariant acc(&scmpErr) // @ invariant forall i int :: { &msgs[i] } 0 <= i && i < len(msgs) ==> // @ msgs[i].Mem() // @ invariant writeMsgInv(writeMsgs) // @ invariant acc(dPtr, _) && *dPtr === d - // @ invariant acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) + // @ invariant acc(&d.running, _) // necessary for loop condition // @ invariant acc(d.Mem(), _) && 
d.WellConfigured() // @ invariant d.getValSvc() != nil // @ invariant d.getValForwardingMetrics() != nil @@ -909,12 +900,6 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant forall i int :: { &msgs[i] } 0 <= i && i < len(msgs) ==> msgs[i].Mem() // @ invariant writeMsgInv(writeMsgs) // @ invariant acc(dPtr, _) && *dPtr === d - // @ invariant acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) // @ invariant acc(d.Mem(), _) && d.WellConfigured() // @ invariant d.getValSvc() != nil // @ invariant d.getValForwardingMetrics() != nil @@ -1125,18 +1110,11 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant acc(d.Mem(), _) && d.WellConfigured() // @ invariant externals != nil ==> acc(externals, R4) // @ invariant externals != nil ==> acc(accBatchConn(externals), R4) - // (VerifiedSCION) can we drop a few of these perms? - // @ invariant acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) // @ invariant acc(d.Mem(), _) && d.WellConfigured() // @ invariant d.getValSvc() != nil // @ invariant d.getValForwardingMetrics() != nil // @ invariant 0 in d.getDomForwardingMetrics() - // @ invariant d.macFactory != nil + // @ invariant d.getMacFactory() != nil // @ invariant dp.Valid() // @ invariant d.DpAgreesWithSpec(dp) // @ invariant acc(ioLockRun.LockP(), _) && ioLockRun.LockInv() == SharedInv!< dp, ioSharedArgRun !>; @@ -1145,18 +1123,12 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta cl := // @ requires acc(&read, _) && read implements rc // @ requires acc(&d, _) - // @ requires acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) // @ requires acc(d.Mem(), _) && d.WellConfigured() // @ requires d.getValSvc() != nil // @ requires d.getValForwardingMetrics() != nil // @ requires 0 in d.getDomForwardingMetrics() // @ requires i in d.getDomForwardingMetrics() - // @ requires d.macFactory != nil + // @ requires d.getMacFactory() != nil // @ requires c != nil && acc(c.Mem(), _) // contracts for IO-spec // @ requires dp.Valid() @@ -1176,17 +1148,11 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta cl := // @ requires acc(&read, _) && read implements rc // @ requires acc(&d, _) - // @ requires acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ 
acc(&d.forwardingMetrics, _) && acc(&d.key, _) // @ requires acc(d.Mem(), _) && d.WellConfigured() // @ requires d.getValSvc() != nil // @ requires d.getValForwardingMetrics() != nil // @ requires 0 in d.getDomForwardingMetrics() - // @ requires d.macFactory != nil + // @ requires d.getMacFactory() != nil // @ requires c != nil && acc(c.Mem(), _) // contracts for IO-spec // @ requires dp.Valid() @@ -1196,6 +1162,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta defer log.HandlePanic() read(0, c, &d /*@, ioLock, ioSharedArg, dp @*/) //@ as rc } + // @ d.getInternalMem() go cl(d.internal /*@, ioLockRun, ioSharedArgRun, dp @*/) //@ as closure3 d.mtx.Unlock() @@ -1312,8 +1279,7 @@ type processResult struct { OutPkt []byte } -// @ requires acc(&d.macFactory, _) && d.macFactory != nil -// @ requires acc(d.Mem(), _) +// @ requires acc(d.Mem(), _) && d.getMacFactory() != nil // @ ensures res.sInit() && res.sInitD() == d // @ decreases func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcessor) { diff --git a/router/dataplane_spec.gobra b/router/dataplane_spec.gobra index ed9fe6dfa..08b70cb9f 100644 --- a/router/dataplane_spec.gobra +++ b/router/dataplane_spec.gobra @@ -204,6 +204,13 @@ pure func (d *DataPlane) getValForwardingMetrics() map[uint16]forwardingMetrics return unfolding acc(d.Mem(), _) in d.forwardingMetrics } +ghost +requires acc(d.Mem(), _) +decreases +pure func (d *DataPlane) getMacFactory() func() hash.Hash { + return unfolding acc(d.Mem(), _) in d.macFactory +} + ghost requires acc(d.Mem(), _) decreases @@ -319,6 +326,22 @@ func (d *DataPlane) getSvcMem() { unfold acc(d.Mem(), _) } +ghost +requires acc(d.Mem(), _) && d.InternalConnIsSet() +ensures acc(&d.internal, _) && acc(d.internal.Mem(), _) +decreases +func (d *DataPlane) getInternalMem() { + unfold acc(d.Mem(), _) +} + +ghost +requires acc(d.Mem(), _) +ensures acc(&d.running, _) +decreases +func (d *DataPlane) getRunningMem() { + unfold acc(d.Mem(), _) +} + ghost requires acc(d.Mem(), _) decreases @@ -359,8 +382,7 @@ func (d *DataPlane) getMacFactoryMem() { } ghost -requires acc(d.Mem(), _) -requires acc(&d.macFactory, _) && d.macFactory != nil +requires acc(d.Mem(), _) && d.getMacFactory() != nil ensures acc(&d.macFactory, _) && acc(&d.key, _) && acc(d.key, _) ensures acc(sl.AbsSlice_Bytes(*d.key, 0, len(*d.key)), _) ensures scrypto.ValidKeyForHash(*d.key) From fce03a39b047f496aa10f7e10e282f8dd2e94a0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Mon, 18 Mar 2024 18:21:56 +0100 Subject: [PATCH 05/57] bring changes to the io spec to speed things a little (#281) --- verification/io/io-spec.gobra | 156 ++++++++++++++++++++++------------ 1 file changed, 103 insertions(+), 53 deletions(-) diff --git a/verification/io/io-spec.gobra b/verification/io/io-spec.gobra index b0cc58d78..df46e6ae1 100644 --- a/verification/io/io-spec.gobra +++ b/verification/io/io-spec.gobra @@ -18,8 +18,11 @@ package io -// Unlike the original IO-spec from Isabelle, we need additional information about the network topology. -// To ensure the well-formedness of all map accesses we require an additional conjunction +// called BogusTrigger instead of Unit here because the name Unit is already in use. +type BogusTrigger struct{} + +// Unlike the original IO-spec from Isabelle, we need additional information about the network topology. 
+// To ensure the well-formedness of all map accesses we require an additional conjunction // for all the events (dp.Valid()) // This is the main IO Specification. @@ -119,20 +122,28 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_enter_guard(s IO_dp3s_state_local } pred (dp DataPlaneSpec) dp3s_iospec_bio3s_enter(s IO_dp3s_state_local, t Place) { - // TODO: we may need more triggering terms here - forall v IO_val :: { dp.dp3s_iospec_bio3s_enter_guard(s, t, v) } ( + forall v IO_val :: { TriggerBodyIoEnter(v) } ( match v { - case IO_Internal_val1{_, _, ?newpkt, ?nextif}: + case IO_Internal_val1{_, _, ?newpkt, ?nextif}: + // Gobra requires the triggering term to occur inside the qtfier body, + // otherwise we get an error in the call to CBio_IN_bio3s_enter_T. + // We named the variable `_ignored` because using `_` here leads to a strange + // type error. + let _ignored := TriggerBodyIoEnter(v) in (dp.Valid() && dp.dp3s_iospec_bio3s_enter_guard(s, t, v) ==> - (CBio_IN_bio3s_enter(t, v) && - dp.dp3s_iospec_ordered( - dp3s_add_obuf(s, nextif, newpkt), - CBio_IN_bio3s_enter_T(t, v)))) - default: + (CBio_IN_bio3s_enter(t, v) && + dp.dp3s_iospec_ordered( + dp3s_add_obuf(s, nextif, newpkt), + CBio_IN_bio3s_enter_T(t, v)))) + default: true }) } +ghost +decreases +pure func TriggerBodyIoEnter(v IO_val) BogusTrigger { return BogusTrigger{} } + pred CBio_IN_bio3s_xover_up2down(t Place, v IO_val) ghost @@ -146,11 +157,11 @@ requires v.isIO_Internal_val1 requires dp.Valid() decreases pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down_guard(s IO_dp3s_state_local, t Place, v IO_val) bool { - return let currseg := v.IO_Internal_val1_1.CurrSeg in + return let currseg := v.IO_Internal_val1_1.CurrSeg in match v.IO_Internal_val1_1.LeftSeg{ - case none[IO_seg2]: + case none[IO_seg2]: false - default: + default: let nextseg := get(v.IO_Internal_val1_1.LeftSeg) in (!currseg.ConsDir && nextseg.ConsDir && @@ -176,19 +187,28 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down_guard(s IO_dp3s_sta } pred (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down(s IO_dp3s_state_local, t Place) { - forall v IO_val :: { dp.dp3s_iospec_bio3s_xover_up2down_guard(s, t, v) }{ CBio_IN_bio3s_xover_up2down(t, v) } { dp.dp3s_iospec_ordered(dp3s_add_obuf(s, v.IO_Internal_val1_4, v.IO_Internal_val1_3), dp3s_iospec_bio3s_xover_up2down_T(t, v)) } ( + forall v IO_val :: { TriggerBodyIoXoverUp2Down(v) } ( match v { - case IO_Internal_val1{_, _, ?newpkt, ?nextif}: + case IO_Internal_val1{_, _, ?newpkt, ?nextif}: + // Gobra requires the triggering term to occur inside the qtfier body, + // otherwise we get an error in the call to dp3s_iospec_bio3s_xover_up2down_T. + // We named the variable `_ignored` because using `_` here leads to a strange + // type error. 
+ let _ignored := TriggerBodyIoXoverUp2Down(v) in (dp.Valid() && dp.dp3s_iospec_bio3s_xover_up2down_guard(s, t, v) ==> - (CBio_IN_bio3s_xover_up2down(t, v) && - dp.dp3s_iospec_ordered( - dp3s_add_obuf(s, nextif, newpkt), - dp3s_iospec_bio3s_xover_up2down_T(t, v)))) - default: + (CBio_IN_bio3s_xover_up2down(t, v) && + dp.dp3s_iospec_ordered( + dp3s_add_obuf(s, nextif, newpkt), + dp3s_iospec_bio3s_xover_up2down_T(t, v)))) + default: true }) } +ghost +decreases +pure func TriggerBodyIoXoverUp2Down(v IO_val) BogusTrigger { return BogusTrigger{} } + pred CBio_IN_bio3s_xover_core(t Place, v IO_val) ghost @@ -205,9 +225,9 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_core_guard(s IO_dp3s_state_ return (dp.Asid() in dp.Core() && let currseg := v.IO_Internal_val1_1.CurrSeg in match v.IO_Internal_val1_1.LeftSeg { - case none[IO_seg2]: + case none[IO_seg2]: false - default: + default: let nextseg := get(v.IO_Internal_val1_1.LeftSeg) in currseg.ConsDir == nextseg.ConsDir && len(nextseg.Future) > 0 && @@ -232,19 +252,28 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_core_guard(s IO_dp3s_state_ } pred (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_core(s IO_dp3s_state_local, t Place) { - forall v IO_val :: { dp.dp3s_iospec_bio3s_xover_core_guard(s, t, v) }{ CBio_IN_bio3s_xover_core(t, v) }{ dp.dp3s_iospec_ordered(dp3s_add_obuf(s, v.IO_Internal_val1_4, v.IO_Internal_val1_3), dp3s_iospec_bio3s_xover_core_T(t, v)) } ( + forall v IO_val :: { TriggerBodyIoXoverCore(v) } ( match v { - case IO_Internal_val1{_, _, ?newpkt, ?nextif}: + case IO_Internal_val1{_, _, ?newpkt, ?nextif}: + // Gobra requires the triggering term to occur inside the qtfier body, + // otherwise we get an error in the call to dp3s_iospec_bio3s_xover_core_T. + // We named the variable `_ignored` because using `_` here leads to a strange + // type error. + let _ignored := TriggerBodyIoXoverCore(v) in (dp.Valid() && dp.dp3s_iospec_bio3s_xover_core_guard(s, t, v) ==> - (CBio_IN_bio3s_xover_core(t, v) && - dp.dp3s_iospec_ordered( - dp3s_add_obuf(s, nextif, newpkt), - dp3s_iospec_bio3s_xover_core_T(t, v)))) - default: + (CBio_IN_bio3s_xover_core(t, v) && + dp.dp3s_iospec_ordered( + dp3s_add_obuf(s, nextif, newpkt), + dp3s_iospec_bio3s_xover_core_T(t, v)))) + default: true }) } +ghost +decreases +pure func TriggerBodyIoXoverCore(v IO_val) BogusTrigger { return BogusTrigger{} } + pred CBio_IN_bio3s_exit(t Place, v IO_val) ghost @@ -265,19 +294,27 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_exit_guard(s IO_dp3s_state_local, } pred (dp DataPlaneSpec) dp3s_iospec_bio3s_exit(s IO_dp3s_state_local, t Place) { - forall v IO_val :: { dp.dp3s_iospec_bio3s_exit_guard(s, t, v) }{ CBio_IN_bio3s_exit(t, v) }{ dp.dp3s_iospec_ordered(dp3s_add_obuf(s, some(v.IO_Internal_val2_3), v.IO_Internal_val2_2), dp3s_iospec_bio3s_exit_T(t, v)) } ( + forall v IO_val :: { TriggerBodyIoExit(v) } ( match v { - case IO_Internal_val2{_, ?newpkt, ?nextif}: + case IO_Internal_val2{_, ?newpkt, ?nextif}: + // Gobra requires the triggering term to occur inside the qtfier body, + // otherwise we get an error in the call to dp3s_iospec_bio3s_exit_T. + // We named the variable `_ignored` because using `_` here leads to a strange + // type error. 
+ let _ignored := TriggerBodyIoExit(v) in (dp.Valid() && dp.dp3s_iospec_bio3s_exit_guard(s, t, v) ==> - (CBio_IN_bio3s_exit(t, v) && - dp.dp3s_iospec_ordered( - dp3s_add_obuf(s, some(nextif), newpkt), - dp3s_iospec_bio3s_exit_T(t, v)))) - default: + (CBio_IN_bio3s_exit(t, v) && + dp.dp3s_iospec_ordered( + dp3s_add_obuf(s, some(nextif), newpkt), + dp3s_iospec_bio3s_exit_T(t, v)))) + default: true - }) + }) } +ghost +decreases +pure func TriggerBodyIoExit(v IO_val) BogusTrigger { return BogusTrigger{} } pred CBioIO_bio3s_send(t Place, v IO_val) @@ -296,22 +333,35 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_send_guard(s IO_dp3s_state_local, (let obuf_set := s.obuf[v.IO_val_Pkt2_1] in (v.IO_val_Pkt2_2 in obuf_set)) } -// TODO: annotate WriteBatch, skipped for now pred (dp DataPlaneSpec) dp3s_iospec_bio3s_send(s IO_dp3s_state_local, t Place) { - forall v IO_val :: { dp.dp3s_iospec_bio3s_send_guard(s, t, v) }{ CBioIO_bio3s_send(t, v) }{ dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_send_T(t, v)) }{ CBioIO_bio3s_send(t, v) }{ dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_send_T(t, v)) } ( + forall v IO_val :: { TriggerBodyIoSend(v) } ( match v { - case IO_val_Pkt2{_, _}: + case IO_val_Pkt2{_, _}: + // Gobra requires the triggering term to occur inside the qtfier body, + // otherwise we get an error in the call to dp3s_iospec_bio3s_send_T. + // We named the variable `_ignored` because using `_` here leads to a strange + // type error. + let _ignored := TriggerBodyIoSend(v) in (dp.Valid() && dp.dp3s_iospec_bio3s_send_guard(s, t, v) ==> - CBioIO_bio3s_send(t, v) && - dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_send_T(t, v))) - case IO_val_Unsupported{_, _}: + CBioIO_bio3s_send(t, v) && + dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_send_T(t, v))) + case IO_val_Unsupported{_, _}: + // Gobra requires the triggering term to occur inside the qtfier body, + // otherwise we get an error in the call to dp3s_iospec_bio3s_send_T. + // We named the variable `_ignored` because using `_` here leads to a strange + // type error. + let _ignored := TriggerBodyIoSend(v) in (CBioIO_bio3s_send(t, v) && dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_send_T(t, v))) - default: + default: true }) } +ghost +decreases +pure func TriggerBodyIoSend(v IO_val) BogusTrigger { return BogusTrigger{} } + pred CBioIO_bio3s_recv(t Place) ghost @@ -319,7 +369,7 @@ requires CBioIO_bio3s_recv(t) decreases pure func dp3s_iospec_bio3s_recv_T(t Place) Place -// We can safely make this assumption as Isabelle's IO-spec never +// We can safely make this assumption as Isabelle's IO-spec never // receives the other IO values (Unit and Internal). 
ghost requires CBioIO_bio3s_recv(t) @@ -330,12 +380,12 @@ pure func dp3s_iospec_bio3s_recv_R(t Place) (val IO_val) pred (dp DataPlaneSpec) dp3s_iospec_bio3s_recv(s IO_dp3s_state_local, t Place) { CBioIO_bio3s_recv(t) && (match dp3s_iospec_bio3s_recv_R(t) { - case IO_val_Pkt2{?recvif, ?pkt}: + case IO_val_Pkt2{?recvif, ?pkt}: dp.dp3s_iospec_ordered( dp3s_add_ibuf(s, recvif, pkt), dp3s_iospec_bio3s_recv_T(t)) - case IO_val_Unsupported{_, _}: + case IO_val_Unsupported{_, _}: dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_recv_T(t)) - default: + default: dp.dp3s_iospec_ordered(undefined(), dp3s_iospec_bio3s_recv_T(t)) }) } @@ -359,31 +409,31 @@ pred (dp DataPlaneSpec) dp3s_iospec_stop(s IO_dp3s_state_local, t Place) { ghost decreases requires token(t) && CBio_IN_bio3s_enter(t, v) -ensures token(old(CBio_IN_bio3s_enter_T(t, v))) +ensures token(old(CBio_IN_bio3s_enter_T(t, v))) func Enter(ghost t Place, ghost v IO_val) ghost decreases requires token(t) && CBio_IN_bio3s_xover_core(t, v) -ensures token(old(dp3s_iospec_bio3s_xover_core_T(t, v))) +ensures token(old(dp3s_iospec_bio3s_xover_core_T(t, v))) func Xover_core(ghost t Place, ghost v IO_val) ghost decreases requires token(t) && CBio_IN_bio3s_xover_up2down(t, v) -ensures token(old(dp3s_iospec_bio3s_xover_up2down_T(t, v))) +ensures token(old(dp3s_iospec_bio3s_xover_up2down_T(t, v))) func Xover_up2down(ghost t Place, ghost v IO_val) ghost decreases requires token(t) && CBio_IN_bio3s_exit(t, v) -ensures token(old(dp3s_iospec_bio3s_exit_T(t, v))) +ensures token(old(dp3s_iospec_bio3s_exit_T(t, v))) func Exit(ghost t Place, ghost v IO_val) ghost decreases requires token(t) && CBioIO_bio3s_send(t, v) -ensures token(old(dp3s_iospec_bio3s_send_T(t, v))) +ensures token(old(dp3s_iospec_bio3s_send_T(t, v))) func Send(ghost t Place, ghost v IO_val) /** End of helper functions to perfrom BIO operations **/ \ No newline at end of file From 42481464dbdd2d3935746708e4a66563f81fde85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Mon, 18 Mar 2024 21:23:57 +0100 Subject: [PATCH 06/57] Add config for overflow in the CI (#247) * start overflow checking * backup * fix flags * Apply suggestions from code review * disable checks in all packages for now * Update .github/workflows/gobra.yml --- .github/workflows/gobra.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml index ed10bd5ca..1a8ba64e4 100644 --- a/.github/workflows/gobra.yml +++ b/.github/workflows/gobra.yml @@ -26,6 +26,7 @@ env: backend: 'SILICON' disableNL: '0' unsafeWildcardOptimization: '1' + overflow: '0' jobs: verify-deps: @@ -63,6 +64,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -82,6 +84,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -100,6 +103,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -118,6 +122,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ 
env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -136,6 +141,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -154,6 +160,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -172,6 +179,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -190,6 +198,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -208,6 +217,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -227,6 +237,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -245,6 +256,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -263,6 +275,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -281,6 +294,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -299,6 +313,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -317,6 +332,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -335,6 +351,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -353,6 +370,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -371,6 +389,7 @@ jobs: imageVersion: ${{ 
env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} backend: ${{ env.backend }} @@ -403,6 +422,7 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: 'on' requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: '1' backend: ${{ env.backend }} From c6db8fd6ee68f3ae15ed73b83fee60c86b113a29 Mon Sep 17 00:00:00 2001 From: Dionysios Spiliopoulos <32896454+Dspil@users.noreply.github.com> Date: Mon, 25 Mar 2024 15:57:49 +0100 Subject: [PATCH 07/57] Reduce permission amount to buffer for decodeFromLayers (#285) * R40 * epic * extn * onehop * reduce permission amount of decode layers --- pkg/slayers/extn.go | 32 +++++------ pkg/slayers/path/epic/epic.go | 54 +++++++++---------- pkg/slayers/path/onehop/onehop.go | 26 ++++----- pkg/slayers/path/path.go | 4 +- pkg/slayers/path/scion/raw.go | 2 +- pkg/slayers/scion.go | 42 +++++++-------- pkg/slayers/scmp.go | 30 +++++------ router/dataplane.go | 10 ++-- .../github.com/google/gopacket/parser.gobra | 2 +- 9 files changed, 101 insertions(+), 101 deletions(-) diff --git a/pkg/slayers/extn.go b/pkg/slayers/extn.go index 499ada75c..08a7701e7 100644 --- a/pkg/slayers/extn.go +++ b/pkg/slayers/extn.go @@ -99,7 +99,7 @@ func (o *tlvOption) serializeTo(data []byte, fixLengths bool) { } // @ requires 1 <= len(data) -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) // @ ensures err == nil ==> acc(res) // @ ensures (err == nil && res.OptType != OptTypePad1) ==> ( // @ 2 <= res.ActualLength && res.ActualLength <= len(data) && res.OptData === data[2:res.ActualLength]) @@ -107,8 +107,8 @@ func (o *tlvOption) serializeTo(data []byte, fixLengths bool) { // @ ensures err != nil ==> err.ErrorMem() // @ decreases func decodeTLVOption(data []byte) (res *tlvOption, err error) { - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) - // @ defer fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) + // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) + // @ defer fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) o := &tlvOption{OptType: OptionType(data[0])} if OptionType(data[0]) == OptTypePad1 { o.ActualLength = 1 @@ -241,7 +241,7 @@ func (e *extnBase) serializeToWithTLVOptions(b gopacket.SerializeBuffer, // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ ensures resErr != nil ==> resErr.ErrorMem() // The following poscondition is more a lot more complicated than it would be if the return type // was *extnBase instead of extnBase @@ -259,10 +259,10 @@ func decodeExtnBase(data []byte, df gopacket.DecodeFeedback) (res extnBase, resE len(data))) } - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) + // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) e.NextHdr = L4ProtocolType(data[0]) e.ExtLen = data[1] - // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) + // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) e.ActualLen = (int(e.ExtLen) + 1) * LineLen if len(data) < e.ActualLen { return extnBase{}, serrors.New(fmt.Sprintf("invalid extension header. 
"+ @@ -346,7 +346,7 @@ func (h *HopByHopExtn) SerializeTo(b gopacket.SerializeBuffer, // @ requires h.NonInitMem() // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ ensures res == nil ==> h.Mem(data) // @ ensures res != nil ==> (h.NonInitMem() && res.ErrorMem()) // @ decreases @@ -373,14 +373,14 @@ func (h *HopByHopExtn) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) // @ invariant len(h.Options) == lenOptions // @ invariant forall i int :: { &h.Options[i] } 0 <= i && i < lenOptions ==> // @ (acc(&h.Options[i]) && h.Options[i].Mem(i)) - // @ invariant acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) + // @ invariant acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ invariant h.BaseLayer.Contents === data[:h.ActualLen] // @ invariant h.BaseLayer.Payload === data[h.ActualLen:] // @ decreases h.ActualLen - offset for offset < h.ActualLen { - // @ sl.SplitRange_Bytes(data, offset, h.ActualLen, R20) + // @ sl.SplitRange_Bytes(data, offset, h.ActualLen, R40) opt, err := decodeTLVOption(data[offset:h.ActualLen]) - // @ sl.CombineRange_Bytes(data, offset, h.ActualLen, R20) + // @ sl.CombineRange_Bytes(data, offset, h.ActualLen, R40) if err != nil { // @ fold h.NonInitMem() return err @@ -478,7 +478,7 @@ func (e *EndToEndExtn) LayerPayload( /*@ ghost ub []byte @*/ ) (res []byte /*@ , // @ requires e.NonInitMem() // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ ensures res == nil ==> e.Mem(data) // @ ensures res != nil ==> (e.NonInitMem() && res.ErrorMem()) // @ decreases @@ -505,14 +505,14 @@ func (e *EndToEndExtn) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) // @ invariant len(e.Options) == lenOptions // @ invariant forall i int :: { &e.Options[i] } 0 <= i && i < lenOptions ==> // @ (acc(&e.Options[i]) && e.Options[i].Mem(i)) - // @ invariant acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) + // @ invariant acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ invariant e.BaseLayer.Contents === data[:e.ActualLen] // @ invariant e.BaseLayer.Payload === data[e.ActualLen:] // @ decreases e.ActualLen - offset for offset < e.ActualLen { - // @ sl.SplitRange_Bytes(data, offset, e.ActualLen, R20) + // @ sl.SplitRange_Bytes(data, offset, e.ActualLen, R40) opt, err := decodeTLVOption(data[offset:e.ActualLen]) - // @ sl.CombineRange_Bytes(data, offset, e.ActualLen, R20) + // @ sl.CombineRange_Bytes(data, offset, e.ActualLen, R40) if err != nil { // @ fold e.NonInitMem() return err @@ -600,7 +600,7 @@ type HopByHopExtnSkipper struct { // DecodeFromBytes implementation according to gopacket.DecodingLayer // @ requires s.NonInitMem() // @ requires df != nil -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ preserves df.Mem() // @ ensures res == nil ==> s.Mem(data) // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem()) @@ -656,7 +656,7 @@ type EndToEndExtnSkipper struct { // @ requires s.NonInitMem() // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ ensures res == nil ==> s.Mem(data) // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem()) // @ decreases diff --git a/pkg/slayers/path/epic/epic.go 
b/pkg/slayers/path/epic/epic.go index f8b1a3c08..32c57f596 100644 --- a/pkg/slayers/path/epic/epic.go +++ b/pkg/slayers/path/epic/epic.go @@ -138,7 +138,7 @@ func (p *Path) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { // DecodeFromBytes deserializes the buffer b into the Path. On failure, an error is returned, // otherwise SerializeTo will return nil. // @ requires p.NonInitMem() -// @ preserves slices.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R40) // @ ensures len(b) < MetadataLen ==> r != nil // @ ensures r == nil ==> p.Mem(b) // @ ensures r != nil ==> p.NonInitMem() && r.ErrorMem() @@ -148,8 +148,8 @@ func (p *Path) DecodeFromBytes(b []byte) (r error) { return serrors.New("EPIC Path raw too short", "expected", int(MetadataLen), "actual", int(len(b))) } //@ unfold p.NonInitMem() - //@ slices.SplitByIndex_Bytes(b, 0, len(b), PktIDLen, writePerm) - //@ preserves slices.AbsSlice_Bytes(b, 0, PktIDLen) + //@ slices.SplitByIndex_Bytes(b, 0, len(b), PktIDLen, R40) + //@ preserves acc(slices.AbsSlice_Bytes(b, 0, PktIDLen), R40) //@ preserves acc(&p.PktID) //@ preserves acc(&p.PHVF) //@ preserves acc(&p.LHVF) @@ -159,56 +159,56 @@ func (p *Path) DecodeFromBytes(b []byte) (r error) { //@ ensures slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) //@ decreases //@ outline( - //@ ghost slices.Reslice_Bytes(b, 0, PktIDLen, writePerm) + //@ ghost slices.Reslice_Bytes(b, 0, PktIDLen, R40) p.PktID.DecodeFromBytes(b[:PktIDLen]) p.PHVF = make([]byte, HVFLen) p.LHVF = make([]byte, HVFLen) //@ fold slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) //@ fold slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ slices.Unslice_Bytes(b, 0, PktIDLen, writePerm) + //@ slices.Unslice_Bytes(b, 0, PktIDLen, R40) //@ ) - //@ slices.SplitByIndex_Bytes(b, PktIDLen, len(b), PktIDLen+HVFLen, writePerm) + //@ slices.SplitByIndex_Bytes(b, PktIDLen, len(b), PktIDLen+HVFLen, R40) //@ preserves acc(&p.PHVF) //@ preserves slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ preserves slices.AbsSlice_Bytes(b, PktIDLen, PktIDLen + HVFLen) + //@ preserves acc(slices.AbsSlice_Bytes(b, PktIDLen, PktIDLen + HVFLen), R40) //@ decreases //@ outline( - //@ slices.Reslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) + //@ slices.Reslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, R40) //@ unfold slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ unfold acc(slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen), R1) - copy(p.PHVF, b[PktIDLen:(PktIDLen+HVFLen)] /*@, R1 @*/) - //@ fold acc(slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen), R1) + //@ unfold acc(slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen), R41) + copy(p.PHVF, b[PktIDLen:(PktIDLen+HVFLen)] /*@, R41 @*/) + //@ fold acc(slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen), R41) //@ fold slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ slices.Unslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) + //@ slices.Unslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, R40) //@ ) - //@ slices.CombineAtIndex_Bytes(b, 0, PktIDLen+HVFLen, PktIDLen, writePerm) - //@ slices.SplitByIndex_Bytes(b, PktIDLen+HVFLen, len(b), MetadataLen, writePerm) + //@ slices.CombineAtIndex_Bytes(b, 0, PktIDLen+HVFLen, PktIDLen, R40) + //@ slices.SplitByIndex_Bytes(b, PktIDLen+HVFLen, len(b), MetadataLen, R40) //@ preserves acc(&p.LHVF) //@ preserves slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ preserves slices.AbsSlice_Bytes(b, PktIDLen+HVFLen, MetadataLen) + //@ preserves acc(slices.AbsSlice_Bytes(b, 
PktIDLen+HVFLen, MetadataLen), R40) //@ decreases //@ outline( - //@ slices.Reslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) + //@ slices.Reslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, R40) //@ unfold slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ unfold acc(slices.AbsSlice_Bytes(b[PktIDLen+HVFLen:MetadataLen], 0, HVFLen), R1) - copy(p.LHVF, b[(PktIDLen+HVFLen):MetadataLen] /*@, R1 @*/) - //@ fold acc(slices.AbsSlice_Bytes(b[PktIDLen+HVFLen:MetadataLen], 0, HVFLen), R1) + //@ unfold acc(slices.AbsSlice_Bytes(b[PktIDLen+HVFLen:MetadataLen], 0, HVFLen), R41) + copy(p.LHVF, b[(PktIDLen+HVFLen):MetadataLen] /*@, R41 @*/) + //@ fold acc(slices.AbsSlice_Bytes(b[PktIDLen+HVFLen:MetadataLen], 0, HVFLen), R41) //@ fold slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ slices.Unslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) + //@ slices.Unslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, R40) //@ ) - //@ slices.CombineAtIndex_Bytes(b, 0, MetadataLen, PktIDLen+HVFLen, writePerm) + //@ slices.CombineAtIndex_Bytes(b, 0, MetadataLen, PktIDLen+HVFLen, R40) p.ScionPath = &scion.Raw{} //@ fold p.ScionPath.Base.NonInitMem() //@ fold p.ScionPath.NonInitMem() - //@ slices.Reslice_Bytes(b, MetadataLen, len(b), writePerm) + //@ slices.Reslice_Bytes(b, MetadataLen, len(b), R40) ret := p.ScionPath.DecodeFromBytes(b[MetadataLen:]) //@ ghost if ret == nil { //@ fold p.Mem(b) //@ } else { //@ fold p.NonInitMem() //@ } - //@ slices.Unslice_Bytes(b, MetadataLen, len(b), writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, len(b), MetadataLen, writePerm) + //@ slices.Unslice_Bytes(b, MetadataLen, len(b), R40) + //@ slices.CombineAtIndex_Bytes(b, 0, len(b), MetadataLen, R40) return ret } @@ -278,17 +278,17 @@ type PktID struct { // DecodeFromBytes deserializes the buffer (raw) into the PktID. // @ requires len(raw) >= PktIDLen // @ preserves acc(i) -// @ preserves acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R1) +// @ preserves acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R41) // @ ensures 0 <= i.Timestamp // @ ensures 0 <= i.Counter // @ decreases func (i *PktID) DecodeFromBytes(raw []byte) { - //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R1) + //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R41) //@ assert forall i int :: { &raw[:4][i] } 0 <= i && i < 4 ==> &raw[:4][i] == &raw[i] i.Timestamp = binary.BigEndian.Uint32(raw[:4]) //@ assert forall i int :: { &raw[4:8][i] } 0 <= i && i < 4 ==> &raw[4:8][i] == &raw[4 + i] i.Counter = binary.BigEndian.Uint32(raw[4:8]) - //@ fold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R1) + //@ fold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R41) } // SerializeTo serializes the PktID into the buffer (b). 
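The hunks in this patch all follow one discipline: a decoder that only reads its buffer demands just a read fraction (R40) of the buffer predicate in its contract, and the ghost lemmas and helpers it calls demand the strictly smaller fraction R41, so the caller's permission always suffices for the split. A minimal sketch of that shape, mirroring the scion.go hunk further below (peekFirstLine is a hypothetical name; it assumes the usual sl, binary, R40 and R41 imports, and that R41 denotes a smaller fraction than R40, as set up in verification/utils/definitions):

// @ requires len(data) >= 4
// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40)
// @ decreases
func peekFirstLine(data []byte) uint32 {
	// Carve out only the fraction R41 of the first four bytes; the
	// remainder of the contract's R40 on the buffer is never consumed.
	// @ sl.SplitRange_Bytes(data, 0, 4, R41)
	// @ unfold acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R41)
	firstLine := binary.BigEndian.Uint32(data[:4])
	// @ fold acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R41)
	// @ sl.CombineRange_Bytes(data, 0, 4, R41)
	return firstLine
}

Because callers now retain most of their permission while decoding runs, read-only passes over the same packet buffer no longer contend for the full write permission.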
diff --git a/pkg/slayers/path/onehop/onehop.go b/pkg/slayers/path/onehop/onehop.go index bd0f626f9..200013ace 100644 --- a/pkg/slayers/path/onehop/onehop.go +++ b/pkg/slayers/path/onehop/onehop.go @@ -66,7 +66,7 @@ type Path struct { } // @ requires o.NonInitMem() -// @ preserves slices.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves acc(slices.AbsSlice_Bytes(data, 0, len(data)), R40) // @ ensures (len(data) >= PathLen) == (r == nil) // @ ensures r == nil ==> o.Mem(data) // @ ensures r != nil ==> o.NonInitMem() @@ -79,29 +79,29 @@ func (o *Path) DecodeFromBytes(data []byte) (r error) { } offset := 0 //@ unfold o.NonInitMem() - //@ slices.SplitByIndex_Bytes(data, 0, len(data), path.InfoLen, R1) - //@ slices.Reslice_Bytes(data, 0, path.InfoLen, R1) + //@ slices.SplitByIndex_Bytes(data, 0, len(data), path.InfoLen, R41) + //@ slices.Reslice_Bytes(data, 0, path.InfoLen, R41) if err := o.Info.DecodeFromBytes(data[:path.InfoLen]); err != nil { // @ Unreachable() return err } - //@ slices.Unslice_Bytes(data, 0, path.InfoLen, R1) + //@ slices.Unslice_Bytes(data, 0, path.InfoLen, R41) offset += path.InfoLen - //@ slices.SplitByIndex_Bytes(data, offset, len(data), offset+path.HopLen, R1) - //@ slices.Reslice_Bytes(data, offset, offset+path.HopLen, R1) + //@ slices.SplitByIndex_Bytes(data, offset, len(data), offset+path.HopLen, R41) + //@ slices.Reslice_Bytes(data, offset, offset+path.HopLen, R41) if err := o.FirstHop.DecodeFromBytes(data[offset : offset+path.HopLen]); err != nil { // @ Unreachable() return err } - //@ slices.Unslice_Bytes(data, offset, offset+path.HopLen, R1) - //@ slices.CombineAtIndex_Bytes(data, 0, offset+path.HopLen, offset, R1) + //@ slices.Unslice_Bytes(data, offset, offset+path.HopLen, R41) + //@ slices.CombineAtIndex_Bytes(data, 0, offset+path.HopLen, offset, R41) offset += path.HopLen - //@ slices.SplitByIndex_Bytes(data, offset, len(data), offset+path.HopLen, R1) - //@ slices.Reslice_Bytes(data, offset, offset+path.HopLen, R1) + //@ slices.SplitByIndex_Bytes(data, offset, len(data), offset+path.HopLen, R41) + //@ slices.Reslice_Bytes(data, offset, offset+path.HopLen, R41) r = o.SecondHop.DecodeFromBytes(data[offset : offset+path.HopLen]) - //@ slices.Unslice_Bytes(data, offset, offset+path.HopLen, R1) - //@ slices.CombineAtIndex_Bytes(data, offset, len(data), offset+path.HopLen, R1) - //@ slices.CombineAtIndex_Bytes(data, 0, len(data), offset, R1) + //@ slices.Unslice_Bytes(data, offset, offset+path.HopLen, R41) + //@ slices.CombineAtIndex_Bytes(data, offset, len(data), offset+path.HopLen, R41) + //@ slices.CombineAtIndex_Bytes(data, 0, len(data), offset, R41) //@ ghost if r == nil { fold o.Mem(data) } else { fold o.NonInitMem() } return r } diff --git a/pkg/slayers/path/path.go b/pkg/slayers/path/path.go index 7b03e90e8..c332ce27b 100644 --- a/pkg/slayers/path/path.go +++ b/pkg/slayers/path/path.go @@ -81,7 +81,7 @@ type Path interface { // (VerifiedSCION) There are implementations of this interface (e.g., scion.Raw) that // store b and use it as internal data. 
//@ requires NonInitMem() - //@ preserves sl.AbsSlice_Bytes(b, 0, len(b)) + //@ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R40) //@ ensures err == nil ==> Mem(b) //@ ensures err != nil ==> err.ErrorMem() //@ ensures err != nil ==> NonInitMem() @@ -219,7 +219,7 @@ func (p *rawPath) SerializeTo(b []byte /*@, ghost underlyingBuf []byte @*/) (e e } // @ requires p.NonInitMem() -// @ preserves sl.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R40) // @ ensures p.Mem(b) // @ ensures e == nil // @ decreases diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index 68dd61154..825115cd7 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -34,7 +34,7 @@ type Raw struct { // DecodeFromBytes only decodes the PathMetaHeader. Otherwise the nothing is decoded and simply kept // as raw bytes. // @ requires s.NonInitMem() -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ ensures res == nil ==> s.Mem(data) // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem()) // @ decreases diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index 0e44da293..77f5d1aac 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -317,7 +317,7 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // data, so care should be taken to copy it first should later modification of data be required // before the SCION layer is discarded. // @ requires s.NonInitMem() -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ preserves df != nil && df.Mem() // @ ensures res == nil ==> s.Mem(data) // @ ensures res != nil ==> s.NonInitMem() && res.ErrorMem() @@ -329,27 +329,27 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er return serrors.New("packet is shorter than the common header length", "min", CmnHdrLen, "actual", len(data)) } - // @ sl.SplitRange_Bytes(data, 0, 4, R15) - // @ preserves 4 <= len(data) && acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R15) + // @ sl.SplitRange_Bytes(data, 0, 4, R41) + // @ preserves 4 <= len(data) && acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R41) // @ decreases // @ outline( - // @ unfold acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R15) + // @ unfold acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R41) firstLine := binary.BigEndian.Uint32(data[:4]) - // @ fold acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R15) + // @ fold acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R41) // @ ) - // @ sl.CombineRange_Bytes(data, 0, 4, R15) + // @ sl.CombineRange_Bytes(data, 0, 4, R41) // @ unfold s.NonInitMem() s.Version = uint8(firstLine >> 28) s.TrafficClass = uint8((firstLine >> 20) & 0xFF) s.FlowID = firstLine & 0xFFFFF // @ preserves acc(&s.NextHdr) && acc(&s.HdrLen) && acc(&s.PayloadLen) && acc(&s.PathType) // @ preserves acc(&s.DstAddrType) && acc(&s.SrcAddrType) - // @ preserves CmnHdrLen <= len(data) && acc(sl.AbsSlice_Bytes(data, 0, len(data)), R15) + // @ preserves CmnHdrLen <= len(data) && acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) // @ ensures s.DstAddrType.Has3Bits() && s.SrcAddrType.Has3Bits() // @ ensures 0 <= s.PathType && s.PathType < 256 // @ decreases // @ outline( - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R15) + // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) s.NextHdr = L4ProtocolType(data[4]) s.HdrLen = data[5] // @ assert &data[6:8][0] == &data[6] && &data[6:8][1] == &data[7] @@ -361,20 
+361,20 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ assert int(s.DstAddrType) == b.BitAnd7(int(data[9] >> 4)) s.SrcAddrType = AddrType(data[9] & 0x7) // @ assert int(s.SrcAddrType) == b.BitAnd7(int(data[9])) - // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R15) + // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) // @ ) // Decode address header. - // @ sl.SplitByIndex_Bytes(data, 0, len(data), CmnHdrLen, R5) - // @ sl.Reslice_Bytes(data, CmnHdrLen, len(data), R5) + // @ sl.SplitByIndex_Bytes(data, 0, len(data), CmnHdrLen, R41) + // @ sl.Reslice_Bytes(data, CmnHdrLen, len(data), R41) if err := s.DecodeAddrHdr(data[CmnHdrLen:]); err != nil { // @ fold s.NonInitMem() - // @ sl.Unslice_Bytes(data, CmnHdrLen, len(data), R5) - // @ sl.CombineAtIndex_Bytes(data, 0, len(data), CmnHdrLen, R5) + // @ sl.Unslice_Bytes(data, CmnHdrLen, len(data), R41) + // @ sl.CombineAtIndex_Bytes(data, 0, len(data), CmnHdrLen, R41) df.SetTruncated() return err } - // @ sl.Unslice_Bytes(data, CmnHdrLen, len(data), R5) - // @ sl.CombineAtIndex_Bytes(data, 0, len(data), CmnHdrLen, R5) + // @ sl.Unslice_Bytes(data, CmnHdrLen, len(data), R41) + // @ sl.CombineAtIndex_Bytes(data, 0, len(data), CmnHdrLen, R41) // (VerifiedSCION) the first ghost parameter to AddrHdrLen is ignored when the second // is set to nil. As such, we pick the easiest possible value as a placeholder. addrHdrLen := s.AddrHdrLen( /*@ nil, true @*/ ) @@ -404,10 +404,10 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ fold s.NonInitMem() return err } - // @ sl.SplitRange_Bytes(data, offset, offset+pathLen, writePerm) + // @ sl.SplitRange_Bytes(data, offset, offset+pathLen, R40) err = s.Path.DecodeFromBytes(data[offset : offset+pathLen]) if err != nil { - // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, writePerm) + // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R40) // @ unfold s.HeaderMem(data[CmnHdrLen:]) // @ s.PathPoolMemExchange(s.PathType, s.Path) // @ fold s.NonInitMem() @@ -421,7 +421,7 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er s.Contents = data[:hdrBytes] s.Payload = data[hdrBytes:] - // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, writePerm) + // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R40) // @ fold s.Mem(data) return nil @@ -898,7 +898,7 @@ func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err er // @ requires acc(&s.SrcAddrType, HalfPerm) && s.SrcAddrType.Has3Bits() // @ requires acc(&s.DstAddrType, HalfPerm) && s.DstAddrType.Has3Bits() // @ requires acc(&s.RawSrcAddr) && acc(&s.RawDstAddr) -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R10) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) // @ ensures res == nil ==> s.HeaderMem(data) // @ ensures res != nil ==> res.ErrorMem() // @ ensures res != nil ==> ( @@ -913,13 +913,13 @@ func (s *SCION) DecodeAddrHdr(data []byte) (res error) { "actual", len(data)) } offset := 0 - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R10) + // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) // @ assert forall i int :: { &data[offset:][i] }{ &data[i] } 0 <= i && i < l ==> &data[offset:][i] == &data[i] s.DstIA = addr.IA(binary.BigEndian.Uint64(data[offset:])) offset += addr.IABytes // @ assert forall i int :: { &data[offset:][i] } 0 <= i && i < l ==> &data[offset:][i] == &data[offset+i] s.SrcIA = addr.IA(binary.BigEndian.Uint64(data[offset:])) - // @ fold 
acc(sl.AbsSlice_Bytes(data, 0, len(data)), R10) + // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) offset += addr.IABytes dstAddrBytes := s.DstAddrType.Length() srcAddrBytes := s.SrcAddrType.Length() diff --git a/pkg/slayers/scmp.go b/pkg/slayers/scmp.go index 9f434b6c9..e74fe6656 100644 --- a/pkg/slayers/scmp.go +++ b/pkg/slayers/scmp.go @@ -197,7 +197,7 @@ func (s *SCMP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOp // DecodeFromBytes decodes the given bytes into this layer. // @ requires df != nil -// @ preserves slices.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves acc(slices.AbsSlice_Bytes(data, 0, len(data)), R40) // @ requires s.NonInitMem() // @ preserves df.Mem() // @ ensures res == nil ==> s.Mem(data) @@ -210,31 +210,31 @@ func (s *SCMP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res err } // @ unfold s.NonInitMem() // @ requires len(data) >= 4 - // @ requires slices.AbsSlice_Bytes(data, 0, len(data)) + // @ requires acc(slices.AbsSlice_Bytes(data, 0, len(data)), R40) // @ preserves acc(&s.TypeCode) - // @ ensures slices.AbsSlice_Bytes(data, 2, len(data)) - // @ ensures slices.AbsSlice_Bytes(data, 0, 2) + // @ ensures acc(slices.AbsSlice_Bytes(data, 2, len(data)), R40) + // @ ensures acc(slices.AbsSlice_Bytes(data, 0, 2), R40) // @ decreases // @ outline ( - // @ slices.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm) - // @ unfold slices.AbsSlice_Bytes(data, 0, 2) + // @ slices.SplitByIndex_Bytes(data, 0, len(data), 2, R40) + // @ unfold acc(slices.AbsSlice_Bytes(data, 0, 2), R40) s.TypeCode = CreateSCMPTypeCode(SCMPType(data[0]), SCMPCode(data[1])) - // @ fold slices.AbsSlice_Bytes(data, 0, 2) + // @ fold acc(slices.AbsSlice_Bytes(data, 0, 2), R40) // @ ) // @ requires len(data) >= 4 - // @ requires slices.AbsSlice_Bytes(data, 0, 2) - // @ requires slices.AbsSlice_Bytes(data, 2, len(data)) + // @ requires acc(slices.AbsSlice_Bytes(data, 0, 2), R40) + // @ requires acc(slices.AbsSlice_Bytes(data, 2, len(data)), R40) // @ preserves acc(&s.Checksum) - // @ ensures slices.AbsSlice_Bytes(data, 0, len(data)) + // @ ensures acc(slices.AbsSlice_Bytes(data, 0, len(data)), R40) // @ decreases // @ outline ( - // @ slices.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm) - // @ unfold slices.AbsSlice_Bytes(data, 2, 4) + // @ slices.SplitByIndex_Bytes(data, 2, len(data), 4, R40) + // @ unfold acc(slices.AbsSlice_Bytes(data, 2, 4), R40) // @ assert forall i int :: { &data[2:4][i] } 0 <= i && i < 2 ==> &data[2 + i] == &data[2:4][i] s.Checksum = binary.BigEndian.Uint16(data[2:4]) - // @ fold slices.AbsSlice_Bytes(data, 2, 4) - // @ slices.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm) - // @ slices.CombineAtIndex_Bytes(data, 0, len(data), 4, writePerm) + // @ fold acc(slices.AbsSlice_Bytes(data, 2, 4), R40) + // @ slices.CombineAtIndex_Bytes(data, 0, 4, 2, R40) + // @ slices.CombineAtIndex_Bytes(data, 0, len(data), 4, R40) // @ ) s.BaseLayer = BaseLayer{Contents: data[:4], Payload: data[4:]} // @ fold s.BaseLayer.Mem(data, 4) diff --git a/router/dataplane.go b/router/dataplane.go index c2e1589d9..168b9ac58 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -3498,7 +3498,7 @@ func (p *scionPacketProcessor) prepareSCMP( // Due to Viper's very strict injectivity constraints: // @ requires forall i, j int :: { &opts[i], &opts[j] } 0 <= i && i < j && j < len(opts) ==> // @ opts[i] !== opts[j] -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R39) // @ ensures forall i 
int :: { &opts[i] } 0 <= i && i < len(opts) ==> // @ (acc(&opts[i], R10) && opts[i] != nil) // @ ensures -1 <= idx && idx < len(opts) @@ -3538,7 +3538,7 @@ func decodeLayers(data []byte, base gopacket.DecodingLayer, // @ ghost oldStart := 0 // @ ghost oldEnd := len(data) - // @ invariant sl.AbsSlice_Bytes(oldData, 0, len(oldData)) + // @ invariant acc(sl.AbsSlice_Bytes(oldData, 0, len(oldData)), R40) // @ invariant base.Mem(oldData) // @ invariant 0 < len(opts) ==> 0 <= i0 && i0 <= len(opts) // @ invariant forall i int :: {&opts[i]} 0 <= i && i < len(opts) ==> acc(&opts[i], R10) @@ -3582,10 +3582,10 @@ func decodeLayers(data []byte, base gopacket.DecodingLayer, // @ ghost if data == nil { // @ sl.NilAcc_Bytes() // @ } else { - // @ sl.SplitRange_Bytes(oldData, oldStart, oldEnd, writePerm) + // @ sl.SplitRange_Bytes(oldData, oldStart, oldEnd, R40) // @ } if err := opt.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil { - // @ ghost if data != nil { sl.CombineRange_Bytes(oldData, oldStart, oldEnd, writePerm) } + // @ ghost if data != nil { sl.CombineRange_Bytes(oldData, oldStart, oldEnd, R40) } // @ base.DowngradePerm(oldData) // ghost clean-up: @@ -3624,7 +3624,7 @@ func decodeLayers(data []byte, base gopacket.DecodingLayer, // @ processed[i0] = true // @ ghost offsets[i0] = offsetPair{oldStart, oldEnd, data == nil} // @ idx = i0 - // @ ghost if data != nil { sl.CombineRange_Bytes(oldData, oldStart, oldEnd, writePerm) } + // @ ghost if data != nil { sl.CombineRange_Bytes(oldData, oldStart, oldEnd, R40) } last = opt } } diff --git a/verification/dependencies/github.com/google/gopacket/parser.gobra b/verification/dependencies/github.com/google/gopacket/parser.gobra index 6298e29af..978b0ee2a 100644 --- a/verification/dependencies/github.com/google/gopacket/parser.gobra +++ b/verification/dependencies/github.com/google/gopacket/parser.gobra @@ -28,7 +28,7 @@ type DecodingLayer interface { requires NonInitMem() requires df != nil - preserves slices.AbsSlice_Bytes(data, 0, len(data)) + preserves acc(slices.AbsSlice_Bytes(data, 0, len(data)), R40) preserves df.Mem() ensures res == nil ==> Mem(data) ensures res != nil ==> (NonInitMem() && res.ErrorMem()) From 97115c64e587bac9423104d9dea013aa580d9079 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Mon, 25 Mar 2024 19:35:57 +0100 Subject: [PATCH 08/57] cosmetic changes (#286) --- router/dataplane.go | 32 +++++++++---------- .../utils/definitions/definitions.gobra | 15 +++++---- 2 files changed, 25 insertions(+), 22 deletions(-) diff --git a/router/dataplane.go b/router/dataplane.go index 168b9ac58..167576808 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1835,8 +1835,8 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr // @ fold p.d.validResult(respr, false) return processResult{}, nil } - // @ TODO() - // TODO: adapt; note that packSCMP always returns an empty addr and conn and + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP + // (VerifiedSCION): adapt; note that packSCMP always returns an empty addr and conn and // when the err is nil, it returns the bytes of p.buffer. This should be a magic wand // that is consumed after sending the reply. 
For now, we are making this simplifying // assumption, but in the future, we should elaborate the proof for this to not be @@ -1875,7 +1875,7 @@ func (p *scionPacketProcessor) validateIngressID() (respr processResult, reserr errCode = slayers.SCMPCodeUnknownHopFieldEgress } if p.ingressID != 0 && p.ingressID != pktIngressID { - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") return p.packSCMP( slayers.SCMPTypeParameterProblem, errCode, @@ -1916,21 +1916,21 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ ) // Note: SCMP error messages triggered by the sibling router may use paths that // don't start with the first hop. if p.path.IsFirstHop( /*@ ubPath @*/ ) && !srcIsLocal { - // @ TODO() // depends on packSCMP + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.invalidSrcIA() } if dstIsLocal { - // @ TODO() // depends on packSCMP + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.invalidDstIA() } } else { // Inbound if srcIsLocal { - // @ TODO() // depends on packSCMP + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.invalidSrcIA() } if p.path.IsLastHop( /*@ ubPath @*/ ) != dstIsLocal { - // @ TODO() // depends on packSCMP + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.invalidDstIA() } } @@ -2050,7 +2050,7 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e if !p.infoField.ConsDir { errCode = slayers.SCMPCodeUnknownHopFieldIngress } - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP( slayers.SCMPTypeParameterProblem, errCode, @@ -2078,7 +2078,7 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e // @ fold p.d.validResult(respr, false) return processResult{}, nil default: // malicious - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidPath, // XXX(matzf) new code InvalidHop? 
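The hunks above and below all apply the same pattern: ToDoAfterScionFix, like the TODO it replaces, is a ghost function whose contract ends in ensures false, so once a branch calls it the verifier accepts the rest of that branch vacuously. A minimal sketch of the mechanism, assuming standard Gobra treatment of unsatisfiable postconditions; the names assumeUnreachable and clamp are illustrative and do not exist in the codebase:

ghost
ensures false
decreases _
func assumeUnreachable()

ensures res >= 0
func clamp(x int) (res int) {
	if x < 0 {
		assumeUnreachable() // postcondition false: the bad return below verifies vacuously
		return -1
	}
	return x
}

The url argument carries no proof obligation; it only records which upstream SCION issue (here, scionproto/scion#4482) currently blocks a real proof of the packSCMP branches.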
@@ -2100,7 +2100,7 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e // @ fold p.d.validResult(respr, false) return processResult{}, nil default: - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidSegmentChange, @@ -2205,7 +2205,7 @@ func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr e // @ sl.SplitRange_Bytes(fullMac, 0, path.MacLen, R20) // @ ghost defer sl.CombineRange_Bytes(fullMac, 0, path.MacLen, R20) if subtle.ConstantTimeCompare(p.hopField.Mac[:path.MacLen], fullMac[:path.MacLen]) == 0 { - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidHopFieldMAC, @@ -2252,7 +2252,7 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( // @ ghost if addrAliases { // @ apply acc(a.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) // @ } - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") r, err := p.packSCMP( slayers.SCMPTypeDestinationUnreachable, slayers.SCMPCodeNoRoute, @@ -2425,7 +2425,7 @@ func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr e Egress: uint64(egressID), } } - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP(typ, 0, scmpP, serrors.New("bfd session down")) } } @@ -2648,7 +2648,7 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( IA: p.d.localIA, Interface: uint64(interfaceID), } - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP(slayers.SCMPTypeTracerouteReply, 0, &scmpP, nil) } @@ -2666,7 +2666,7 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ fold p.d.validResult(processResult{}, false) return processResult{}, nil } - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidPacketSize, @@ -2831,8 +2831,8 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, if !p.infoField.ConsDir { errCode = slayers.SCMPCodeUnknownHopFieldIngress } - // @ TODO() // @ p.scionLayer.DowngradePerm(ub) + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP tmp, err := p.packSCMP( slayers.SCMPTypeParameterProblem, errCode, diff --git a/verification/utils/definitions/definitions.gobra b/verification/utils/definitions/definitions.gobra index b7086de2a..19ae4d2a7 100644 --- a/verification/utils/definitions/definitions.gobra +++ b/verification/utils/definitions/definitions.gobra @@ -91,20 +91,23 @@ pure func Uncallable() (res bool) { return false } -ghost -ensures false -decreases _ -func IgnoreBranch() +/**** Functions to introduce temporary assumptions **/ +// Kills the branches that reach this point. ghost ensures false decreases _ -func IgnoreFromHere() +func TODO() +// Does the same as TODO, but should be used when it kills a branch +// that cannot be verified until an issue in SCION is fixed and ported +// to our branch of SCION. 
ghost ensures false decreases _ -func TODO() +func ToDoAfterScionFix(url string) + +/**** End of functions to introduce temporary assumptions **/ // type to be used as a stub for sets of private fields in formalizations of // third party libs From e8bdfc67838905dfc4d4b0757d4a71a1616bf4a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Thu, 28 Mar 2024 13:58:47 +0100 Subject: [PATCH 09/57] Fix warning in the CI (#288) --- .github/workflows/gobra.yml | 40 ++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml index 1a8ba64e4..ab6668532 100644 --- a/.github/workflows/gobra.yml +++ b/.github/workflows/gobra.yml @@ -23,7 +23,7 @@ env: mceMode: 'od' requireTriggers: '1' useZ3API: '0' - backend: 'SILICON' + viperBackend: 'SILICON' disableNL: '0' unsafeWildcardOptimization: '1' overflow: '0' @@ -67,7 +67,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/addr' uses: viperproject/gobra-action@main @@ -87,7 +87,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/experimental/epic' uses: viperproject/gobra-action@main @@ -106,7 +106,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/log' uses: viperproject/gobra-action@main @@ -125,7 +125,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/private/serrors' uses: viperproject/gobra-action@main @@ -144,7 +144,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/scrypto' uses: viperproject/gobra-action@main @@ -163,7 +163,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers' uses: viperproject/gobra-action@main @@ -182,7 +182,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path' uses: viperproject/gobra-action@main @@ -201,7 +201,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/empty' uses: 
viperproject/gobra-action@main @@ -220,7 +220,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/epic' uses: viperproject/gobra-action@main @@ -240,7 +240,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/onehop' uses: viperproject/gobra-action@main @@ -259,7 +259,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/scion' uses: viperproject/gobra-action@main @@ -278,7 +278,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/topology' uses: viperproject/gobra-action@main @@ -297,7 +297,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/topology/underlay' uses: viperproject/gobra-action@main @@ -316,7 +316,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/underlay/conn' uses: viperproject/gobra-action@main @@ -335,7 +335,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/underlay/sockctrl' uses: viperproject/gobra-action@main @@ -354,7 +354,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'router/bfd' uses: viperproject/gobra-action@main @@ -373,7 +373,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'router/control' uses: viperproject/gobra-action@main @@ -392,7 +392,7 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: ${{ env.disableNL }} - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Upload the verification report uses: actions/upload-artifact@v2 @@ -425,6 +425,6 @@ jobs: overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} disableNL: '1' - backend: ${{ env.backend }} + viperBackend: ${{ env.viperBackend 
}} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} From 1e608307ba609fff78c3304f58accd996a4b1dbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Tue, 2 Apr 2024 13:12:50 +0200 Subject: [PATCH 10/57] Update gobra.yml to disableNL (#289) --- .github/workflows/gobra.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml index ab6668532..0c1a707d4 100644 --- a/.github/workflows/gobra.yml +++ b/.github/workflows/gobra.yml @@ -424,7 +424,7 @@ jobs: requireTriggers: ${{ env.requireTriggers }} overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} - disableNL: '1' + disableNL: ${{ env.disableNL }} viperBackend: ${{ env.viperBackend }} unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} From 2bc193bc2ce492c1592da51bf6a7aefe0c15f471 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Tue, 2 Apr 2024 18:51:41 +0200 Subject: [PATCH 11/57] simplify Decoded.Reverse (#295) --- pkg/slayers/path/scion/decoded.go | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go index 6af13f001..29e8df090 100644 --- a/pkg/slayers/path/scion/decoded.go +++ b/pkg/slayers/path/scion/decoded.go @@ -225,6 +225,7 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { // @ decreases func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { //@ ghost isValid := s.ValidCurrIdxs(ubuf) + //@ ghost base := s.GetBase(ubuf) //@ unfold s.Mem(ubuf) //@ unfold s.Base.Mem() if s.NumINF == 0 { @@ -232,24 +233,16 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { //@ fold s.Mem(ubuf) return nil, serrors.New("empty decoded path is invalid and cannot be reversed") } - //@ fold s.Base.Mem() - //@ fold s.Mem(ubuf) - //@ ghost base := s.GetBase(ubuf) // Reverse order of InfoFields and SegLens - //@ invariant s.Mem(ubuf) - //@ invariant isValid ==> s.ValidCurrIdxs(ubuf) - //@ invariant 0 <= i && i < s.GetNumINF(ubuf) - //@ invariant 0 <= j && j < s.GetNumINF(ubuf) - //@ decreases j-i - for i, j := 0, ( /*@ unfolding s.Mem(ubuf) in (unfolding s.Base.Mem() in @*/ s.NumINF - 1 /*@) @*/); i < j; i, j = i+1, j-1 { - //@ unfold s.Mem(ubuf) - s.InfoFields[i], s.InfoFields[j] = s.InfoFields[j], s.InfoFields[i] - //@ unfold s.Base.Mem() - s.PathMeta.SegLen[i], s.PathMeta.SegLen[j] = s.PathMeta.SegLen[j], s.PathMeta.SegLen[i] - //@ fold s.Base.Mem() - //@ fold s.Mem(ubuf) + if s.NumINF > 1 { + lastIdx := s.NumINF - 1 + s.InfoFields[0], s.InfoFields[lastIdx] = s.InfoFields[lastIdx], s.InfoFields[0] + s.PathMeta.SegLen[0], s.PathMeta.SegLen[lastIdx] = s.PathMeta.SegLen[lastIdx], s.PathMeta.SegLen[0] } + //@ fold s.Base.Mem() + //@ fold s.Mem(ubuf) + //@ preserves s.Mem(ubuf) //@ preserves isValid ==> s.ValidCurrIdxs(ubuf) //@ decreases From 090baffdf2441138cb806b097661e830d9ac424e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Wed, 3 Apr 2024 10:26:48 +0200 Subject: [PATCH 12/57] Cherry-pick #4483 from `scionproto/scion` (#292) * cherry-pick 4483 * undo change to test due to the use of (yet) undefined symbols * fix verification error --------- Co-authored-by: jiceatscion <139873336+jiceatscion@users.noreply.github.com> --- pkg/slayers/path/scion/BUILD.bazel | 1 + pkg/slayers/path/scion/base.go | 25 +++++++++++++++++++++---- pkg/slayers/path/scion/decoded.go | 7 ------- pkg/slayers/path/scion/raw_test.go | 
25 ++++++++++++++++++++++++-
 4 files changed, 46 insertions(+), 12 deletions(-)

diff --git a/pkg/slayers/path/scion/BUILD.bazel b/pkg/slayers/path/scion/BUILD.bazel
index a4a57739f..c05261537 100644
--- a/pkg/slayers/path/scion/BUILD.bazel
+++ b/pkg/slayers/path/scion/BUILD.bazel
@@ -24,6 +24,7 @@ go_test(
     ],
     deps = [
         ":go_default_library",
+        "//pkg/private/serrors:go_default_library",
         "//pkg/slayers/path:go_default_library",
         "@com_github_stretchr_testify//assert:go_default_library",
         "@com_github_stretchr_testify//require:go_default_library",
diff --git a/pkg/slayers/path/scion/base.go b/pkg/slayers/path/scion/base.go
index f8960fd72..a41185eab 100644
--- a/pkg/slayers/path/scion/base.go
+++ b/pkg/slayers/path/scion/base.go
@@ -27,10 +27,17 @@ import (
 	//@ sl "github.com/scionproto/scion/verification/utils/slices"
 )
 
-// MetaLen is the length of the PathMetaHeader.
-const MetaLen = 4
+const (
+	// MaxINFs is the maximum number of info fields in a SCION path.
+	MaxINFs = 3
+	// MaxHops is the maximum number of hop fields in a SCION path.
+	MaxHops = 64
 
-const PathType path.Type = 1
+	// MetaLen is the length of the PathMetaHeader.
+	MetaLen = 4
+
+	PathType path.Type = 1
+)
 
 // @ requires path.PathPackageMem()
 // @ requires !path.Registered(PathType)
@@ -142,6 +149,14 @@ func (s *Base) DecodeFromBytes(data []byte) (r error) {
 		//@ assume int(s.PathMeta.SegLen[i]) >= 0
 		s.NumHops += int(s.PathMeta.SegLen[i])
 	}
+
+	// We must check the validity of NumHops. It is possible to fit more than 64 hops in
+	// the length of a scion header. Yet a path of more than 64 hops cannot be followed to
+	// the end because CurrHF is only 6 bits long.
+	if s.NumHops > MaxHops {
+		//@ defer fold s.NonInitMem()
+		return serrors.New("NumHops too large", "NumHops", s.NumHops, "Maximum", MaxHops)
+	}
 	//@ fold s.Mem()
 	return nil
 }
@@ -212,7 +227,9 @@ func (s *Base) infIndexForHF(hf uint8) (r uint8) {
 	}
 }
 
-// Len returns the length of the path in bytes.
+// Len returns the length of the path in bytes. That is, the number of bytes required to
+// store it, based on the metadata. The actual number of bytes available to contain it
+// can be inferred from the common header field HdrLen. It may or may not be consistent.
 // @ pure
 // @ requires acc(s.Mem(), _)
 // @ ensures r >= MetaLen
diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go
index 29e8df090..195125e6e 100644
--- a/pkg/slayers/path/scion/decoded.go
+++ b/pkg/slayers/path/scion/decoded.go
@@ -24,13 +24,6 @@ import (
 	//@ sl "github.com/scionproto/scion/verification/utils/slices"
 )
 
-const (
-	// MaxINFs is the maximum number of info fields in a SCION path.
-	MaxINFs = 3
-	// MaxHops is the maximum number of hop fields in a SCION path.
-	MaxHops = 64
-)
-
 // Decoded implements the SCION (data-plane) path type. Decoded is intended to be used in
 // non-performance critical code paths, where the convenience of having a fully parsed path trumps
 // the loss of performance.
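As the comment added to base.go notes, CurrHF is only 6 bits long, so no forwarding step can ever address a hop field beyond index 63, while the three 6-bit SegLen fields can collectively declare up to 189 hops. A simplified sketch of the new bound, without the Gobra annotations; validateNumHops is an illustrative name and not the verified implementation:

package main

import "fmt"

// validateNumHops mirrors the check added to Base.DecodeFromBytes above.
func validateNumHops(segLen [3]uint8) error {
	numHops := 0
	for _, l := range segLen {
		numHops += int(l) // on the wire each SegLen is a 6-bit field, so at most 63 per segment
	}
	if numHops > 64 { // MaxHops: a 6-bit CurrHF cannot index past hop 63
		return fmt.Errorf("NumHops too large: %d > 64", numHops)
	}
	return nil
}

func main() {
	// SegLen{24, 24, 17} declares 65 hops and is rejected, matching the
	// overlongPath fixture introduced in the test below.
	fmt.Println(validateNumHops([3]uint8{24, 24, 17}))
}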
diff --git a/pkg/slayers/path/scion/raw_test.go b/pkg/slayers/path/scion/raw_test.go index ff527d939..3a869b016 100644 --- a/pkg/slayers/path/scion/raw_test.go +++ b/pkg/slayers/path/scion/raw_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/scionproto/scion/pkg/private/serrors" "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" ) @@ -51,6 +52,19 @@ var emptyRawTestPath = &scion.Raw{ Raw: make([]byte, scion.MetaLen), } +var overlongPath = &scion.Raw{ + Base: scion.Base{ + PathMeta: scion.MetaHdr{ + CurrINF: 0, + CurrHF: 0, + SegLen: [3]uint8{24, 24, 17}, + }, + NumINF: 3, + NumHops: 65, + }, + Raw: rawPath, +} + func TestRawSerialize(t *testing.T) { b := make([]byte, rawTestPath.Len()) assert.NoError(t, rawTestPath.SerializeTo(b)) @@ -63,7 +77,7 @@ func TestRawDecodeFromBytes(t *testing.T) { assert.Equal(t, rawTestPath, s) } -func TestRawSerliazeDecode(t *testing.T) { +func TestRawSerializeDecode(t *testing.T) { b := make([]byte, rawTestPath.Len()) assert.NoError(t, rawTestPath.SerializeTo(b)) s := &scion.Raw{} @@ -71,6 +85,15 @@ func TestRawSerliazeDecode(t *testing.T) { assert.Equal(t, rawTestPath, s) } +func TestOverlongSerializeDecode(t *testing.T) { + b := make([]byte, overlongPath.Len()) + assert.NoError(t, overlongPath.SerializeTo(b)) // permitted, if only to enable this test. + s := &scion.Raw{} + expected := serrors.New("NumHops too large", "NumHops", 65, "Maximum", scion.MaxHops) + err := s.DecodeFromBytes(b) + assert.Equal(t, expected.Error(), err.Error()) +} + func TestRawReverse(t *testing.T) { for name, tc := range pathReverseCases { name, tc := name, tc From 45323db532c6e0eb2e572ddb96071f7b9aac8ca2 Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Wed, 3 Apr 2024 13:51:30 +0200 Subject: [PATCH 13/57] change permissions amount for decode SCMPTraceRoute (#299) --- pkg/slayers/scmp_msg.go | 58 ++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/pkg/slayers/scmp_msg.go b/pkg/slayers/scmp_msg.go index 734c3b4c0..94ac8c449 100644 --- a/pkg/slayers/scmp_msg.go +++ b/pkg/slayers/scmp_msg.go @@ -672,7 +672,7 @@ func (*SCMPTraceroute) NextLayerType() gopacket.LayerType { // DecodeFromBytes decodes the given bytes into this layer. 
// @ requires df != nil // @ requires i.NonInitMem() -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ preserves df.Mem() // @ ensures res == nil ==> i.Mem(data) // @ ensures res != nil ==> i.NonInitMem() @@ -690,66 +690,66 @@ func (i *SCMPTraceroute) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback // @ requires offset == 0 // @ preserves acc(&i.Identifier) // @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen - // @ requires sl.AbsSlice_Bytes(data, 0, len(data)) - // @ ensures sl.AbsSlice_Bytes(data, 0, 2) - // @ ensures sl.AbsSlice_Bytes(data, 2, len(data)) + // @ requires acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) + // @ ensures acc(sl.AbsSlice_Bytes(data, 0, 2), R40) + // @ ensures acc(sl.AbsSlice_Bytes(data, 2, len(data)), R40) // @ decreases // @ outline ( - // @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm) - // @ unfold sl.AbsSlice_Bytes(data, 0, 2) + // @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, R40) + // @ unfold acc(sl.AbsSlice_Bytes(data, 0, 2), R40) i.Identifier = binary.BigEndian.Uint16(data[offset : offset+2]) - // @ fold sl.AbsSlice_Bytes(data, 0, 2) + // @ fold acc(sl.AbsSlice_Bytes(data, 0, 2), R40) // @ ) offset += 2 // @ requires offset == 2 // @ preserves acc(&i.Sequence) // @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen - // @ requires sl.AbsSlice_Bytes(data, 2, len(data)) - // @ ensures sl.AbsSlice_Bytes(data, 2, 2+2) - // @ ensures sl.AbsSlice_Bytes(data, 2+2, len(data)) + // @ requires acc(sl.AbsSlice_Bytes(data, 2, len(data)), R40) + // @ ensures acc(sl.AbsSlice_Bytes(data, 2, 2+2), R40) + // @ ensures acc(sl.AbsSlice_Bytes(data, 2+2, len(data)), R40) // @ decreases // @ outline ( - // @ sl.SplitByIndex_Bytes(data, 2, len(data), 2+2, writePerm) - // @ unfold sl.AbsSlice_Bytes(data, 2, 2+2) + // @ sl.SplitByIndex_Bytes(data, 2, len(data), 2+2, R40) + // @ unfold acc(sl.AbsSlice_Bytes(data, 2, 2+2), R40) // @ assert forall i int :: { &data[offset:offset+2][i] } 0 <= i && i < 2 ==> &data[offset + i] == &data[offset : offset+2][i] i.Sequence = binary.BigEndian.Uint16(data[offset : offset+2]) - // @ fold sl.AbsSlice_Bytes(data, 2, 2+2) + // @ fold acc(sl.AbsSlice_Bytes(data, 2, 2+2), R40) // @ ) offset += 2 // @ requires offset == 2 + 2 // @ preserves acc(&i.IA) // @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen - // @ requires sl.AbsSlice_Bytes(data, 2+2, len(data)) - // @ ensures sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes) - // @ ensures sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, len(data)) + // @ requires acc(sl.AbsSlice_Bytes(data, 2+2, len(data)), R40) + // @ ensures acc(sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes), R40) + // @ ensures acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, len(data)), R40) // @ decreases // @ outline ( - // @ sl.SplitByIndex_Bytes(data, 2+2, len(data), 2+2+addr.IABytes, writePerm) - // @ unfold sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes) + // @ sl.SplitByIndex_Bytes(data, 2+2, len(data), 2+2+addr.IABytes, R40) + // @ unfold acc(sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes), R40) // @ assert forall i int :: { &data[offset:offset+addr.IABytes][i] } 0 <= i && i < addr.IABytes ==> &data[offset + i] == &data[offset : offset+addr.IABytes][i] i.IA = addr.IA(binary.BigEndian.Uint64(data[offset : offset+addr.IABytes])) - // @ fold sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes) + // @ fold acc(sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes), R40) // @ ) offset += addr.IABytes // @ requires 
offset == 2 + 2 + addr.IABytes // @ preserves acc(&i.Interface) // @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen - // @ requires sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, len(data)) - // @ ensures sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen) - // @ ensures sl.AbsSlice_Bytes(data, 2+2+addr.IABytes+scmpRawInterfaceLen, len(data)) + // @ requires acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, len(data)), R40) + // @ ensures acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) + // @ ensures acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes+scmpRawInterfaceLen, len(data)), R40) // @ decreases // @ outline ( - // @ sl.SplitByIndex_Bytes(data, 2+2+addr.IABytes, len(data), 2+2+addr.IABytes+scmpRawInterfaceLen, writePerm) - // @ unfold sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen) + // @ sl.SplitByIndex_Bytes(data, 2+2+addr.IABytes, len(data), 2+2+addr.IABytes+scmpRawInterfaceLen, R40) + // @ unfold acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) // @ assert forall i int :: { &data[offset:offset+scmpRawInterfaceLen][i] } 0 <= i && i < scmpRawInterfaceLen ==> &data[offset + i] == &data[offset : offset+addr.IABytes][i] i.Interface = binary.BigEndian.Uint64(data[offset : offset+scmpRawInterfaceLen]) - // @ fold sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen) + // @ fold acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) // @ ) offset += scmpRawInterfaceLen - // @ sl.CombineAtIndex_Bytes(data, 0, 2+2, 2, writePerm) - // @ sl.CombineAtIndex_Bytes(data, 0, 2+2+addr.IABytes, 2+2, writePerm) - // @ sl.CombineAtIndex_Bytes(data, 0, 2+2+addr.IABytes+scmpRawInterfaceLen, 2+2+addr.IABytes, writePerm) - // @ sl.CombineAtIndex_Bytes(data, 0, len(data), 2+2+addr.IABytes+scmpRawInterfaceLen, writePerm) + // @ sl.CombineAtIndex_Bytes(data, 0, 2+2, 2, R40) + // @ sl.CombineAtIndex_Bytes(data, 0, 2+2+addr.IABytes, 2+2, R40) + // @ sl.CombineAtIndex_Bytes(data, 0, 2+2+addr.IABytes+scmpRawInterfaceLen, 2+2+addr.IABytes, R40) + // @ sl.CombineAtIndex_Bytes(data, 0, len(data), 2+2+addr.IABytes+scmpRawInterfaceLen, R40) i.BaseLayer = BaseLayer{ Contents: data[:offset], Payload: data[offset:], From 42bde11cea10c94076edbfcd15e49d3695ad994e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Sat, 6 Apr 2024 01:45:40 +0200 Subject: [PATCH 14/57] First batch of changes from PR #248 (#306) * first batch of changes from the IO spec * Apply suggestions from code review * adapt config options --- .github/workflows/gobra.yml | 4 +- router/dataplane_concurrency_model.gobra | 19 +++- router/dataplane_spec.gobra | 10 ++- .../dependencies/encoding/binary/binary.gobra | 23 +++++ verification/io/bios.gobra | 1 - verification/io/io-spec.gobra | 89 ++----------------- verification/io/router.gobra | 3 +- verification/io/router_events.gobra | 10 --- verification/io/xover.gobra | 50 ++++++----- verification/utils/bitwise/bitwise-eqs.gobra | 17 +++- verification/utils/bitwise/proofs.dfy | 62 +++++++++++++ .../utils/definitions/definitions.gobra | 16 +++- 12 files changed, 177 insertions(+), 127 deletions(-) diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml index 0c1a707d4..f16eae5a2 100644 --- a/.github/workflows/gobra.yml +++ b/.github/workflows/gobra.yml @@ -424,7 +424,7 @@ jobs: requireTriggers: ${{ env.requireTriggers }} overflow: ${{ env.overflow }} useZ3API: ${{ 
env.useZ3API }} - disableNL: ${{ env.disableNL }} + disableNL: '0' viperBackend: ${{ env.viperBackend }} - unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} + unsafeWildcardOptimization: '0' diff --git a/router/dataplane_concurrency_model.gobra b/router/dataplane_concurrency_model.gobra index 5a8223130..ae89a26a8 100644 --- a/router/dataplane_concurrency_model.gobra +++ b/router/dataplane_concurrency_model.gobra @@ -98,8 +98,8 @@ pure func MultiReadBioCorrectIfs( } && MultiReadBioCorrectIfs(io.dp3s_iospec_bio3s_recv_T(t), expectedPkts-1, k) } - ghost +opaque requires 0 <= expectedPkts && MultiReadBio(t, expectedPkts) ensures len(res) == expectedPkts decreases expectedPkts @@ -160,8 +160,10 @@ requires ElemAuth(s.ibuf, y.IBufY) && ElemAuth(s.obuf, y.OBufY) ensures MultiReadBio(t, n) ensures MultiReadBioUpd(t, n, s) == old(MultiReadBioUpd(t, n, s)) ensures MultiReadBioNext(t, n) == old(MultiReadBioNext(t, n)) -ensures ElemAuth(MultiReadBioUpd(t, n, s).ibuf, y.IBufY) && ElemAuth(MultiReadBioUpd(t, n, s).obuf, y.OBufY) -ensures 0 <= n && MultiReadBioCorrectIfs(t, n, k) ==> MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n)) +ensures ElemAuth(MultiReadBioUpd(t, n, s).ibuf, y.IBufY) +ensures ElemAuth(MultiReadBioUpd(t, n, s).obuf, y.OBufY) +ensures 0 <= n && MultiReadBioCorrectIfs(t, n, k) ==> + MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n)) decreases n func MultiUpdateElemWitness( t io.Place, @@ -181,6 +183,7 @@ func MultiUpdateElemWitness( } if 0 <= n && MultiReadBioCorrectIfs(t, n, k) { + reveal MultiReadBioIO_val(t, n) fold MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n)) } } @@ -216,6 +219,16 @@ func MultiElemWitnessConv(y ElemRA,k Key, es seq[io.IO_val]) { multiElemWitnessConvAux(y,k,es,0) } +ghost +requires 0 <= currIdx && currIdx < len(es) +requires MultiElemWitnessWithIndex(y, k, es, currIdx) +ensures es[currIdx].isIO_val_Pkt2 ==> ElemWitness(y, k, es[currIdx].IO_val_Pkt2_2) +ensures MultiElemWitnessWithIndex(y, k, es, currIdx + 1) +decreases +func MultiElemWitnessStep(y ElemRA, k Key, es seq[io.IO_val], currIdx int) { + unfold MultiElemWitnessWithIndex(y, k, es, currIdx) +} + ghost requires i >= 0 requires MultiElemWitness(y,k,es[i:]) diff --git a/router/dataplane_spec.gobra b/router/dataplane_spec.gobra index 08b70cb9f..77303745e 100644 --- a/router/dataplane_spec.gobra +++ b/router/dataplane_spec.gobra @@ -204,6 +204,14 @@ pure func (d *DataPlane) getValForwardingMetrics() map[uint16]forwardingMetrics return unfolding acc(d.Mem(), _) in d.forwardingMetrics } +ghost +pure +requires acc(p.sInit(), _) +decreases +func (p *scionPacketProcessor) getIngressID() uint16 { + return unfolding acc(p.sInit(), _) in p.ingressID +} + ghost requires acc(d.Mem(), _) decreases @@ -696,4 +704,4 @@ pure func (d *DataPlane) DomainForwardingMetrics() set[uint16] { unfolding acc(accForwardingMetrics(d.forwardingMetrics), _) in domain(d.forwardingMetrics) : set[uint16]{} -} \ No newline at end of file +} diff --git a/verification/dependencies/encoding/binary/binary.gobra b/verification/dependencies/encoding/binary/binary.gobra index 8861e78b8..af43eb984 100644 --- a/verification/dependencies/encoding/binary/binary.gobra +++ b/verification/dependencies/encoding/binary/binary.gobra @@ -126,21 +126,41 @@ ensures res == "binary.LittleEndian" decreases pure func (l littleEndian) GoString() (res string) { return "binary.LittleEndian" } +// The specs here could be simpler now that we have FUint16Spec and FPutUint16Spec. 
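+// As a worked example of the intended big-endian round trip: for v == 0x1234,
+// PutUint16Spec requires b0 == byte(v>>8) == 0x12 and b1 == byte(v) == 0x34, and
+// conversely Uint16Spec(0x12, 0x34) == uint16(0x34) | uint16(0x12)<<8 == 0x1234 recovers v.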
+ +decreases +pure func (e bigEndian) Uint16Spec(b0, b1 byte) (res uint16) { + return uint16(b1) | uint16(b0)<<8 +} + trusted // related to https://github.com/viperproject/gobra/issues/192 requires acc(&b[0], _) && acc(&b[1], _) ensures res >= 0 +ensures res == BigEndian.Uint16Spec(b[0], b[1]) decreases pure func (e bigEndian) Uint16(b []byte) (res uint16) { return uint16(b[1]) | uint16(b[0])<<8 } +decreases +pure func (e bigEndian) PutUint16Spec(b0, b1 byte, v uint16) bool { + return b0 == byte(v >> 8) && + b1 == byte(v) +} + +// Proven in verification/utils/bitwise/proofs.dfy +trusted preserves acc(&b[0]) && acc(&b[1]) +ensures BigEndian.PutUint16Spec(b[0], b[1], v) +ensures BigEndian.Uint16Spec(b[0], b[1]) == v decreases func (e bigEndian) PutUint16(b []byte, v uint16) { b[0] = byte(v >> 8) b[1] = byte(v) } +// The specs here could be simpler now that we have FUint32Spec and FPutUint32Spec. + decreases pure func (e bigEndian) Uint32Spec(b0, b1, b2, b3 byte) (res uint32) { return uint32(b3) | uint32(b2)<<8 | uint32(b1)<<16 | uint32(b0)<<24 @@ -163,8 +183,11 @@ pure func (e bigEndian) PutUint32Spec(b0, b1, b2, b3 byte, v uint32) bool { b3 == byte(v) } +// Proven in verification/utils/bitwise/proofs.dfy +trusted preserves acc(&b[0]) && acc(&b[1]) && acc(&b[2]) && acc(&b[3]) ensures BigEndian.PutUint32Spec(b[0], b[1], b[2], b[3], v) +ensures BigEndian.Uint32Spec(b[0], b[1], b[2], b[3]) == v decreases func (e bigEndian) PutUint32(b []byte, v uint32) { b[0] = byte(v >> 24) diff --git a/verification/io/bios.gobra b/verification/io/bios.gobra index c2edeada0..f85bc30db 100644 --- a/verification/io/bios.gobra +++ b/verification/io/bios.gobra @@ -21,7 +21,6 @@ package io type IO_bio3sIN adt { IO_bio3s_enter{} IO_bio3s_xover_up2down{} - IO_bio3s_xover_core{} IO_bio3s_exit{} } diff --git a/verification/io/io-spec.gobra b/verification/io/io-spec.gobra index df46e6ae1..625690554 100644 --- a/verification/io/io-spec.gobra +++ b/verification/io/io-spec.gobra @@ -29,7 +29,6 @@ type BogusTrigger struct{} pred (dp DataPlaneSpec) dp3s_iospec_ordered(s IO_dp3s_state_local, t Place) { dp.dp3s_iospec_bio3s_enter(s, t) && dp.dp3s_iospec_bio3s_xover_up2down(s, t) && - dp.dp3s_iospec_bio3s_xover_core(s, t) && dp.dp3s_iospec_bio3s_exit(s, t) && dp.dp3s_iospec_bio3s_send(s, t) && dp.dp3s_iospec_bio3s_recv(s, t) && @@ -163,14 +162,12 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down_guard(s IO_dp3s_sta false default: let nextseg := get(v.IO_Internal_val1_1.LeftSeg) in - (!currseg.ConsDir && - nextseg.ConsDir && - len(nextseg.Future) > 0 && + (len(nextseg.Future) > 0 && len(currseg.Future) > 0 && - len(v.IO_Internal_val1_3.CurrSeg.Future) > 0 && let hf1, hf2 := currseg.Future[0], nextseg.Future[0] in let traversedseg := establishGuardTraversedsegInc(currseg, !currseg.ConsDir) in - (dp.xover_up2down2_link_type(dp.Asid(), hf1, hf2) && + let nextfut := nextseg.Future[1:] in + (dp.xover_up2down2_link_type_dir(dp.Asid(), currseg.ConsDir, hf1, nextseg.ConsDir, hf2) && dp.dp3s_xover_common( s, v.IO_Internal_val1_1, @@ -180,6 +177,7 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down_guard(s IO_dp3s_sta IO_pkt2(IO_Packet2{nextseg, v.IO_Internal_val1_1.MidSeg, v.IO_Internal_val1_1.RightSeg, some(traversedseg)}), hf1, hf2, + nextfut, v.IO_Internal_val1_2, v.IO_Internal_val1_3, v.IO_Internal_val1_4,))) @@ -209,71 +207,6 @@ ghost decreases pure func TriggerBodyIoXoverUp2Down(v IO_val) BogusTrigger { return BogusTrigger{} } -pred CBio_IN_bio3s_xover_core(t Place, v IO_val) - -ghost -requires 
CBio_IN_bio3s_xover_core(t, v) -decreases -pure func dp3s_iospec_bio3s_xover_core_T(t Place, v IO_val) Place - -// This corresponds to the condition of the if statement in the io-spec case for xover_core -ghost -requires v.isIO_Internal_val1 -requires dp.Valid() -decreases -pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_core_guard(s IO_dp3s_state_local, t Place, v IO_val) bool { - return (dp.Asid() in dp.Core() && - let currseg := v.IO_Internal_val1_1.CurrSeg in - match v.IO_Internal_val1_1.LeftSeg { - case none[IO_seg2]: - false - default: - let nextseg := get(v.IO_Internal_val1_1.LeftSeg) in - currseg.ConsDir == nextseg.ConsDir && - len(nextseg.Future) > 0 && - len(currseg.Future) > 0 && - len(v.IO_Internal_val1_3.CurrSeg.Future) > 0 && - let hf1, hf2 := currseg.Future[0], nextseg.Future[0] in - let traversedseg := establishGuardTraversedsegInc(currseg, !currseg.ConsDir) in - (dp.xover_core2_link_type(hf1, hf2, dp.Asid(), currseg.ConsDir) && - dp.dp3s_xover_common( - s, - v.IO_Internal_val1_1, - currseg, - nextseg, - traversedseg, - IO_pkt2(IO_Packet2{nextseg, v.IO_Internal_val1_1.MidSeg, v.IO_Internal_val1_1.RightSeg, some(traversedseg)}), - hf1, - hf2, - v.IO_Internal_val1_2, - v.IO_Internal_val1_3, - v.IO_Internal_val1_4)) - }) -} - -pred (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_core(s IO_dp3s_state_local, t Place) { - forall v IO_val :: { TriggerBodyIoXoverCore(v) } ( - match v { - case IO_Internal_val1{_, _, ?newpkt, ?nextif}: - // Gobra requires the triggering term to occur inside the qtfier body, - // otherwise we get an error in the call to dp3s_iospec_bio3s_xover_core_T. - // We named the variable `_ignored` because using `_` here leads to a strange - // type error. - let _ignored := TriggerBodyIoXoverCore(v) in - (dp.Valid() && dp.dp3s_iospec_bio3s_xover_core_guard(s, t, v) ==> - (CBio_IN_bio3s_xover_core(t, v) && - dp.dp3s_iospec_ordered( - dp3s_add_obuf(s, nextif, newpkt), - dp3s_iospec_bio3s_xover_core_T(t, v)))) - default: - true - }) -} - -ghost -decreases -pure func TriggerBodyIoXoverCore(v IO_val) BogusTrigger { return BogusTrigger{} } - pred CBio_IN_bio3s_exit(t Place, v IO_val) ghost @@ -412,12 +345,6 @@ requires token(t) && CBio_IN_bio3s_enter(t, v) ensures token(old(CBio_IN_bio3s_enter_T(t, v))) func Enter(ghost t Place, ghost v IO_val) -ghost -decreases -requires token(t) && CBio_IN_bio3s_xover_core(t, v) -ensures token(old(dp3s_iospec_bio3s_xover_core_T(t, v))) -func Xover_core(ghost t Place, ghost v IO_val) - ghost decreases requires token(t) && CBio_IN_bio3s_xover_up2down(t, v) @@ -430,10 +357,4 @@ requires token(t) && CBio_IN_bio3s_exit(t, v) ensures token(old(dp3s_iospec_bio3s_exit_T(t, v))) func Exit(ghost t Place, ghost v IO_val) -ghost -decreases -requires token(t) && CBioIO_bio3s_send(t, v) -ensures token(old(dp3s_iospec_bio3s_send_T(t, v))) -func Send(ghost t Place, ghost v IO_val) - -/** End of helper functions to perfrom BIO operations **/ \ No newline at end of file +/** End of helper functions to perfrom BIO operations **/ diff --git a/verification/io/router.gobra b/verification/io/router.gobra index 4574c48f0..d34d2de3a 100644 --- a/verification/io/router.gobra +++ b/verification/io/router.gobra @@ -193,6 +193,7 @@ pure func (dp DataPlaneSpec) dp3s_xover_common( intermediatepkt IO_pkt3, hf1 IO_HF, hf2 IO_HF, + nextfut seq[IO_HF], recvif IO_ifs, newpkt IO_pkt3, nextif option[IO_ifs], @@ -201,6 +202,6 @@ pure func (dp DataPlaneSpec) dp3s_xover_common( // this is because of the way math. 
maps are implemented, we can only obtain a key that is in the map before. return some(recvif) in domain(s.ibuf) && (let lookupRes := s.ibuf[some(recvif)] in (m in lookupRes)) && - dp.dp2_xover_common_guard(m, currseg, nextseg, traversedseg, intermediatepkt, hf1, hf2, dp.Asid(), recvif) && + dp.dp2_xover_common_guard(m, currseg, nextseg, traversedseg, intermediatepkt, hf1, hf2, nextfut, dp.Asid(), recvif) && dp.dp3s_forward_xover(intermediatepkt, newpkt, nextif) } diff --git a/verification/io/router_events.gobra b/verification/io/router_events.gobra index 4ba87d715..51c4dc33a 100644 --- a/verification/io/router_events.gobra +++ b/verification/io/router_events.gobra @@ -18,16 +18,6 @@ package io -ghost -requires dp.Valid() -requires asid == dp.Asid() -decreases -pure func (dp DataPlaneSpec) dp2_check_recvif(d bool, asid IO_as, recvif IO_ifs) bool { - return d? - (dp.link_type(asid, recvif) == IO_CustProv{} || dp.link_type(asid, recvif) == IO_PeerOrCore{}) : - (dp.link_type(asid, recvif) == IO_ProvCust{} || dp.link_type(asid, recvif) == IO_PeerOrCore{}) -} - /* Abbreviations */ ghost requires dp.Valid() diff --git a/verification/io/xover.gobra b/verification/io/xover.gobra index e9cda55ad..206d53d55 100644 --- a/verification/io/xover.gobra +++ b/verification/io/xover.gobra @@ -18,22 +18,18 @@ package io +// Switching between segments (xover) // Xover events are similar to the enter event in that a packet is received form an external -// channel and forwarded (internally or externally), but in contrast to the enter event, -// additional processing steps are required to switch from the current segment, -// which has reached its end, to the next segment. -// We have two events for xovering segments. One in which we xover from a segment against -// construction direction to one in construction direction (up2down), and one in which we -// xover between segments of the same directionality. The latter can only happen at a -// core node, hence we call it “xover_core" +// channel and forwarded (internally or externally), but in contrast to the enter event, additional +// processing steps are required to switch from the current segment, which has reached its end, to the +// next segment. -// Common guard between"dp2_xover_up2down"and "dp2_xover_core": -// Check if we are at the end of one segment and that there is a non empty -// Future segment. There are three different segments in this definition: -// currseg, the 'old segment' with exactly one hop field remaining in the -// Future path, traversedseg, which is currseg after we push its -// remaining hop field into the Past path, and nextseg, which is -// the new segment that we are xovering over to. +// Guard: +// Check if we are at the end of one segment and that there is a non empty Future segment. 
+// There are three different segments in this definition: currseg, the 'old segment' with +// exactly one hop field remaining in the Future path, traversedseg, which is currseg after we +// push its remaining hop field into the Past path, and nextseg, which is the new segment that we +// are xovering over to ghost requires dp.Valid() requires asid == dp.Asid() @@ -45,6 +41,7 @@ pure func (dp DataPlaneSpec) dp2_xover_common_guard(m IO_pkt2, newpkt IO_pkt2, hf1 IO_HF, hf2 IO_HF, + nextfut seq[IO_HF], asid IO_as, recvif IO_ifs) bool { return m.CurrSeg == currseg && @@ -52,10 +49,8 @@ pure func (dp DataPlaneSpec) dp2_xover_common_guard(m IO_pkt2, nextseg.History == seq[IO_ahi]{} && newpkt == IO_pkt2(IO_Packet2{nextseg, m.MidSeg, m.RightSeg, some(traversedseg)}) && currseg.Future == seq[IO_HF]{hf1} && - len(nextseg.Future) > 0 && - nextseg.Future[0] == hf2 && + nextseg.Future == seq[IO_HF]{hf2} ++ nextfut && dp.dp2_check_interface(currseg.ConsDir, asid, hf1, recvif) && - dp.dp2_check_recvif(currseg.ConsDir, asid, recvif) && update_uinfo(!currseg.ConsDir, currseg, traversedseg, hf1) && inc_seg2(currseg, traversedseg, hf1, seq[IO_HF]{}) && dp.hf_valid(currseg.ConsDir, currseg.AInfo, traversedseg.UInfo, hf1) && @@ -86,16 +81,25 @@ requires dp.Valid() requires asid == dp.Asid() decreases pure func (dp DataPlaneSpec) xover_up2down2_link_type(asid IO_as, hf1 IO_HF, hf2 IO_HF) bool { - return (dp.egif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) || - (dp.egif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_PeerOrCore{})) || - (dp.egif2_type(hf1, asid, IO_PeerOrCore{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) + return (dp.inif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) || + (dp.inif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_PeerOrCore{})) || + (dp.inif2_type(hf1, asid, IO_PeerOrCore{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) +} + +ghost +decreases +pure func swap_if_dir2(hf IO_HF, d bool) IO_HF { + return IO_HF(IO_HF_{ + InIF2: d ? hf.InIF2 : hf.EgIF2, + EgIF2: d ? 
hf.EgIF2 : hf.InIF2, + HVF: hf.HVF, + }) } ghost requires dp.Valid() requires asid == dp.Asid() decreases -pure func (dp DataPlaneSpec) xover_core2_link_type(hf1 IO_HF, hf2 IO_HF, asid IO_as, d bool) bool { - return (!d && dp.egif2_type(hf1, asid, IO_ProvCust{}) && dp.inif2_type(hf2, asid, IO_PeerOrCore{})) || - (d && dp.inif2_type(hf1, asid, IO_PeerOrCore{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) +pure func (dp DataPlaneSpec) xover_up2down2_link_type_dir(asid IO_as, d1 bool, hf1 IO_HF, d2 bool, hf2 IO_HF) bool { + return dp.xover_up2down2_link_type(asid, swap_if_dir2(hf1, d1), swap_if_dir2(hf2, d2)) } diff --git a/verification/utils/bitwise/bitwise-eqs.gobra b/verification/utils/bitwise/bitwise-eqs.gobra index 605e754b5..2b8b5b18b 100644 --- a/verification/utils/bitwise/bitwise-eqs.gobra +++ b/verification/utils/bitwise/bitwise-eqs.gobra @@ -52,4 +52,19 @@ ghost ensures res == b & 0x3F ensures 0 <= res && res < 64 decreases -pure func And3fAtMost64(b uint8) (res uint8) \ No newline at end of file +pure func And3fAtMost64(b uint8) (res uint8) + +ghost +ensures 0 | 1 == 1 +ensures 0 | 2 == 2 +ensures 1 | 2 == 3 +ensures 0 & 1 == 0 +ensures 0 & 2 == 0 +ensures 1 & 1 == 1 +ensures 1 & 2 == 0 +ensures 2 & 1 == 0 +ensures 2 & 2 == 2 +ensures 3 & 1 == 1 +ensures 3 & 2 == 2 +decreases +pure func InfoFieldFirstByteSerializationLemmas() bool diff --git a/verification/utils/bitwise/proofs.dfy b/verification/utils/bitwise/proofs.dfy index d788a165f..9d38da8ce 100644 --- a/verification/utils/bitwise/proofs.dfy +++ b/verification/utils/bitwise/proofs.dfy @@ -101,3 +101,65 @@ lemma SerializeAndDeserializeLemma(m: MetaHdr, b0: bv8, b1: bv8, b2: bv8, b3: bv ensures var line := SerializedToLine(m); PutUint32Spec(b0, b1, b2, b3, line) ==> (DecodedFrom(Uint32Spec(b0, b1, b2, b3)) == m) {} + +lemma SerializeAndDeserializeMetaHdrLemma(m: MetaHdr) + requires InBounds(m) + ensures DecodedFrom(SerializedToLine(m)) == m +{} + +lemma InfoFieldFirstByteSerializationLemmas() + // or + ensures 0 as bv8 | 1 == 1 + ensures 0 as bv8 | 2 == 2 + ensures 1 as bv8 | 2 == 3 + // and + ensures 0 as bv8 & 1 == 0 + ensures 0 as bv8 & 2 == 0 + ensures 1 as bv8 & 1 == 1 + ensures 1 as bv8 & 2 == 0 + ensures 2 as bv8 & 1 == 0 + ensures 2 as bv8 & 2 == 2 + ensures 3 as bv8 & 1 == 1 + ensures 3 as bv8 & 2 == 2 +{} + + +// Functional specs for encoding/binary (BigEndian) +function FUint16Spec(b0: bv8, b1: bv8): bv16 { + (b1 as bv16) | ((b0 as bv16) << 8) +} + +function FPutUint16Spec(v: bv16): (bv8, bv8) { + ((v >> 8) as bv8, (v & 0xFF) as bv8) +} + +lemma FUint16AfterFPutUint16(v: bv16) + ensures var (b0, b1) := FPutUint16Spec(v); + FUint16Spec(b0, b1) == v +{} + +lemma FPutUint16AfterFUint16(b0: bv8, b1: bv8) + ensures var v := FUint16Spec(b0, b1); + FPutUint16Spec(v) == (b0, b1) +{} + +function FUint32Spec(b0: bv8, b1: bv8, b2: bv8, b3: bv8): bv32 { + (b3 as bv32) | ((b2 as bv32) << 8) | ((b1 as bv32) << 16) | ((b0 as bv32) << 24) +} + +function FPutUint32Spec(v: bv32): (bv8, bv8, bv8, bv8) { + (((v >> 24) & 0xFF) as bv8, + ((v >> 16) & 0xFF) as bv8, + ((v >> 8) & 0xFF) as bv8, + (v & 0xFF) as bv8) +} + +lemma FUint32AfterFPutUint32(v: bv32) + ensures var (b0, b1, b2, b3) := FPutUint32Spec(v); + FUint32Spec(b0, b1, b2, b3) == v +{} + +lemma FPutUint32AfterFUint32(b0: bv8, b1: bv8, b2: bv8, b3: bv8) + ensures var v := FUint32Spec(b0, b1, b2, b3); + FPutUint32Spec(v) == (b0, b1, b2, b3) +{} \ No newline at end of file diff --git a/verification/utils/definitions/definitions.gobra b/verification/utils/definitions/definitions.gobra 
index 19ae4d2a7..a9ac46f6b 100644 --- a/verification/utils/definitions/definitions.gobra +++ b/verification/utils/definitions/definitions.gobra @@ -75,6 +75,7 @@ const ( R53 R54 R55 + R56 ) // To be used as a precondition of functions and methods that can never be called @@ -119,4 +120,17 @@ requires b decreases func Asserting(ghost b bool) bool { return true -} \ No newline at end of file +} + +type Lemma struct{} + +// Assumption for IO-Specification +ghost +ensures b +decreases +func AssumeForIO(b bool) + +ghost +ensures b +decreases +func TemporaryAssumeForIO(b bool) From a8ff113766f0c4c65fa1b56c5f5b1fc4dfffab6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Wed, 10 Apr 2024 12:04:39 +0200 Subject: [PATCH 15/57] Add checks for termination modulo blocking (#309) * add termination checking if we ignore locking * add termination checks modulo locking * backup * fix termination measure * fix verification errors * fix verification error --- router/dataplane.go | 25 +++++++++++++++++++++- router/svc.go | 5 +++-- verification/dependencies/sync/mutex.gobra | 9 ++++++-- 3 files changed, 34 insertions(+), 5 deletions(-) diff --git a/router/dataplane.go b/router/dataplane.go index 167576808..4c9391ac5 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -104,8 +104,10 @@ type bfdSession interface { // (VerifiedSCION) an implementation must copy the fields it needs from msg // @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) // @ ensures msg.NonInitMem() + // @ decreases 0 if sync.IgnoreBlockingForTermination() ReceiveMessage(msg *layers.BFD /*@ , ghost ub []byte @*/) // @ requires acc(Mem(), _) + // @ decreases 0 if sync.IgnoreBlockingForTermination() IsUp() bool } @@ -235,6 +237,7 @@ func (e scmpError) Error() string { // @ ensures acc(d.Mem(), OutMutexPerm) // @ ensures !d.IsRunning() // @ ensures e == nil +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) SetIA(ia addr.IA) (e error) { d.mtx.Lock() defer d.mtx.Unlock() @@ -272,6 +275,7 @@ func (d *DataPlane) SetIA(ia addr.IA) (e error) { // @ ensures acc(d.Mem(), OutMutexPerm) // @ ensures !d.IsRunning() // @ ensures res == nil ==> d.KeyIsSet() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) SetKey(key []byte) (res error) { // @ share key d.mtx.Lock() @@ -331,6 +335,7 @@ func (d *DataPlane) SetKey(key []byte) (res error) { // @ preserves d.mtx.LockInv() == MutexInvariant!; // @ ensures acc(d.Mem(), OutMutexPerm) // @ ensures !d.IsRunning() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddInternalInterface(conn BatchConn, ip net.IP) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -367,6 +372,7 @@ func (d *DataPlane) AddInternalInterface(conn BatchConn, ip net.IP) error { // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() // @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddExternalInterface(ifID uint16, conn BatchConn) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -411,6 +417,7 @@ func (d *DataPlane) AddExternalInterface(ifID uint16, conn BatchConn) error { // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() // @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddNeighborIA(ifID uint16, remote addr.IA) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -448,6 +455,7 @@ func (d *DataPlane) AddNeighborIA(ifID uint16, remote addr.IA) error { // (VerifiedSCION) unlike all other 
setter methods, this does not lock d.mtx. // This was reported in https://github.com/scionproto/scion/issues/4282. // @ preserves MutexInvariant!() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddLinkType(ifID uint16, linkTo topology.LinkType) error { // @ unfold acc(d.Mem(), OutMutexPerm) if _, existsB := d.linkTypes[ifID]; existsB { @@ -504,6 +512,7 @@ func (d *DataPlane) AddExternalInterfaceBFD(ifID uint16, conn BatchConn, // returns InterfaceUp if the relevant bfdsession state is up, or if there is no BFD // session. Otherwise, it returns InterfaceDown. // @ preserves acc(d.Mem(), R5) +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) getInterfaceState(interfaceID uint16) control.InterfaceState { // @ unfold acc(d.Mem(), R5) // @ defer fold acc(d.Mem(), R5) @@ -564,6 +573,7 @@ func (d *DataPlane) addBFDController(ifID uint16, s *bfdSend, cfg control.BFD, // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() // @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddSvc(svc addr.HostSVC, a *net.UDPAddr) error { d.mtx.Lock() // @ unfold MutexInvariant!() @@ -616,6 +626,7 @@ func (d *DataPlane) AddSvc(svc addr.HostSVC, a *net.UDPAddr) error { // @ requires a != nil && acc(a.Mem(), R10) // @ preserves acc(d.Mem(), OutMutexPerm/2) // @ preserves d.mtx.LockP() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) DelSvc(svc addr.HostSVC, a *net.UDPAddr) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -646,6 +657,7 @@ func (d *DataPlane) DelSvc(svc addr.HostSVC, a *net.UDPAddr) error { // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() // @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddNextHop(ifID uint16, a *net.UDPAddr) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -915,11 +927,13 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> // @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer()) // @ invariant processor.sInit() && processor.sInitD() === d + // @ decreases pkts - i0 for i0 := 0; i0 < pkts; i0++ { // @ assert &msgs[:pkts][i0] == &msgs[i0] // @ preserves 0 <= i0 && i0 < pkts && pkts <= len(msgs) // @ preserves acc(msgs[i0].Mem(), R1) // @ ensures p === msgs[:pkts][i0].GetMessage() + // @ decreases // @ outline( // @ unfold acc(msgs[i0].Mem(), R1) p := msgs[:pkts][i0] @@ -936,7 +950,6 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ prometheus.CounterMemImpliesNonNil(inputCounters.InputBytesTotal) inputCounters.InputPacketsTotal.Inc() // @ assert msgs[i0].GetN() == p.N - // (VerifiedSCION) Gobra still does not fully support floats // @ fl.CastPreservesOrder64(0, p.N) inputCounters.InputBytesTotal.Add(float64(p.N)) @@ -1353,6 +1366,7 @@ func (p *scionPacketProcessor) reset() (err error) { // @ ensures respr.OutPkt !== rawPkt && respr.OutPkt != nil ==> // @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (p *scionPacketProcessor) processPkt(rawPkt []byte, srcAddr *net.UDPAddr) (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { @@ -1499,6 +1513,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ ensures acc(&p.ingressID, R20) // @ ensures p.bfdLayer.NonInitMem() // @ ensures 
err != nil ==> err.ErrorMem() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (p *scionPacketProcessor) processInterBFD(oh *onehop.Path, data []byte) (err error) { // @ unfold acc(p.d.Mem(), _) // @ ghost if p.d.bfdSessions != nil { unfold acc(accBfdSession(p.d.bfdSessions), _) } @@ -1534,6 +1549,7 @@ func (p *scionPacketProcessor) processInterBFD(oh *onehop.Path, data []byte) (er // @ ensures p.bfdLayer.NonInitMem() // @ ensures sl.AbsSlice_Bytes(data, 0, len(data)) // @ ensures res != nil ==> res.ErrorMem() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ unfold acc(p.d.Mem(), _) // @ ghost if p.d.bfdSessions != nil { unfold acc(accBfdSession(p.d.bfdSessions), _) } @@ -1563,6 +1579,7 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ invariant m != nil ==> acc(m, R20) // @ invariant m != nil ==> forall a *net.UDPAddr :: { a in range(m) } a in range(m) ==> acc(a.Mem(), _) // @ invariant acc(&p.srcAddr, R20) && acc(p.srcAddr.Mem(), _) + // @ decreases len(p.d.internalNextHops) - len(keys) for k, v := range p.d.internalNextHops /*@ with keys @*/ { // @ assert acc(&p.d.internalNextHops, _) // @ assert forall a *net.UDPAddr :: { a in range(m) } a in range(m) ==> acc(a.Mem(), _) @@ -1629,6 +1646,7 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ ensures respr.OutPkt !== ub && respr.OutPkt != nil ==> // @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { var ok bool @@ -2242,6 +2260,7 @@ func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr e // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> !addrAliasesUb // @ ensures reserr != nil ==> reserr.ErrorMem() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , addrAliasesUb bool @*/) { // (VerifiedSCION) the parameter used to be p.scionLayer, // instead of &p.scionLayer. 
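The `decreases 0 if sync.IgnoreBlockingForTermination()` measures added throughout this file make termination conditional: each method is only proven terminating under the assumption that blocking lock acquisitions are disregarded. A minimal sketch of the pattern, assuming the Mutex specification from verification/dependencies/sync/mutex.gobra included in this patch; the counter type and its method are hypothetical and not part of the change:

type counter struct {
	mtx sync.Mutex
	n   int
}

// @ requires acc(c.mtx.LockP(), _)
// @ decreases 0 if sync.IgnoreBlockingForTermination()
func (c *counter) touch() {
	// Lock carries 'decreases _ if IgnoreBlockingForTermination()', so this
	// call only counts as terminating under that assumption.
	c.mtx.Lock()
	// Unlock carries the unconditional measure 'decreases _'.
	c.mtx.Unlock()
}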
@@ -2403,6 +2422,7 @@ func (p *scionPacketProcessor) egressInterface() uint16 {
 // @ ensures respr.OutPkt != nil ==>
 // @ 	reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
+// @ decreases 0 if sync.IgnoreBlockingForTermination()
 func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr error) {
 	egressID := p.egressInterface()
 	// @ p.d.getBfdSessionsMem()
@@ -2715,6 +2735,7 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) (
 // @ ensures reserr == nil ==> p.scionLayer.Mem(ub)
 // @ ensures reserr != nil ==> p.scionLayer.NonInitMem()
 // @ ensures reserr != nil ==> reserr.ErrorMem()
+// @ decreases 0 if sync.IgnoreBlockingForTermination()
 func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool @*/) {
 	if r, err := p.parsePath( /*@ ub @*/ ); err != nil {
 		// @ p.scionLayer.DowngradePerm(ub)
@@ -2867,6 +2888,7 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool,
 // @ ensures respr.OutPkt !== p.rawPkt && respr.OutPkt != nil ==>
 // @ 	sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
+// @ decreases 0 if sync.IgnoreBlockingForTermination()
 func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) {
 	// @ ghost ubScionL := p.rawPkt
 	// @ p.scionLayer.ExtractAcc(ubScionL)
@@ -3063,6 +3085,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /
 // (VerifiedSCION) the type of 's' was changed from slayers.SCION to *slayers.SCION. This makes
 // specs a lot easier and makes the implementation faster as well by avoiding passing large
 // data-structures by value. We should consider merging this change into upstream SCION.
+// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) resolveLocalDst(s *slayers.SCION /*@, ghost ub []byte @*/) (resaddr *net.UDPAddr, reserr error /*@ , addrAliasesUb bool @*/) { // @ ghost start, end := s.ExtractAcc(ub) // @ assert s.RawDstAddr === ub[start:end] diff --git a/router/svc.go b/router/svc.go index 2f84ec164..8c8fd62bc 100644 --- a/router/svc.go +++ b/router/svc.go @@ -42,6 +42,7 @@ func newServices() (s *services) { // @ preserves acc(s.Mem(), R50) // @ requires acc(a.Mem(), R10) +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (s *services) AddSvc(svc addr.HostSVC, a *net.UDPAddr) { //@ unfold acc(s.Mem(), R50) s.mtx.Lock() @@ -69,6 +70,7 @@ func (s *services) AddSvc(svc addr.HostSVC, a *net.UDPAddr) { // @ preserves acc(s.Mem(), R50) // @ preserves acc(a.Mem(), R10) +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (s *services) DelSvc(svc addr.HostSVC, a *net.UDPAddr) { //@ unfold acc(s.Mem(), R50) s.mtx.Lock() @@ -102,6 +104,7 @@ func (s *services) DelSvc(svc addr.HostSVC, a *net.UDPAddr) { // @ requires acc(s.Mem(), _) // @ ensures !b ==> r == nil // @ ensures b ==> acc(r.Mem(), _) +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (s *services) Any(svc addr.HostSVC) (r *net.UDPAddr, b bool) { //@ unfold acc(s.Mem(), _) s.mtx.Lock() @@ -130,8 +133,6 @@ func (s *services) Any(svc addr.HostSVC) (r *net.UDPAddr, b bool) { // @ ensures b ==> 0 < len(addrs) // @ ensures b ==> 0 <= res && res < len(addrs) // @ ensures !b ==> res == -1 -// We could ensure stronger postconditions for this method, -// but it is unclear right now if we need them. // @ decreases func (s *services) index(a *net.UDPAddr, addrs []*net.UDPAddr /*@ , ghost k addr.HostSVC @*/) (res int, b bool) { // @ unfold acc(validMapValue(k, addrs), R11) diff --git a/verification/dependencies/sync/mutex.gobra b/verification/dependencies/sync/mutex.gobra index feb75100e..7b18c6566 100644 --- a/verification/dependencies/sync/mutex.gobra +++ b/verification/dependencies/sync/mutex.gobra @@ -25,11 +25,16 @@ ensures m.LockP() && m.LockInv() == inv decreases func (m *Mutex) SetInv(ghost inv pred()) +ghost +decreases _ +pure func IgnoreBlockingForTermination() bool + requires acc(m.LockP(), _) -ensures m.LockP() && m.UnlockP() && m.LockInv()() +ensures m.LockP() && m.UnlockP() && m.LockInv()() +decreases _ if IgnoreBlockingForTermination() func (m *Mutex) Lock() requires acc(m.LockP(), _) && m.UnlockP() && m.LockInv()() ensures m.LockP() -decreases +decreases _ func (m *Mutex) Unlock() From 45d3639354bb89e924b83aa38c9ee78d2f5bf363 Mon Sep 17 00:00:00 2001 From: Dionysios Spiliopoulos <32896454+Dspil@users.noreply.github.com> Date: Fri, 12 Apr 2024 11:38:58 +0200 Subject: [PATCH 16/57] Verify the IO behavior (a.k.a., basis PR) (#248) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * manually trigger workflow * manually trigger workflow * raw byte to spec for segments and hopfields * bugfix * import fix * bugfix after gobra update * spec to pkt (currSeg) * spec to pkt (left, mid, right) * spec to pkt (termination) * code clean up * clean up * improvements * instantiate abstract functions with bodies * progress io spec * formatting * specification fixes * IO-spec to pkt rewritten * clean up * improve readability * rename of function lengthOfCurrSeg * extract asid-seqence from raw pkt * missing trigger * quick fix * Update router/dataplane_spec.gobra Co-authored-by: Dionysios Spiliopoulos 
<32896454+Dspil@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: João Pereira * readability improvements * further improvements * replace 4 by its constant InfoLen * readability improvement * constant for metaLen in package path * Update router/io-spec.gobra Co-authored-by: João Pereira * minor improvements * move validMetaLenInPath() to test file * io-spec in Run * add body to absIO_val * fix merge mistake * fix merge mistake * fix typo * io-spec skeleton for processPkt and processSCION * import fix * Update router/io-spec.gobra * unit * well formdness * relate dataplane with dataplaneSpec * various fixes * Update verification/io/values.gobra * permission fix for dpSpecWellConfigured * permission fix in rc * fix verification error * dp.Valid() as opaque * backup * format spacing * improve perf; drop assumption * fix formatting * Update router/dataplane.go * formatting postconditions of processPkt, processSCION * fix extra permission * typo * processSCION had the same issue * ingressID is preseved intead of sInit * Revert "ingressID is preseved intead of sInit" This reverts commit 88db3fdf804c7dbd9252b5db8b485287b7ca0905. * Revert "processSCION had the same issue" This reverts commit 71aadfe5e248c5640c816988f124b31841333d09. * Updated IO-Spec-Function to Correlate Bytes with Terms (#262) * fixes in the asid extraction functions * pre-/postconditions for process * fix formatting * fix same issue in processSCION * fix var names * precondition changes in hopfield and asidFromIfs * prostcondition fix in process and processSCION * update imports links * Apply suggestions from code review --------- Co-authored-by: Dspil Co-authored-by: João Pereira * made absIO_val opaque * use artificial triggers for quantifiers inside IO resources (#280) * AbsPkt improvements (#270) * absPkt opaque and other improvements * quick fixes * changed permission from R55 to R56 * added missing permission amount in ReadBatch * fixed pre/postconditions of processPkt, processSCION and process * fixed opaque format --------- Co-authored-by: João Pereira * Verify send guard in Run (#263) * remove send operation * lemma for smaller buffer result in same abstract pkt * progress send guard * progress send guard * Fix incompleteness and continue with send guard (#273) * backup * backup * backup * backup * backup * drop space * pick better triggers * add necessary lemma and call to it; contains an assume false that needs to be dropped * backup * backup * add missing loop invariant about ingressID * backup * backup * fix verification error * try out a simpler trigger * widen lemma for absIO_val (#268) * widen lemma for abspkt (non termianting) * abspkt proven * renamed io-spec-lemmas * io val also proven * cleanup * merged markus' abspkt improvements * consdir lemma * proved * reinstate lemma4 * fix verification error * Simplify widen lemma from #268 (#282) * start simplifying * continue simplifying * continue simplifying stuff * continue simplifying * continue simplifying * simplify further * finish for now * Update router/io-spec.gobra --------- Co-authored-by: João Pereira * Continue send (#283) * widen lemma for abspkt (non termianting) * abspkt proven * renamed io-spec-lemmas * io val also proven * cleanup * merged markus' abspkt improvements * consdir lemma * proved * reinstate lemma4 * fix verification error * Simplify widen lemma from #268 (#282) * start simplifying * continue simplifying * continue simplifying stuff * continue simplifying * continue simplifying * simplify further * finish 
for now * Update router/io-spec.gobra * finish send in Run * propagate changes to processSCION --------- Co-authored-by: Dspil * backup * adapt to the new syntax of backend annotations * clean-up * changes according to feedback --------- Co-authored-by: João Pereira Co-authored-by: Dionysios Spiliopoulos <32896454+Dspil@users.noreply.github.com> Co-authored-by: Dspil * IO specification skeleton in process (#284) * absPkt opaque and other improvements * tests for local enter guard * new approach for absPkt * tests with GetIngressIDNotZero() * fix verification error * progress io-skeleton in process * progress Xover * progress io-spec skeleton in process * removed dulicate of lemma * fix verification error * removed old concurrency test * refactored absPkt * continue refactoring of absPkt * fixed postcondition in process * progress lemmas for io-spec * addressed feedback * progress in updateNonConsDirIngressSegID * fix verification errors * Prove IO lemmas in `path/scion` (#290) * try to prove lemma * backup * fix incompletness via additional lemma * fix verification error * fix verification errors and clean up * fix verification errors introduced in the latest changes to the PR * fix consistency error * add lemmas for updateNonConsDirIngressSegID() * Change to EQAbsHeader (#293) * changed EQAbsHeader * readbility improvements * progress in handleRouterAlerts methods * Fix verification errors in dependencies (#291) * backup * backup * backup * simplify Decoded.Reverse * clean-up * add section header * drop comment * fix verification errors in processEgress and DoXover addressing feedback clean up --------- Co-authored-by: João Pereira * Add functional spec to `InfoField.SerializeTo` (#300) * absPkt opaque and other improvements * tests for local enter guard * new approach for absPkt * tests with GetIngressIDNotZero() * fix verification error * progress io-skeleton in process * progress Xover * progress io-spec skeleton in process * removed dulicate of lemma * fix verification error * removed old concurrency test * refactored absPkt * continue refactoring of absPkt * fixed postcondition in process * progress lemmas for io-spec * addressed feedback * progress in updateNonConsDirIngressSegID * fix verification errors * Prove IO lemmas in `path/scion` (#290) * try to prove lemma * backup * fix incompletness via additional lemma * fix verification error * fix verification errors and clean up * fix verification errors introduced in the latest changes to the PR * fix consistency error * add lemmas for updateNonConsDirIngressSegID() * backup * Change to EQAbsHeader (#293) * changed EQAbsHeader * readbility improvements * backup * backup * simplify Decoded.Reverse * progress in handleRouterAlerts methods * clean-up * add section header * drop comment * Fix verification errors in dependencies (#291) * backup * backup * backup * simplify Decoded.Reverse * clean-up * add section header * drop comment * backup * backup * fix verification errors in processEgress and DoXover addressing feedback clean up * backup * drop one assume * readability improvements * backup * backup * simplify proof * adapt lemmas * verify spec for SerializeTo of infofield * Missing Postcondition in Process (#301) * absPkt opaque and other improvements * tests for local enter guard * new approach for absPkt * tests with GetIngressIDNotZero() * fix verification error * progress io-skeleton in process * progress Xover * progress io-spec skeleton in process * removed dulicate of lemma * fix verification error * removed old concurrency 
test * refactored absPkt * continue refactoring of absPkt * fixed postcondition in process * progress lemmas for io-spec * addressed feedback * progress in updateNonConsDirIngressSegID * fix verification errors * Prove IO lemmas in `path/scion` (#290) * try to prove lemma * backup * fix incompletness via additional lemma * fix verification error * fix verification errors and clean up * fix verification errors introduced in the latest changes to the PR * fix consistency error * add lemmas for updateNonConsDirIngressSegID() * Change to EQAbsHeader (#293) * changed EQAbsHeader * readbility improvements * Revert "Update gobra.yml to disableNL (#289)" This reverts commit 1e608307ba609fff78c3304f58accd996a4b1dbc. * progress in handleRouterAlerts methods * Fix verification errors in dependencies (#291) * backup * backup * backup * simplify Decoded.Reverse * clean-up * add section header * drop comment * fix verification errors in processEgress and DoXover addressing feedback clean up * fix verification error * changed postcondition in process * fix verification error * fix verification error * Update gobra.yml * added postcondition to packSCMP --------- Co-authored-by: João Pereira * Drop unnecessary function `hopFieldsNotConsDir` (#303) * reverse hopFieldsNotConsDir once * remove hopfieldsNotConsDir * hopFieldsConsDir => hopFields --------- Co-authored-by: João Pereira * Update IO-spec to drop the `xover_core` event (#302) * progress updating the IO-spec * finish updating new IO-spec * Fix precondition of `processSCION` (#307) * start fixing pres of processSCION * backup * backup * Drop unnecessary assertions * tiny fmt * streamline msgterm assumptions (#308) * improve verification time of processPkt * IO-spec update for link check logic (#310) * io-spec update * proof of link logic * fix verification errors * drop assumption in validateSrcDstIA() * fix verification error * Update pkg/slayers/path/scion/raw.go * Pre/Post conditions of processPkt (#312) * progress with pre and post conditions for io-spec in processPkt * fix verification error * changes in process * additional temporary assumptions in process() * cleanup --------- Co-authored-by: João Pereira * fmt --------- Co-authored-by: MLETHZ Co-authored-by: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Co-authored-by: João Pereira --- pkg/slayers/path/hopfield_spec.gobra | 51 +- pkg/slayers/path/infofield.go | 25 +- pkg/slayers/path/infofield_spec.gobra | 127 +++ pkg/slayers/path/io_msgterm_spec.gobra | 46 + pkg/slayers/path/scion/base.go | 22 +- pkg/slayers/path/scion/base_spec.gobra | 220 ++++- pkg/slayers/path/scion/decoded.go | 37 +- pkg/slayers/path/scion/raw.go | 122 ++- pkg/slayers/path/scion/raw_spec.gobra | 428 ++++++++-- pkg/slayers/scion.go | 8 + pkg/slayers/scion_spec.gobra | 119 ++- router/dataplane.go | 936 ++++++++++++++++----- router/io-spec-abstract-transitions.gobra | 235 ++++++ router/io-spec-atomic-events.gobra | 160 ++++ router/io-spec-non-proven-lemmas.gobra | 261 ++++++ router/io-spec.gobra | 610 ++------------ router/widen-lemma.gobra | 972 ++++++++++++++++++++++ verification/io/router.gobra | 3 +- verification/io/router_events.gobra | 1 + verification/io/xover.gobra | 2 + 20 files changed, 3503 insertions(+), 882 deletions(-) create mode 100644 pkg/slayers/path/infofield_spec.gobra create mode 100644 pkg/slayers/path/io_msgterm_spec.gobra create mode 100644 router/io-spec-abstract-transitions.gobra create mode 100644 router/io-spec-atomic-events.gobra create mode 100644 
router/io-spec-non-proven-lemmas.gobra create mode 100644 router/widen-lemma.gobra diff --git a/pkg/slayers/path/hopfield_spec.gobra b/pkg/slayers/path/hopfield_spec.gobra index 53a84309e..e93e22f39 100644 --- a/pkg/slayers/path/hopfield_spec.gobra +++ b/pkg/slayers/path/hopfield_spec.gobra @@ -16,32 +16,51 @@ package path -ghost const MetaLen = 4 +import ( + "verification/io" + "verification/utils/slices" + "verification/dependencies/encoding/binary" + . "verification/utils/definitions" +) pred (h *HopField) Mem() { acc(h) && h.ConsIngress >= 0 && h.ConsEgress >= 0 } -ghost + +ghost decreases -pure func InfoFieldOffset(currINF int) int { - return MetaLen + InfoLen * currINF +pure func ifsToIO_ifs(ifs uint16) option[io.IO_ifs]{ + return ifs == 0 ? none[io.IO_ifs] : some(io.IO_ifs(ifs)) } -ghost -requires 0 <= currINF -requires InfoFieldOffset(currINF) < len(raw) -requires acc(&raw[InfoFieldOffset(currINF)], _) +ghost +requires 0 <= start && start <= middle +requires middle + HopLen <= end && end <= len(raw) +requires acc(slices.AbsSlice_Bytes(raw, start, end), _) decreases -pure func ConsDir(raw []byte, currINF int) bool { - return raw[InfoFieldOffset(currINF)] & 0x1 == 0x1 +pure func BytesToIO_HF(raw [] byte, start int, middle int, end int) (io.IO_HF) { + return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> &raw[middle+2:middle+4][k] == &raw[middle + 2 + k]) in + let _ := Asserting(forall k int :: {&raw[middle+4:middle+6][k]} 0 <= k && k < 4 ==> &raw[middle+4:middle+6][k] == &raw[middle + 4 + k]) in + let _ := Asserting(forall k int :: {&raw[middle+6:middle+6+MacLen][k]} 0 <= k && k < MacLen ==> &raw[middle+6:middle+6+MacLen][k] == &raw[middle + 6 + k]) in + unfolding acc(slices.AbsSlice_Bytes(raw, start, end), _) in + let inif2 := binary.BigEndian.Uint16(raw[middle+2:middle+4]) in + let egif2 := binary.BigEndian.Uint16(raw[middle+4:middle+6]) in + let op_inif2 := ifsToIO_ifs(inif2) in + let op_egif2 := ifsToIO_ifs(egif2) in + io.IO_HF(io.IO_HF_{ + InIF2 : op_inif2, + EgIF2 : op_egif2, + HVF : AbsMac(FromSliceToMacArray(raw[middle+6:middle+6+MacLen])), + }) } -ghost -requires 0 <= currINF -requires InfoFieldOffset(currINF) < len(raw) -requires acc(&raw[InfoFieldOffset(currINF)], _) +ghost decreases -pure func Peer(raw []byte, currINF int) bool { - return raw[InfoFieldOffset(currINF)] & 0x2 == 0x2 +pure func (h HopField) ToIO_HF() (io.IO_HF) { + return io.IO_HF(io.IO_HF_{ + InIF2 : ifsToIO_ifs(h.ConsIngress), + EgIF2 : ifsToIO_ifs(h.ConsEgress), + HVF : AbsMac(h.Mac), + }) } diff --git a/pkg/slayers/path/infofield.go b/pkg/slayers/path/infofield.go index 0e1a9442c..b30edb937 100644 --- a/pkg/slayers/path/infofield.go +++ b/pkg/slayers/path/infofield.go @@ -22,8 +22,10 @@ import ( "github.com/scionproto/scion/pkg/private/serrors" "github.com/scionproto/scion/pkg/private/util" + //@ bits "github.com/scionproto/scion/verification/utils/bitwise" //@ . "github.com/scionproto/scion/verification/utils/definitions" //@ "github.com/scionproto/scion/verification/utils/slices" + //@ "verification/io" ) // InfoLen is the size of an InfoField in bytes. 
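BytesToIO_HF above abstracts one 12-byte hop field from the raw packet into the IO-spec term IO_HF: the two interface identifiers are big-endian uint16 values at offsets 2 and 4 within the hop field, and the final 6 bytes are the MAC. The following standalone Go sketch mirrors that layout with plain encoding/binary calls (illustrative only; the sample bytes are made up and the snippet is not part of the patch):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// One hop field (path.HopLen == 12 bytes):
	// [0] flags, [1] ExpTime, [2:4] ConsIngress, [4:6] ConsEgress, [6:12] MAC.
	hf := []byte{0x00, 0x3f, 0x00, 0x02, 0x00, 0x05, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}
	in := binary.BigEndian.Uint16(hf[2:4]) // InIF2: ifsToIO_ifs(2) == some(2); 0 would map to none
	eg := binary.BigEndian.Uint16(hf[4:6]) // EgIF2: ifsToIO_ifs(5) == some(5)
	mac := hf[6:12]                        // HVF: AbsMac over these MacLen (6) bytes
	fmt.Println(in, eg, mac)               // prints: 2 5 [170 187 204 221 238 255]
}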
@@ -85,26 +87,43 @@ func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { // @ preserves acc(inf, R10) // @ preserves slices.AbsSlice_Bytes(b, 0, InfoLen) // @ ensures err == nil +// @ ensures inf.ToIntermediateAbsInfoField() == +// @ BytesToIntermediateAbsInfoField(b, 0, 0, InfoLen) // @ decreases func (inf *InfoField) SerializeTo(b []byte) (err error) { if len(b) < InfoLen { return serrors.New("buffer for InfoField too short", "expected", InfoLen, "actual", len(b)) } + //@ ghost targetAbsInfo := inf.ToIntermediateAbsInfoField() //@ unfold slices.AbsSlice_Bytes(b, 0, InfoLen) b[0] = 0 if inf.ConsDir { b[0] |= 0x1 } + //@ ghost tmpInfo1 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen) + //@ bits.InfoFieldFirstByteSerializationLemmas() + //@ assert tmpInfo1.ConsDir == targetAbsInfo.ConsDir + //@ ghost firstByte := b[0] if inf.Peer { b[0] |= 0x2 } + //@ tmpInfo2 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen) + //@ assert tmpInfo2.Peer == (b[0] & 0x2 == 0x2) + //@ assert tmpInfo2.ConsDir == (b[0] & 0x1 == 0x1) + //@ assert tmpInfo2.Peer == targetAbsInfo.Peer + //@ assert tmpInfo2.ConsDir == tmpInfo1.ConsDir + //@ assert tmpInfo2.ConsDir == targetAbsInfo.ConsDir b[1] = 0 // reserved //@ assert &b[2:4][0] == &b[2] && &b[2:4][1] == &b[3] binary.BigEndian.PutUint16(b[2:4], inf.SegID) + //@ ghost tmpInfo3 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen) + //@ assert tmpInfo3.UInfo == targetAbsInfo.UInfo //@ assert &b[4:8][0] == &b[4] && &b[4:8][1] == &b[5] //@ assert &b[4:8][2] == &b[6] && &b[4:8][3] == &b[7] binary.BigEndian.PutUint32(b[4:8], inf.Timestamp) + //@ ghost tmpInfo4 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen) + //@ assert tmpInfo4.AInfo == targetAbsInfo.AInfo //@ fold slices.AbsSlice_Bytes(b, 0, InfoLen) return nil } @@ -112,11 +131,15 @@ func (inf *InfoField) SerializeTo(b []byte) (err error) { // UpdateSegID updates the SegID field by XORing the SegID field with the 2 // first bytes of the MAC. It is the beta calculation according to // https://docs.scion.org/en/latest/protocols/scion-header.html#hop-field-mac-computation +// @ requires hf.HVF == AbsMac(hfMac) // @ preserves acc(&inf.SegID) +// @ ensures AbsUInfoFromUint16(inf.SegID) == +// @ old(io.upd_uinfo(AbsUInfoFromUint16(inf.SegID), hf)) // @ decreases -func (inf *InfoField) UpdateSegID(hfMac [MacLen]byte) { +func (inf *InfoField) UpdateSegID(hfMac [MacLen]byte /* @, ghost hf io.IO_HF @ */) { //@ share hfMac inf.SegID = inf.SegID ^ binary.BigEndian.Uint16(hfMac[:2]) + // @ AssumeForIO(AbsUInfoFromUint16(inf.SegID) == old(io.upd_uinfo(AbsUInfoFromUint16(inf.SegID), hf))) } // @ decreases diff --git a/pkg/slayers/path/infofield_spec.gobra b/pkg/slayers/path/infofield_spec.gobra new file mode 100644 index 000000000..b0da954d4 --- /dev/null +++ b/pkg/slayers/path/infofield_spec.gobra @@ -0,0 +1,127 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
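The ghost assertions added to SerializeTo above relate each partially written buffer back to the target abstract info field; for the flag byte they rely on the bitwise facts named by InfoFieldFirstByteSerializationLemmas. A standalone sketch of the concrete round-trip those facts justify (plain Go without ghost code; not part of the patch):

package main

import "fmt"

func main() {
	// First byte of a serialized InfoField: bit 0 encodes ConsDir, bit 1 Peer.
	consDir, peer := true, true
	var b0 byte
	if consDir {
		b0 |= 0x1 // uses the lemma fact 0 | 1 == 1
	}
	if peer {
		b0 |= 0x2 // uses the lemma fact 1 | 2 == 3
	}
	// Decoding recovers both flags, by 3 & 1 == 1 and 3 & 2 == 2.
	fmt.Println(b0&0x1 == 0x1, b0&0x2 == 0x2) // prints: true true
}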
+ +// +gobra + +package path + +import ( + "verification/io" + sl "verification/utils/slices" + "verification/dependencies/encoding/binary" + . "verification/utils/definitions" +) + +ghost const MetaLen = 4 + +ghost +decreases +pure func InfoFieldOffset(currINF, headerOffset int) int { + return headerOffset + MetaLen + InfoLen * currINF +} + +ghost +requires 0 <= currINF && 0 <= headerOffset +requires InfoFieldOffset(currINF, headerOffset) < len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func ConsDir(raw []byte, currINF int, headerOffset int) bool { + return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + raw[InfoFieldOffset(currINF, headerOffset)] & 0x1 == 0x1 +} + +ghost +requires 0 <= currINF && 0 <= headerOffset +requires InfoFieldOffset(currINF, headerOffset) < len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func Peer(raw []byte, currINF int, headerOffset int) bool { + return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + raw[InfoFieldOffset(currINF, headerOffset)] & 0x2 == 0x2 +} + +ghost +requires 0 <= currINF && 0 <= headerOffset +requires InfoFieldOffset(currINF, headerOffset) + InfoLen < len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func Timestamp(raw []byte, currINF int, headerOffset int) io.IO_ainfo { + return let idx := InfoFieldOffset(currINF, headerOffset) + 4 in + unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + let _ := Asserting(forall i int :: { &raw[idx+i] } { &raw[idx:idx+4][i] } 0 <= i && i < 4 ==> + &raw[idx+i] == &raw[idx:idx+4][i]) in + io.IO_ainfo(binary.BigEndian.Uint32(raw[idx : idx + 4])) +} + +ghost +requires 0 <= currINF && 0 <= headerOffset +requires InfoFieldOffset(currINF, headerOffset) + InfoLen < len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func AbsUinfo(raw []byte, currINF int, headerOffset int) set[io.IO_msgterm] { + return let idx := InfoFieldOffset(currINF, headerOffset) + 2 in + unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + let _ := Asserting(forall k int :: {&raw[idx:idx+2][k]} 0 <= k && k < 2 ==> + &raw[idx:idx+4][k] == &raw[idx + k]) in + AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[idx:idx+2])) +} + +// This type simplifies the infoField, making it easier +// to use than the IO_seg3 from the IO-spec. 
+type IntermediateAbsInfoField adt {
+	IntermediateAbsInfoField_ {
+		AInfo io.IO_ainfo
+		UInfo set[io.IO_msgterm]
+		ConsDir bool
+		Peer bool
+	}
+}
+
+ghost
+requires 0 <= start && start <= middle
+requires middle+InfoLen <= end && end <= len(raw)
+requires acc(sl.AbsSlice_Bytes(raw, start, end), _)
+decreases
+pure func BytesToIntermediateAbsInfoField(raw [] byte, start int, middle int, end int) (IntermediateAbsInfoField) {
+	return unfolding acc(sl.AbsSlice_Bytes(raw, start, end), _) in
+		BytesToIntermediateAbsInfoFieldHelper(raw, middle, end)
+}
+
+ghost
+requires 0 <= middle
+requires middle+InfoLen <= end && end <= len(raw)
+requires forall i int :: { &raw[i] } middle <= i && i < end ==>
+	acc(&raw[i], _)
+decreases
+pure func BytesToIntermediateAbsInfoFieldHelper(raw [] byte, middle int, end int) (IntermediateAbsInfoField) {
+	return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> &raw[middle+2:middle+4][k] == &raw[middle+2 + k]) in
+		let _ := Asserting(forall k int :: {&raw[middle+4:middle+8][k]} 0 <= k && k < 4 ==> &raw[middle+4:middle+8][k] == &raw[middle+4 + k]) in
+		IntermediateAbsInfoField(IntermediateAbsInfoField_{
+			AInfo : io.IO_ainfo(binary.BigEndian.Uint32(raw[middle+4:middle+8])),
+			UInfo : AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[middle+2:middle+4])),
+			ConsDir : raw[middle] & 0x1 == 0x1,
+			Peer : raw[middle] & 0x2 == 0x2,
+		})
+}
+
+ghost
+decreases
+pure func (inf InfoField) ToIntermediateAbsInfoField() (IntermediateAbsInfoField) {
+	return IntermediateAbsInfoField(IntermediateAbsInfoField_{
+		AInfo : io.IO_ainfo(inf.Timestamp),
+		UInfo : AbsUInfoFromUint16(inf.SegID),
+		ConsDir : inf.ConsDir,
+		Peer : inf.Peer,
+	})
+}
\ No newline at end of file
diff --git a/pkg/slayers/path/io_msgterm_spec.gobra b/pkg/slayers/path/io_msgterm_spec.gobra
new file mode 100644
index 000000000..41e39093d
--- /dev/null
+++ b/pkg/slayers/path/io_msgterm_spec.gobra
@@ -0,0 +1,46 @@
+// Copyright 2020 Anapaya Systems
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +gobra
+
+package path
+
+import "verification/io"
+
+// At the moment, we assume that all cryptographic operations performed at the code level
+// imply the desired properties at the IO spec level because we cannot currently prove in
+// Gobra the correctness of these operations. Given that we do not prove any properties
+// about these functions, we currently do not provide definitions for them.
+
+ghost
+decreases
+pure func AbsUInfoFromUint16(SegID uint16) set[io.IO_msgterm]
+
+ghost
+decreases
+pure func AbsMac(mac [MacLen]byte) (io.IO_msgterm)
+
+// The following function converts a slice with at least `MacLen` elements into
+// an (exclusive) array containing the mac. Note that there are no permissions
+// involved for accessing exclusive arrays. This function is abstract for now
+// because Gobra does not allow for array literals in pure functions, even though
+// they are no more side-effectful than creating an instance of a struct type.
+// This will soon be fixed in Gobra.
+ghost
+requires MacLen <= len(mac)
+requires forall i int :: { &mac[i] } 0 <= i && i < MacLen ==> acc(&mac[i], _)
+ensures len(res) == MacLen
+ensures forall i int :: { res[i] } 0 <= i && i < MacLen ==> mac[i] == res[i]
+decreases
+pure func FromSliceToMacArray(mac []byte) (res [MacLen]byte)
diff --git a/pkg/slayers/path/scion/base.go b/pkg/slayers/path/scion/base.go
index a41185eab..d74fe0c09 100644
--- a/pkg/slayers/path/scion/base.go
+++ b/pkg/slayers/path/scion/base.go
@@ -259,18 +259,7 @@ type MetaHdr struct {
 // @ preserves acc(m)
 // @ preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50)
 // @ ensures (len(raw) >= MetaLen) == (e == nil)
-// @ ensures e == nil ==> (
-// @ 	MetaLen <= len(raw) &&
-// @ 	0 <= m.CurrINF && m.CurrINF <= 3 &&
-// @ 	0 <= m.CurrHF && m.CurrHF < 64 &&
-// @ 	m.SegsInBounds() &&
-// @ 	let lenR := len(raw) in
-// @ 	let b0 := sl.GetByte(raw, 0, lenR, 0) in
-// @ 	let b1 := sl.GetByte(raw, 0, lenR, 1) in
-// @ 	let b2 := sl.GetByte(raw, 0, lenR, 2) in
-// @ 	let b3 := sl.GetByte(raw, 0, lenR, 3) in
-// @ 	let line := binary.BigEndian.Uint32Spec(b0, b1, b2, b3) in
-// @ 	DecodedFrom(line) == *m)
+// @ ensures e == nil ==> m.DecodeFromBytesSpec(raw)
 // @ ensures e != nil ==> e.ErrorMem()
 // @ decreases
 func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) {
@@ -300,16 +289,11 @@ func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) {
 // @ preserves acc(m, R50)
 // @ preserves sl.AbsSlice_Bytes(b, 0, len(b))
 // @ ensures e == nil
-// @ ensures let lenR := len(b) in
-// @ 	let b0 := sl.GetByte(b, 0, lenR, 0) in
-// @ 	let b1 := sl.GetByte(b, 0, lenR, 1) in
-// @ 	let b2 := sl.GetByte(b, 0, lenR, 2) in
-// @ 	let b3 := sl.GetByte(b, 0, lenR, 3) in
-// @ 	let v := m.SerializedToLine() in
-// @ 	binary.BigEndian.PutUint32Spec(b0, b1, b2, b3, v)
+// @ ensures m.SerializeToSpec(b)
 // @ decreases
 func (m *MetaHdr) SerializeTo(b []byte) (e error) {
 	if len(b) < MetaLen {
+		// @ Unreachable()
 		return serrors.New("buffer for MetaHdr too short", "expected", MetaLen,
 			"actual", len(b))
 	}
 	line := uint32(m.CurrINF)<<30 | uint32(m.CurrHF&0x3F)<<24
diff --git a/pkg/slayers/path/scion/base_spec.gobra b/pkg/slayers/path/scion/base_spec.gobra
index b12aec326..48faeebcf 100644
--- a/pkg/slayers/path/scion/base_spec.gobra
+++ b/pkg/slayers/path/scion/base_spec.gobra
@@ -20,6 +20,8 @@ import (
 	"encoding/binary"
 	"github.com/scionproto/scion/pkg/slayers/path"
 	sl "github.com/scionproto/scion/verification/utils/slices"
+
+	. "github.com/scionproto/scion/verification/utils/definitions"
 )
 
 pred (b *Base) NonInitMem() {
@@ -73,9 +75,8 @@ pure func (b Base) ValidCurrIdxsSpec() bool {
 		(forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==>
 			b.PathMeta.SegLen[i] != 0) &&
 		(forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==>
-			b.PathMeta.SegLen[i] == 0)
-		// Surprisingly, the following does not seem to be needed
-		// b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF)
+			b.PathMeta.SegLen[i] == 0) &&
+		b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF)
 }
 
 ghost
@@ -142,6 +143,13 @@ pure func (s *Base) GetMetaHdr() MetaHdr {
 	return unfolding acc(s.Mem(), _) in s.PathMeta
 }
 
+ghost
+requires acc(s.Mem(), _)
+decreases
+pure func (s *Base) GetBase() Base {
+	return unfolding acc(s.Mem(), _) in *s
+}
+
 ghost
 requires acc(s.Mem(), _)
 decreases
@@ -180,6 +188,145 @@ pure func (s Base) IncPathSpec() (res Base) {
 	}
 }
 
+/*************** AbsBase ***************/
+
+// There is a good deal of repetition between this section of the file and the
+// similarly named functions for the type `Base`. While for now this is not too
+// big of a problem, we should find ways to streamline the definitions, ideally
+// by having these defs for the abstraction type only.
+
+type AbsBase adt {
+	AbsBase_ {
+		PathMeta AbsMetaHdr
+		NumINF int
+		NumHops int
+	}
+}
+
+type AbsMetaHdr adt {
+	AbsMetaHdr_ {
+		// we should change the types of CurrINF and CurrHF to wider types,
+		// otherwise we might start getting overflow errors here when they
+		// are fully enabled.
+		CurrINF uint8
+		CurrHF uint8
+		SegLen seq[uint8]
+	}
+}
+
+ghost
+decreases
+pure func (b Base) Abs() AbsBase {
+	return AbsBase_{
+		PathMeta: b.PathMeta.Abs(),
+		NumINF: b.NumINF,
+		NumHops: b.NumHops,
+	}
+}
+
+ghost
+decreases
+pure func (b MetaHdr) Abs() AbsMetaHdr {
+	return AbsMetaHdr_{
+		CurrINF: b.CurrINF,
+		CurrHF: b.CurrHF,
+		SegLen: seq[uint8]{ b.SegLen[0], b.SegLen[1], b.SegLen[2] },
+	}
+}
+
+ghost
+requires len(b.PathMeta.SegLen) == 3
+decreases
+pure func (b AbsBase) ReverseSpec() AbsBase {
+	return AbsBase_ {
+		PathMeta: b.ReverseMetaHdrSpec(),
+		NumINF: b.NumINF,
+		NumHops: b.NumHops,
+	}
+}
+
+ghost
+requires len(b.PathMeta.SegLen) == 3
+decreases
+pure func (b AbsBase) ReverseMetaHdrSpec() AbsMetaHdr {
+	return AbsMetaHdr_ {
+		CurrINF: uint8(b.NumINF) - b.PathMeta.CurrINF - 1,
+		CurrHF: uint8(b.NumHops) - b.PathMeta.CurrHF - 1,
+		SegLen: b.ReverseSegLen(),
+	}
+}
+
+ghost
+requires len(b.PathMeta.SegLen) == 3
+decreases
+pure func (b AbsBase) ReverseSegLen() seq[uint8] {
+	return (match b.NumINF {
+		case 2: seq[uint8]{ b.PathMeta.SegLen[1], b.PathMeta.SegLen[0], b.PathMeta.SegLen[2]}
+		case 3: seq[uint8]{ b.PathMeta.SegLen[2], b.PathMeta.SegLen[1], b.PathMeta.SegLen[0] }
+		default: b.PathMeta.SegLen
+	})
+}
+
+ghost
+decreases
+pure func (b AbsBase) ValidCurrIdxsSpec() bool {
+	return 0 <= b.NumINF && b.NumINF <= MaxINFs &&
+		len(b.PathMeta.SegLen) == 3 &&
+		0 <= b.NumHops && b.NumHops <= MaxHops &&
+		b.ValidCurrHfSpec() &&
+		b.ValidCurrInfSpec() &&
+		0 <= b.PathMeta.SegLen[0] && b.PathMeta.SegLen[0] < MaxHops &&
+		0 <= b.PathMeta.SegLen[1] && b.PathMeta.SegLen[1] < MaxHops &&
+		0 <= b.PathMeta.SegLen[2] && b.PathMeta.SegLen[2] < MaxHops &&
+		(b.NumINF == 1 ==> b.NumHops == int(b.PathMeta.SegLen[0])) &&
+		(b.NumINF == 2 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1])) &&
+		(b.NumINF == 3 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1] + b.PathMeta.SegLen[2])) &&
+		(forall i int :: {
b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==> + b.PathMeta.SegLen[i] != 0) && + (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==> + b.PathMeta.SegLen[i] == 0) && + b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) +} + +ghost +decreases +pure func (b AbsBase) ValidCurrInfSpec() bool { + return 0 <= b.PathMeta.CurrINF && b.PathMeta.CurrINF < b.NumINF +} + +ghost +decreases +pure func (b AbsBase) ValidCurrHfSpec() bool { + return 0 <= b.PathMeta.CurrHF && b.PathMeta.CurrHF < b.NumHops +} + +ghost +requires len(s.PathMeta.SegLen) == 3 +ensures 0 <= r && r < 3 +decreases +pure func (s AbsBase) InfForHfSpec(hf uint8) (r uint8) { + return hf < s.PathMeta.SegLen[0] ? + 0 : + (hf < s.PathMeta.SegLen[0] + s.PathMeta.SegLen[1] ? 1 : 2) +} + +ghost +requires b.ValidCurrIdxsSpec() +ensures b.ReverseSpec().ValidCurrIdxsSpec() +decreases +pure func (b AbsBase) ReversingValidBaseIsValidBase() Lemma { + return Lemma{} +} + +ghost +ensures b.ValidCurrIdxsSpec() == b.Abs().ValidCurrIdxsSpec() +decreases +pure func (b Base) ValidBaseHasValidAbs() Lemma { + return Lemma{} +} + +/*************** End of AbsBase ***************/ + ghost requires b.Mem() ensures b.NonInitMem() @@ -199,6 +346,23 @@ pure func DecodedFrom(line uint32) MetaHdr { } } +ghost +requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), _) +decreases +pure func (m MetaHdr) DecodeFromBytesSpec(b []byte) bool { + return MetaLen <= len(b) && + 0 <= m.CurrINF && m.CurrINF <= 3 && + 0 <= m.CurrHF && m.CurrHF < 64 && + m.SegsInBounds() && + let lenR := len(b) in + let b0 := sl.GetByte(b, 0, lenR, 0) in + let b1 := sl.GetByte(b, 0, lenR, 1) in + let b2 := sl.GetByte(b, 0, lenR, 2) in + let b3 := sl.GetByte(b, 0, lenR, 3) in + let line := binary.BigEndian.Uint32Spec(b0, b1, b2, b3) in + DecodedFrom(line) == m +} + ghost decreases pure func (m MetaHdr) SegsInBounds() bool { @@ -217,6 +381,20 @@ pure func (m MetaHdr) SerializedToLine() uint32 { uint32(m.SegLen[2] & 0x3F) } +ghost +requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), _) +decreases +pure func (m MetaHdr) SerializeToSpec(b []byte) bool { + return MetaLen <= len(b) && + let lenR := len(b) in + let b0 := sl.GetByte(b, 0, lenR, 0) in + let b1 := sl.GetByte(b, 0, lenR, 1) in + let b2 := sl.GetByte(b, 0, lenR, 2) in + let b3 := sl.GetByte(b, 0, lenR, 3) in + let v := m.SerializedToLine() in + binary.BigEndian.PutUint32Spec(b0, b1, b2, b3, v) +} + ghost decreases pure func (m MetaHdr) InBounds() bool { @@ -227,6 +405,42 @@ pure func (m MetaHdr) InBounds() bool { 0 <= m.SegLen[2] && m.SegLen[2] <= 63 } +ghost +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s Base) EqAbsHeader(ub []byte) bool { + // we compute the sublice ub[:MetaLen] inside this function instead + // of expecting the correct subslice to be passed, otherwise this function + // becomes too cumbersome to use in calls from (*Raw).EqAbsHeader due to the + // lack of a folding expression. Same goes for MetaHdr.EqAbsHeader. 
+ return MetaLen <= len(ub) && + s == RawBytesToBase(ub) +} + +ghost +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +decreases +pure func (s MetaHdr) EqAbsHeader(ub []byte) bool { + return MetaLen <= len(ub) && + unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in + s == DecodedFrom(binary.BigEndian.Uint32(ub[:MetaLen])) +} + +ghost +opaque +requires MetaLen <= idx && idx <= len(ub) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +requires acc(sl.AbsSlice_Bytes(ub[:idx], 0, idx), R55) +ensures s.EqAbsHeader(ub) == s.EqAbsHeader(ub[:idx]) +decreases +pure func (s MetaHdr) EqAbsHeaderForSublice(ub []byte, idx int) Lemma { + return let _ := Asserting(ub[:MetaLen] === ub[:idx][:MetaLen]) in + unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) in + unfolding acc(sl.AbsSlice_Bytes(ub[:idx], 0, idx), R56) in + let _ := Asserting(s.EqAbsHeader(ub) == (s == DecodedFrom(binary.BigEndian.Uint32(ub[:MetaLen])))) in + Lemma{} +} + /** Lemma proven in /VerifiedSCION/verification/utils/bitwise/proofs.dfy **/ ghost requires m.InBounds() diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go index 195125e6e..764a63f4d 100644 --- a/pkg/slayers/path/scion/decoded.go +++ b/pkg/slayers/path/scion/decoded.go @@ -218,7 +218,20 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { // @ decreases func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { //@ ghost isValid := s.ValidCurrIdxs(ubuf) - //@ ghost base := s.GetBase(ubuf) + /*@ + ghost base := s.GetBase(ubuf) + ghost absBase := base.Abs() + ghost absMetaHdrAferReversingSegLen := AbsMetaHdr_ { + CurrINF: absBase.PathMeta.CurrINF, + CurrHF: absBase.PathMeta.CurrHF, + SegLen: absBase.ReverseSegLen(), + } + ghost absBaseAfterReversingSegLen := AbsBase_ { + PathMeta: absMetaHdrAferReversingSegLen, + NumINF: absBase.NumINF, + NumHops: absBase.NumHops, + } + @*/ //@ unfold s.Mem(ubuf) //@ unfold s.Base.Mem() if s.NumINF == 0 { @@ -234,19 +247,12 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { s.PathMeta.SegLen[0], s.PathMeta.SegLen[lastIdx] = s.PathMeta.SegLen[lastIdx], s.PathMeta.SegLen[0] } //@ fold s.Base.Mem() - //@ fold s.Mem(ubuf) - - //@ preserves s.Mem(ubuf) - //@ preserves isValid ==> s.ValidCurrIdxs(ubuf) - //@ decreases - //@ outline( - //@ unfold s.Mem(ubuf) //@ invariant acc(s.Base.Mem(), R10) //@ invariant 0 <= i && i <= s.Base.GetNumINF() //@ invariant acc(&s.InfoFields, R10) //@ invariant len(s.InfoFields) == s.Base.GetNumINF() - //@ invariant forall i int :: { &s.InfoFields[i] } 0 <= i && i < len(s.InfoFields) ==> (acc(&s.InfoFields[i].ConsDir)) - //@ invariant isValid ==> s.Base.ValidCurrIdxs() + //@ invariant forall i int :: { &s.InfoFields[i] } 0 <= i && i < len(s.InfoFields) ==> + //@ (acc(&s.InfoFields[i].ConsDir)) //@ decreases MaxINFs-i // Reverse cons dir flags for i := 0; i < ( /*@ unfolding acc(s.Base.Mem(), R11) in @*/ s.NumINF); i++ { @@ -254,13 +260,12 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { info.ConsDir = !info.ConsDir } //@ fold s.Mem(ubuf) - //@ ) // Reverse order of hop fields //@ invariant s.Mem(ubuf) //@ invariant 0 <= i && i <= s.GetNumHops(ubuf) //@ invariant -1 <= j && j < s.GetNumHops(ubuf) - //@ invariant isValid ==> s.ValidCurrIdxs(ubuf) + //@ invariant s.GetBase(ubuf).Abs() == absBaseAfterReversingSegLen //@ decreases j-i for i, j := 0, ( /*@ unfolding s.Mem(ubuf) in (unfolding s.Base.Mem() in @*/ s.NumHops - 1 /*@ ) @*/); i < j; i, j = i+1, j-1 { 
//@ unfold s.Mem(ubuf) @@ -275,17 +280,15 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { //@ fold s.Mem(ubuf) } // Update CurrINF and CurrHF and SegLens - //@ preserves s.Mem(ubuf) - //@ preserves isValid ==> s.ValidCurrIdxs(ubuf) - //@ decreases - //@ outline( //@ unfold s.Mem(ubuf) //@ unfold s.Base.Mem() s.PathMeta.CurrINF = uint8(s.NumINF) - s.PathMeta.CurrINF - 1 s.PathMeta.CurrHF = uint8(s.NumHops) - s.PathMeta.CurrHF - 1 + //@ assert s.Base.Abs() == absBase.ReverseSpec() + //@ ghost if isValid { absBase.ReversingValidBaseIsValidBase() } + //@ assert isValid ==> s.Base.Abs().ValidCurrIdxsSpec() //@ fold s.Base.Mem() //@ fold s.Mem(ubuf) - //@ ) return s, nil } diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index 825115cd7..8f3d24e5e 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -22,6 +22,7 @@ import ( "github.com/scionproto/scion/pkg/slayers/path" //@ . "github.com/scionproto/scion/verification/utils/definitions" //@ sl "github.com/scionproto/scion/verification/utils/slices" + //@ io "verification/io" ) // Raw is a raw representation of the SCION (data-plane) path type. It is designed to parse as @@ -217,10 +218,13 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // IncPath increments the path and writes it to the buffer. // @ requires s.Mem(ubuf) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires s.EqAbsHeader(ubuf) +// @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) // @ ensures old(unfolding s.Mem(ubuf) in unfolding // @ s.Base.Mem() in (s.NumINF <= 0 || int(s.PathMeta.CurrHF) >= s.NumHops-1)) ==> r != nil // @ ensures r == nil ==> s.Mem(ubuf) +// @ ensures r == nil && s.InBounds(ubuf) ==> s.EqAbsHeader(ubuf) // @ ensures r != nil ==> s.NonInitMem() // @ ensures r != nil ==> r.ErrorMem() // @ decreases @@ -230,12 +234,38 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ fold s.NonInitMem() return err } - //@ fold s.Mem(ubuf) - //@ s.RawIdxPerm(ubuf, MetaLen, writePerm) - //@ unfold acc(s.Base.Mem(), 1/2) + //@ sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, HalfPerm) + //@ sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, HalfPerm) + //@ sl.Reslice_Bytes(ubuf, 0, MetaLen, HalfPerm) + //@ sl.Reslice_Bytes(ubuf, 0, MetaLen, HalfPerm) + + //@ unfold acc(s.Base.Mem(), R2) err := s.PathMeta.SerializeTo(s.Raw[:MetaLen]) - //@ fold acc(s.Base.Mem(), 1/2) - //@ s.UndoRawIdxPerm(ubuf, MetaLen, writePerm) + //@ ghost if s.PathMeta.InBounds() { + //@ v := s.Raw[:MetaLen] + //@ b0 := sl.GetByte(v, 0, MetaLen, 0) + //@ b1 := sl.GetByte(v, 0, MetaLen, 1) + //@ b2 := sl.GetByte(v, 0, MetaLen, 2) + //@ b3 := sl.GetByte(v, 0, MetaLen, 3) + //@ s.PathMeta.SerializeAndDeserializeLemma(b0, b1, b2, b3) + //@ } + //@ assert s.PathMeta.InBounds() ==> s.PathMeta.EqAbsHeader(s.Raw[:MetaLen]) + //@ fold acc(s.Base.Mem(), R3) + + //@ sl.Unslice_Bytes(ubuf, 0, MetaLen, R2) + //@ sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, R2) + //@ fold acc(s.Mem(ubuf), R2) + //@ assert s.InBounds(ubuf) == s.PathMeta.InBounds() + //@ assert s.EqAbsHeader(ubuf) == s.PathMeta.EqAbsHeader(ubuf) + //@ s.PathMeta.EqAbsHeaderForSublice(ubuf, MetaLen) + //@ assert s.EqAbsHeader(ubuf) == s.PathMeta.EqAbsHeader(s.Raw[:MetaLen]) + //@ assert s.InBounds(ubuf) ==> s.EqAbsHeader(ubuf) + + //@ sl.Unslice_Bytes(ubuf, 0, MetaLen, 1-R2) + //@ sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, 1-R2) + //@ 
fold acc(s.Base.Mem(), R3) + //@ fold acc(s.Mem(ubuf), 1-R2) + //@ assert s.InBounds(ubuf) ==> s.EqAbsHeader(ubuf) return err } @@ -272,7 +302,7 @@ func (s *Raw) GetInfoField(idx int /*@, ghost ubuf []byte @*/) (ifield path.Info // GetCurrentInfoField is a convenience method that returns the current hop field pointed to by the // CurrINF index in the path meta header. // @ preserves acc(s.Mem(ubuf), R8) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R1) +// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R2) // @ ensures (r == nil) == (s.GetCurrINF(ubuf) < s.GetNumINF(ubuf)) // @ ensures r != nil ==> r.ErrorMem() // @ decreases @@ -289,30 +319,63 @@ func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoFie } // SetInfoField updates the InfoField at a given index. -// @ requires 0 <= idx -// @ preserves acc(s.Mem(ubuf), R20) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) -// @ ensures r != nil ==> r.ErrorMem() +// @ requires 0 <= idx +// @ requires sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires acc(s.Mem(ubuf), R20) +// pres for IO: +// @ requires dp.Valid() && validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) +// @ ensures acc(s.Mem(ubuf), R20) +// @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ ensures r != nil ==> r.ErrorMem() +// posts for IO: +// @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> +// @ validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) +// @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> +// @ let oldPkt := old(s.absPkt(dp, ubuf)) in +// @ let newPkt := AbsSetInfoField(oldPkt, info.ToIntermediateAbsInfoField()) in +// @ s.absPkt(dp, ubuf) == newPkt // @ decreases -func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @*/) (r error) { +func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte, ghost dp io.DataPlaneSpec@*/) (r error) { //@ share info - //@ unfold acc(s.Mem(ubuf), R20) - //@ unfold acc(s.Base.Mem(), R20) + //@ ghost oldCurrINF := int(old(s.GetCurrINF(ubuf))) + //@ unfold acc(s.Mem(ubuf), R50) + //@ unfold acc(s.Base.Mem(), R50) if idx >= s.NumINF { err := serrors.New("InfoField index out of bounds", "max", s.NumINF-1, "actual", idx) - //@ fold acc(s.Base.Mem(), R20) - //@ fold acc(s.Mem(ubuf), R20) + //@ fold acc(s.Base.Mem(), R50) + //@ fold acc(s.Mem(ubuf), R50) return err } infOffset := MetaLen + idx*path.InfoLen - //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), writePerm) + //@ assert idx == oldCurrINF ==> reveal validPktMetaHdr(ubuf) + //@ assert idx == oldCurrINF ==> s.EqAbsHeader(ubuf) + + //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) + //@ ValidPktMetaHdrSublice(ubuf, len(s.Raw)) + //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) + //@ assert idx == oldCurrINF ==> RawBytesToBase(ubuf[:len(s.Raw)]).ValidCurrIdxsSpec() + //@ assert sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)) - //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, writePerm) + //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) + //@ assert acc(sl.AbsSlice_Bytes(s.Raw, 0, infOffset), HalfPerm) + //@ sl.Reslice_Bytes(s.Raw, 0, infOffset, HalfPerm/2) + //@ ValidPktMetaHdrSublice(s.Raw, infOffset) + //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) + //@ assert idx == oldCurrINF ==> RawBytesToBase(s.Raw[:infOffset]).ValidCurrIdxsSpec() + ret := info.SerializeTo(s.Raw[infOffset : infOffset+path.InfoLen]) - //@ sl.CombineRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, writePerm) - //@ 
sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), writePerm) - //@ fold acc(s.Base.Mem(), R20) - //@ fold acc(s.Mem(ubuf), R20) + //@ sl.CombineRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) + //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) + //@ ValidPktMetaHdrSublice(ubuf, infOffset) + + //@ sl.Unslice_Bytes(s.Raw, 0, infOffset, HalfPerm/2) + //@ sl.CombineRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) + //@ assert idx == oldCurrINF ==> RawBytesToBase(ubuf).ValidCurrIdxsSpec() + //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) + //@ fold acc(s.Base.Mem(), R50) + //@ fold acc(s.Mem(ubuf), R50) + //@ assert idx == oldCurrINF ==> reveal validPktMetaHdr(ubuf) + //@ TemporaryAssumeForIO(idx == oldCurrINF ==> s.absPkt(dp, ubuf) == AbsSetInfoField(old(s.absPkt(dp, ubuf)), info.ToIntermediateAbsInfoField())) return ret } @@ -349,7 +412,7 @@ func (s *Raw) GetHopField(idx int /*@, ghost ubuf []byte @*/) (res path.HopField // GetCurrentHopField is a convenience method that returns the current hop field pointed to by the // CurrHF index in the path meta header. // @ preserves acc(s.Mem(ubuf), R8) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R1) +// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R2) // @ ensures (r == nil) == (s.GetCurrHF(ubuf) < s.GetNumHops(ubuf)) // @ ensures r != nil ==> r.ErrorMem() // @ decreases @@ -418,12 +481,13 @@ func (s *Raw) IsPenultimateHop( /*@ ghost ubuf []byte @*/ ) bool { } // IsLastHop returns whether the current hop is the last hop on the path. -// @ preserves acc(s.Mem(ubuf), R20) +// @ preserves acc(s.Mem(ubuf), R40) +// @ ensures res == s.IsLastHopSpec(ubuf) // @ decreases -func (s *Raw) IsLastHop( /*@ ghost ubuf []byte @*/ ) bool { - //@ unfold acc(s.Mem(ubuf), R20) - //@ defer fold acc(s.Mem(ubuf), R20) - //@ unfold acc(s.Base.Mem(), R20) - //@ defer fold acc(s.Base.Mem(), R20) +func (s *Raw) IsLastHop( /*@ ghost ubuf []byte @*/ ) (res bool) { + //@ unfold acc(s.Mem(ubuf), R40) + //@ defer fold acc(s.Mem(ubuf), R40) + //@ unfold acc(s.Base.Mem(), R40) + //@ defer fold acc(s.Base.Mem(), R40) return int(s.PathMeta.CurrHF) == (s.NumHops - 1) } diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index da1a0d05a..848f1d808 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -18,8 +18,10 @@ package scion import ( "github.com/scionproto/scion/pkg/slayers/path" - . "github.com/scionproto/scion/verification/utils/definitions" - sl "github.com/scionproto/scion/verification/utils/slices" + . "verification/utils/definitions" + sl "verification/utils/slices" + "verification/dependencies/encoding/binary" + "verification/io" ) /**** Predicates ****/ @@ -91,8 +93,9 @@ func (s *Raw) IsFirstHopAfterXover(ghost ub []byte) (res bool) { * introduced this wrapper method which acts as a wrapper. 
*/ preserves acc(s.Mem(ub), R9) +ensures res == s.GetIsXoverSpec(ub) decreases -func (s *Raw) IsXover(ghost ub []byte) bool { +func (s *Raw) IsXover(ghost ub []byte) (res bool) { unfold acc(s.Mem(ub), R9) defer fold acc(s.Mem(ub), R9) return s.Base.IsXover() @@ -122,6 +125,32 @@ pure func (s *Raw) ValidCurrIdxs(ghost ub []byte) bool { s.Base.ValidCurrIdxs() } +ghost +requires acc(s.Mem(ub), _) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s *Raw) EqAbsHeader(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + s.Base.EqAbsHeader(ub) +} + +ghost +requires acc(s.Mem(ub), _) +decreases +pure func (s *Raw) GetIsXoverSpec(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in s.Base.IsXoverSpec() +} + +ghost +requires acc(s.Mem(ub), _) +decreases +pure func (s *Raw) InBounds(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + s.PathMeta.InBounds() +} + /**** End of Stubs ****/ /**** Lemmas ****/ @@ -181,52 +210,6 @@ func (r *Raw) RawPerm(ubuf []byte, p perm) { } /******** End of Lemma: RawPerm ********/ -/******** Lemma: RawIdxPerm ********/ -pred (r *Raw) RawIdxPermRemainder(ubuf []byte, idx int, p perm) { - 0 < p && - acc(r.Base.Mem(), p/2) && - acc(&r.Raw, p/2) && - len(r.Raw) <= len(ubuf) && - r.Raw === ubuf[:len(r.Raw)] && - acc(sl.AbsSlice_Bytes(ubuf, idx, len(ubuf)), p) && - len(r.Raw) == r.Base.Len() && - idx <= len(r.Raw) -} - -ghost -requires 0 < p -requires acc(&r.Raw, p/2) -requires 0 <= idx && idx <= len(r.Raw) -requires acc(sl.AbsSlice_Bytes(r.Raw[:idx], 0, idx), p) && acc(r.Base.Mem(), p/2) -requires r.RawIdxPermRemainder(ubuf, idx, p) -ensures acc(r.Mem(ubuf), p) -ensures acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -decreases -func (r *Raw) UndoRawIdxPerm(ubuf []byte, idx int, p perm) { - unfold r.RawIdxPermRemainder(ubuf, idx, p) - sl.Unslice_Bytes(ubuf, 0, idx, p) - sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), idx, p) - fold acc(r.Mem(ubuf), p) -} - -ghost -requires 0 < p -requires acc(r.Mem(ubuf), p) -requires acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -requires 0 <= idx && idx <= unfolding acc(r.Mem(ubuf), p) in len(r.Raw) -ensures acc(&r.Raw, p/2) -ensures r.Raw === old(unfolding acc(r.Mem(ubuf), p) in r.Raw) -ensures acc(sl.AbsSlice_Bytes(r.Raw[:idx], 0, idx), p) && acc(r.Base.Mem(), p/2) -ensures r.RawIdxPermRemainder(ubuf, idx, p) -decreases -func (r *Raw) RawIdxPerm(ubuf []byte, idx int, p perm) { - unfold acc(r.Mem(ubuf), p) - sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), idx, p) - sl.Reslice_Bytes(ubuf, 0, idx, p) - fold r.RawIdxPermRemainder(ubuf, idx, p) -} -/******** End of Lemma: RawIdxPerm ********/ - /******** Lemma: RawRangePerm ********/ pred (r *Raw) RawRangePermRemainder(ubuf []byte, start, end int, p perm) { 0 < p && @@ -338,4 +321,349 @@ decreases func (s *Raw) RawBufferNonInitMem() []byte { return unfolding acc(s.NonInitMem(), _) in s.Raw } -/**** End of helpful pure functions ****/ \ No newline at end of file +/**** End of helpful pure functions ****/ + +ghost +decreases +pure func NumInfoFields(seg1Len int, seg2Len int, seg3Len int) int { + return seg3Len > 0 ? 3 : (seg2Len > 0 ? 
2 : 1) +} + +ghost +decreases +pure func HopFieldOffset(numINF int, currHF int, headerOffset int) int { + return path.InfoFieldOffset(numINF, headerOffset) + path.HopLen * currHF +} + +ghost +decreases +pure func pktLen(seg1Len int, seg2Len int, seg3Len int, headerOffset int) int { + return HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) + + path.HopLen * (seg1Len + seg2Len + seg3Len) +} + + +ghost +decreases +pure func LengthOfCurrSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) int { + return seg1Len > currHF ? seg1Len : ((seg1Len + seg2Len) > currHF ? seg2Len : seg3Len) +} + +ghost +requires 0 <= currHF +ensures res <= currHF +decreases +pure func LengthOfPrevSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) (res int) { + return seg1Len > currHF ? 0 : ((seg1Len + seg2Len) > currHF ? seg1Len : seg1Len + seg2Len) +} + +ghost +requires 0 <= offset +requires 0 <= currHFIdx && currHFIdx <= segLen +requires offset + path.HopLen * segLen <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures len(res) == segLen - currHFIdx +decreases segLen - currHFIdx +pure func hopFields( + raw []byte, + offset int, + currHFIdx int, + segLen int) (res seq[io.IO_HF]) { + return currHFIdx == segLen ? seq[io.IO_HF]{} : + let hf := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHFIdx, len(raw)) in + seq[io.IO_HF]{hf} ++ hopFields(raw, offset, currHFIdx + 1, segLen) +} + +ghost +requires -1 <= currHFIdx && currHFIdx < len(hopfields) +ensures len(res) == currHFIdx + 1 +decreases currHFIdx + 1 +pure func segPast(hopfields seq[io.IO_HF], currHFIdx int) (res seq[io.IO_HF]) { + return currHFIdx == -1 ? + seq[io.IO_HF]{} : + seq[io.IO_HF]{hopfields[currHFIdx]} ++ segPast(hopfields, currHFIdx - 1) +} + +ghost +requires 0 <= currHFIdx && currHFIdx <= len(hopfields) +ensures len(res) == len(hopfields) - currHFIdx +decreases len(hopfields) - currHFIdx +pure func segFuture(hopfields seq[io.IO_HF], currHFIdx int) (res seq[io.IO_HF]) { + return currHFIdx == len(hopfields) ? seq[io.IO_HF]{} : + seq[io.IO_HF]{hopfields[currHFIdx]} ++ segFuture(hopfields, currHFIdx + 1) +} + +ghost +requires -1 <= currHFIdx && currHFIdx < len(hopfields) +ensures len(res) == currHFIdx + 1 +decreases currHFIdx + 1 +pure func segHistory(hopfields seq[io.IO_HF], currHFIdx int) (res seq[io.IO_ahi]) { + return currHFIdx == -1 ? 
seq[io.IO_ahi]{} : + seq[io.IO_ahi]{hopfields[currHFIdx].Toab()} ++ segHistory(hopfields, currHFIdx - 1) +} + +ghost +requires 0 <= offset +requires 0 < segLen +requires 0 <= currHFIdx && currHFIdx <= segLen +requires offset + path.HopLen * segLen <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures len(res.Future) == segLen - currHFIdx +ensures len(res.History) == currHFIdx +ensures len(res.Past) == currHFIdx +decreases +pure func segment(raw []byte, + offset int, + currHFIdx int, + ainfo io.IO_ainfo, + uinfo set[io.IO_msgterm], + consDir bool, + peer bool, + segLen int) (res io.IO_seg2) { + return let hopfields := hopFields(raw, offset, 0, segLen) in + io.IO_seg2(io.IO_seg3_{ + AInfo :ainfo, + UInfo : uinfo, + ConsDir : consDir, + Peer : peer, + Past : segPast(hopfields, currHFIdx - 1), + Future : segFuture(hopfields, currHFIdx), + History : segHistory(hopfields, currHFIdx - 1), + }) +} + +ghost +opaque +requires 0 <= headerOffset +requires path.InfoFieldOffset(currINFIdx, headerOffset) + path.InfoLen <= offset +requires 0 < segLen +requires offset + path.HopLen * segLen <= len(raw) +requires 0 <= currHFIdx && currHFIdx <= segLen +requires 0 <= currINFIdx && currINFIdx < 3 +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func CurrSeg(raw []byte, + offset int, + currINFIdx int, + currHFIdx int, + segLen int, + headerOffset int) io.IO_seg3 { + return let ainfo := path.Timestamp(raw, currINFIdx, headerOffset) in + let consDir := path.ConsDir(raw, currINFIdx, headerOffset) in + let peer := path.Peer(raw, currINFIdx, headerOffset) in + let uinfo := path.AbsUinfo(raw, currINFIdx, headerOffset) in + segment(raw, offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen) +} + +ghost +opaque +requires 0 <= headerOffset +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) +requires 1 <= currINFIdx && currINFIdx < 4 +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func LeftSeg( + raw []byte, + currINFIdx int, + seg1Len int, + seg2Len int, + seg3Len int, + headerOffset int) option[io.IO_seg3] { + return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in + (currINFIdx == 1 && seg2Len > 0) ? + some(reveal CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset)) : + ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? + some(reveal CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset)) : + none[io.IO_seg3]) +} + +ghost +opaque +requires 0 <= headerOffset +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) +requires -1 <= currINFIdx && currINFIdx < 2 +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func RightSeg( + raw []byte, + currINFIdx int, + seg1Len int, + seg2Len int, + seg3Len int, + headerOffset int) option[io.IO_seg3] { + return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in + (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset)) : + (currINFIdx == 0 && seg2Len > 0) ? 
+ some(CurrSeg(raw, offset, currINFIdx, seg1Len, seg1Len, headerOffset)) : + none[io.IO_seg3] +} + +ghost +opaque +requires 0 <= headerOffset +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) +requires 2 <= currINFIdx && currINFIdx < 5 +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func MidSeg( + raw []byte, + currINFIdx int, + seg1Len int, + seg2Len int, + seg3Len int, + headerOffset int) option[io.IO_seg3] { + return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in + (currINFIdx == 4 && seg2Len > 0) ? + some(CurrSeg(raw, offset, 0, seg1Len, seg1Len, headerOffset)) : + ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset)) : + none[io.IO_seg3]) +} + +ghost +opaque +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires validPktMetaHdr(raw) +decreases +pure func (s *Raw) absPkt(dp io.DataPlaneSpec, raw []byte) (res io.IO_pkt2) { + return let _ := reveal validPktMetaHdr(raw) in + let hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in binary.BigEndian.Uint32(raw[:MetaLen])) in + let metaHdr := DecodedFrom(hdr) in + let currINFIdx := int(metaHdr.CurrINF) in + let currHFIdx := int(metaHdr.CurrHF) in + let seg1Len := int(metaHdr.SegLen[0]) in + let seg2Len := int(metaHdr.SegLen[1]) in + let seg3Len := int(metaHdr.SegLen[2]) in + let segLen := LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in + let prevSegLen := LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in + let numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) in + let offset := HopFieldOffset(numINF, 0, 0) in + io.IO_pkt2(io.IO_Packet2{ + CurrSeg : CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, 0), + LeftSeg : LeftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, 0), + MidSeg : MidSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, 0), + RightSeg : RightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, 0), + }) +} + +// In the future, this should probably use AbsMetaHdr as +// the return type. +ghost +requires MetaLen <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func RawBytesToMetaHdr(raw []byte) MetaHdr { + return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + let hdr := binary.BigEndian.Uint32(raw[:MetaLen]) in + DecodedFrom(hdr) +} + +// In the future, this should probably use AbsBase as +// the return type. 
+ghost +requires MetaLen <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func RawBytesToBase(raw []byte) Base { + return let metaHdr := RawBytesToMetaHdr(raw) in + let seg1 := int(metaHdr.SegLen[0]) in + let seg2 := int(metaHdr.SegLen[1]) in + let seg3 := int(metaHdr.SegLen[2]) in + Base{metaHdr, NumInfoFields(seg1, seg2, seg3), seg1 + seg2 + seg3} +} + +ghost +opaque +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func validPktMetaHdr(raw []byte) bool { + return MetaLen <= len(raw) && + let metaHdr := RawBytesToMetaHdr(raw) in + let seg1 := int(metaHdr.SegLen[0]) in + let seg2 := int(metaHdr.SegLen[1]) in + let seg3 := int(metaHdr.SegLen[2]) in + let base := RawBytesToBase(raw) in + 0 < metaHdr.SegLen[0] && + base.ValidCurrIdxsSpec() && + pktLen(seg1, seg2, seg3, 0) <= len(raw) +} + +ghost +requires MetaLen <= idx && idx <= len(raw) +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.AbsSlice_Bytes(raw[:idx], 0, idx), R56) +ensures RawBytesToMetaHdr(raw) == RawBytesToMetaHdr(raw[:idx]) +ensures RawBytesToBase(raw) == RawBytesToBase(raw[:idx]) +decreases +func ValidPktMetaHdrSublice(raw []byte, idx int) { + reveal validPktMetaHdr(raw) + reveal validPktMetaHdr(raw[:idx]) + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.AbsSlice_Bytes(raw[:idx], 0, idx), R56) + assert forall i int :: { &raw[:MetaLen][i] } 0 <= i && i < MetaLen ==> + &raw[:MetaLen][i] == &raw[:idx][:MetaLen][i] + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + fold acc(sl.AbsSlice_Bytes(raw[:idx], 0, idx), R56) +} + +ghost +decreases +pure func AbsSetInfoField(oldPkt io.IO_pkt2, info path.IntermediateAbsInfoField) (newPkt io.IO_pkt2) { + return let newCurrSeg := io.IO_seg3(io.IO_seg3_{ + info.AInfo, + info.UInfo, + info.ConsDir, + info.Peer, + oldPkt.CurrSeg.Past, + oldPkt.CurrSeg.Future, + oldPkt.CurrSeg.History}) in + io.IO_pkt2(io.IO_Packet2{newCurrSeg, oldPkt.LeftSeg, oldPkt.MidSeg, oldPkt.RightSeg}) +} + +ghost +requires acc(s.Mem(ub), _) +decreases +pure func (s *Raw) IsLastHopSpec(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + int(s.PathMeta.CurrHF) == (s.NumHops - 1) +} + +ghost +preserves acc(s.Mem(ubuf), R55) +preserves s.IsLastHopSpec(ubuf) +preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) +preserves dp.Valid() +preserves validPktMetaHdr(ubuf) +preserves s.EqAbsHeader(ubuf) +ensures len(s.absPkt(dp, ubuf).CurrSeg.Future) == 1 +decreases +func (s *Raw) LastHopLemma(ubuf []byte, dp io.DataPlaneSpec) { + reveal validPktMetaHdr(ubuf) + hdr := (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) in + binary.BigEndian.Uint32(ubuf[:MetaLen])) + metaHdr := DecodedFrom(hdr) + currINFIdx := int(metaHdr.CurrINF) + currHFIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segLen := LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) + offset := HopFieldOffset(numINF, 0, 0) + pkt := reveal s.absPkt(dp, ubuf) + assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, 0) + assert len(pkt.CurrSeg.Future) == 1 +} \ No newline at end of file diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index 77f5d1aac..2f0a44735 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -320,6 +320,11 @@ 
func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) // @ preserves df != nil && df.Mem() // @ ensures res == nil ==> s.Mem(data) +// @ ensures res == nil && typeOf(s.GetPath(data)) == *scion.Raw ==> +// @ ValidPktMetaHdr(data) +// @ ensures res == nil && typeOf(s.GetPath(data)) == *scion.Raw ==> +// @ s.EqAbsHeader(data) +// @ ensures res == nil ==> s.EqPathType(data) // @ ensures res != nil ==> s.NonInitMem() && res.ErrorMem() // @ decreases func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res error) { @@ -424,6 +429,9 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R40) // @ fold s.Mem(data) + // @ TemporaryAssumeForIO(typeOf(s.GetPath(data)) == *scion.Raw ==> ValidPktMetaHdr(data)) + // @ TemporaryAssumeForIO(typeOf(s.GetPath(data)) == *scion.Raw ==> s.EqAbsHeader(data)) + // @ TemporaryAssumeForIO(s.EqPathType(data)) return nil } diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index 632535619..01bd159e6 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -27,8 +27,9 @@ import ( "github.com/scionproto/scion/pkg/slayers/path/onehop" "github.com/scionproto/scion/pkg/slayers/path/scion" - . "github.com/scionproto/scion/verification/utils/definitions" - "github.com/scionproto/scion/verification/utils/slices" + . "verification/utils/definitions" + sl "verification/utils/slices" + "encoding/binary" ) pred PathPoolMem(pathPool []path.Path, pathPoolRaw path.Path) { @@ -231,8 +232,8 @@ pred (s *SCION) ChecksumMem() { acc(&s.RawSrcAddr) && acc(&s.RawDstAddr) && len(s.RawSrcAddr) % 2 == 0 && len(s.RawDstAddr) % 2 == 0 && acc(&s.SrcIA) && acc(&s.DstIA) && - slices.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) && - slices.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) + sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) && + sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) } pred (b *BaseLayer) Mem(ghost ub []byte, ghost breakPoint int) { @@ -349,6 +350,116 @@ func (s *SCION) GetPath(ub []byte) path.Path { return unfolding acc(s.Mem(ub), _) in s.Path } +ghost +opaque +pure +requires acc(s.Mem(ub), _) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +decreases +func (s *SCION) EqAbsHeader(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + let low := CmnHdrLen+s.AddrHdrLenSpecInternal() in + let high := s.HdrLen*LineLen in + GetAddressOffset(ub) == low && + GetLength(ub) == int(high) && + // Might be worth introducing EqAbsHeader as an interface method on Path + // to avoid doing these casts, especially when we add support for EPIC. + typeOf(s.Path) == (*scion.Raw) && + unfolding acc(s.Path.Mem(ub[low:high]), _) in + unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in + let _ := Asserting(forall k int :: {&ub[low:high][k]} 0 <= k && k < high ==> + &ub[low:high][k] == &ub[low + k]) in + let _ := Asserting(forall k int :: {&ub[low:high][:scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> + &ub[low:high][:scion.MetaLen][k] == &ub[low:high][k]) in + s.Path.(*scion.Raw).Base.GetMetaHdr() == + scion.DecodedFrom(binary.BigEndian.Uint32(ub[low:high][:scion.MetaLen])) +} + +// Checks if the common path header is valid in the serialized scion packet. 
+ghost
+opaque
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+decreases
+pure func ValidPktMetaHdr(raw []byte) bool {
+	return CmnHdrLen <= len(raw) &&
+	let start := GetAddressOffset(raw) in
+	let end := start+scion.MetaLen in
+	0 <= start && end <= len(raw) &&
+	let rawHdr := raw[start:end] in
+	let length := GetLength(raw) in
+	length <= len(raw) &&
+	unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in
+	let _ := Asserting(forall k int :: {&rawHdr[k]} 0 <= k && k < scion.MetaLen ==> &rawHdr[k] == &raw[start + k]) in
+	let hdr := binary.BigEndian.Uint32(rawHdr) in
+	let metaHdr := scion.DecodedFrom(hdr) in
+	let seg1 := int(metaHdr.SegLen[0]) in
+	let seg2 := int(metaHdr.SegLen[1]) in
+	let seg3 := int(metaHdr.SegLen[2]) in
+	let base := scion.Base{metaHdr, scion.NumInfoFields(seg1, seg2, seg3), seg1+seg2+seg3} in
+	metaHdr.InBounds() &&
+	0 < metaHdr.SegLen[0] &&
+	base.ValidCurrIdxsSpec() &&
+	scion.pktLen(seg1, seg2, seg3, start) <= length
+}
+
+ghost
+opaque
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+decreases
+pure func IsSupportedPkt(raw []byte) bool {
+	return CmnHdrLen <= len(raw) &&
+	let pathType := path.Type(GetPathType(raw)) in
+	let nextHdr := L4ProtocolType(GetNextHdr(raw)) in
+	pathType == scion.PathType &&
+	nextHdr != L4SCMP
+}
+
+ghost
+requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _)
+requires CmnHdrLen <= len(ub)
+decreases
+pure func GetAddressOffset(ub []byte) int {
+	return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in
+	let dstAddrLen := AddrType(ub[9] >> 4 & 0x7).Length() in
+	let srcAddrLen := AddrType(ub[9] & 0x7).Length() in
+	CmnHdrLen + 2*addr.IABytes + dstAddrLen + srcAddrLen
+}
+
+ghost
+requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _)
+requires CmnHdrLen <= len(ub)
+decreases
+pure func GetLength(ub []byte) int {
+	return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in int(ub[5])*LineLen
+}
+
+ghost
+requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _)
+requires CmnHdrLen <= len(ub)
+decreases
+pure func GetPathType(ub []byte) int {
+	return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in int(ub[8])
+}
+
+ghost
+requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _)
+requires CmnHdrLen <= len(ub)
+decreases
+pure func GetNextHdr(ub []byte) int {
+	return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in int(ub[4])
+}
+
+ghost
+opaque
+requires acc(s.Mem(ub), _)
+requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _)
+decreases
+pure func (s *SCION) EqPathType(ub []byte) bool {
+	return unfolding acc(s.Mem(ub), _) in
+	path.Type(GetPathType(ub)) == s.PathType &&
+	L4ProtocolType(GetNextHdr(ub)) == s.NextHdr
+}
+
 ghost
 pure
 requires acc(s.Mem(ub), _)
diff --git a/router/dataplane.go b/router/dataplane.go
index 4c9391ac5..2e69a0c3a 100644
--- a/router/dataplane.go
+++ b/router/dataplane.go
@@ -137,11 +137,10 @@ type BatchConn interface {
 	// @ ensures err != nil ==> prophecyM == 0
 	// @ ensures err == nil ==> prophecyM == n
 	// @ ensures io.token(old(MultiReadBioNext(place, prophecyM)))
-	// @ ensures old(MultiReadBioCorrectIfs(place, prophecyM, ifsToIO_ifs(ingressID)))
+	// @ ensures old(MultiReadBioCorrectIfs(place, prophecyM, path.ifsToIO_ifs(ingressID)))
 	// @ ensures err == nil ==>
-	// @ 	forall i int :: { &msgs[i] } 0 <= i && i < n ==>
-	// @ 	unfolding acc(msgs[i].Mem(), _) in absIO_val(dp, msgs[i].Buffers[0], ingressID) ==
-	// @ 	old(MultiReadBioIO_val(place, n)[i])
+	// @ 	forall i int :: { &msgs[i] } 0 <= i && i < n ==>
+	// @ 	MsgToAbsVal(dp, &msgs[i], ingressID) == old(MultiReadBioIO_val(place, n)[i])
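For intuition, the ghost accessors and ValidPktMetaHdr introduced above amount to fixed-offset byte reads on the wire format. The following standalone Go sketch mirrors them; it is illustrative only and not part of any patch. The constants (cmnHdrLen = 12, lineLen = 4, iaBytes = 8, metaLen = 4) are assumptions matching slayers.CmnHdrLen, LineLen, addr.IABytes and scion.MetaLen, as is the bit layout unpacked by scion.DecodedFrom:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	cmnHdrLen = 12 // fixed part of the SCION common header, in bytes
	lineLen   = 4  // header lengths are counted in 4-byte lines
	iaBytes   = 8  // size of one ISD-AS identifier
	metaLen   = 4  // size of the path meta header
)

// addrLen mirrors AddrType(...).Length(): the low two bits of an address-type
// nibble encode the host-address length in 4-byte units.
func addrLen(t byte) int { return (int(t&0x3) + 1) * lineLen }

func getNextHdr(ub []byte) int  { return int(ub[4]) }           // cf. GetNextHdr
func getLength(ub []byte) int   { return int(ub[5]) * lineLen } // cf. GetLength
func getPathType(ub []byte) int { return int(ub[8]) }           // cf. GetPathType

// getAddressOffset mirrors GetAddressOffset: the path (and thus the path meta
// header) starts after the common header, both ISD-AS identifiers, and both
// host addresses.
func getAddressOffset(ub []byte) int {
	dst := addrLen(ub[9] >> 4 & 0x7)
	src := addrLen(ub[9] & 0x7)
	return cmnHdrLen + 2*iaBytes + dst + src
}

// decodedFrom unpacks the 4-byte path meta header the way scion.DecodedFrom
// does: CurrINF (2 bits), CurrHF (6 bits), 6 reserved bits, three 6-bit SegLens.
func decodedFrom(line uint32) (currINF, currHF uint8, segLen [3]uint8) {
	currINF = uint8(line >> 30)
	currHF = uint8(line>>24) & 0x3F
	segLen[0] = uint8(line>>12) & 0x3F
	segLen[1] = uint8(line>>6) & 0x3F
	segLen[2] = uint8(line) & 0x3F
	return
}

func main() {
	pkt := make([]byte, 64)
	pkt[5] = 16   // HdrLen: 16 lines = 64 bytes
	pkt[8] = 1    // path type (the value of scion.PathType)
	pkt[9] = 0x00 // 4-byte destination and source host addresses
	off := getAddressOffset(pkt) // 12 + 16 + 4 + 4 = 36
	binary.BigEndian.PutUint32(pkt[off:off+metaLen], 0x01003000) // CurrINF=0, CurrHF=1, SegLen=[3 0 0]
	inf, hf, segs := decodedFrom(binary.BigEndian.Uint32(pkt[off : off+metaLen]))
	fmt.Println(off, getNextHdr(pkt), getPathType(pkt), getLength(pkt), inf, hf, segs)
}

Running it prints the path offset together with the decoded CurrINF/CurrHF and segment lengths — exactly the quantities that ValidPktMetaHdr and EqAbsHeader constrain.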
// TODO (Markus): uint16 or option[io.IO_ifs] for ingress ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place, ghost dp io.DataPlaneSpec @*/) (n int, err error) // @ requires acc(addr.Mem(), _) @@ -154,10 +153,19 @@ type BatchConn interface { // (VerifiedSCION) opted for less reusable spec for WriteBatch for // performance reasons. // @ requires len(msgs) == 1 - // @ preserves acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() + // @ requires acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() + // @ ensures acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() // @ ensures err == nil ==> 0 <= n && n <= len(msgs) // @ ensures err != nil ==> err.ErrorMem() - WriteBatch(msgs underlayconn.Messages, flags int) (n int, err error) + // contracts for IO-spec + // @ requires dp.Valid() + // @ requires MsgToAbsVal(dp, &msgs[0], egressID) == ioAbsPkts + // @ requires io.token(place) && io.CBioIO_bio3s_send(place, ioAbsPkts) + // @ ensures dp.Valid() + // (VerifiedSCION) the permission to the protocol must always be returned, otherwise the router could not continue + // after failing to send a packet. + // @ ensures io.token(old(io.dp3s_iospec_bio3s_send_T(place, ioAbsPkts))) + WriteBatch(msgs underlayconn.Messages, flags int /*@, ghost egressID uint16, ghost place io.Place, ghost ioAbsPkts io.IO_val, ghost dp io.DataPlaneSpec @*/) (n int, err error) // @ requires Mem() // @ ensures err != nil ==> err.ErrorMem() // @ decreases @@ -744,6 +752,7 @@ func (d *DataPlane) AddNextHopBFD(ifID uint16, src, dst *net.UDPAddr, cfg contro // @ requires dp.Valid() // @ requires d.DpAgreesWithSpec(dp) // @ requires io.token(place) && dp.dp3s_iospec_ordered(state, place) +// @ #backend[moreJoins()] func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost state io.IO_dp3s_state_local, ghost dp io.DataPlaneSpec @*/) error { // @ share d, ctx d.mtx.Lock() @@ -803,8 +812,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ requires let d := *dPtr in // @ d.DpAgreesWithSpec(dp) // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; + // @ #backend[moreJoins()] func /*@ rc @*/ (ingressID uint16, rd BatchConn, dPtr **DataPlane /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { - // @ ghost ioIngressID := ifsToIO_ifs(ingressID) d := *dPtr msgs := conn.NewReadMessages(inputBatchCnt) // @ requires forall i int :: { &msgs[i] } 0 <= i && i < len(msgs) ==> @@ -867,8 +876,11 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant ingressID in d.getDomForwardingMetrics() // @ invariant acc(rd.Mem(), _) // @ invariant processor.sInit() && processor.sInitD() === d + // @ invariant processor.getIngressID() == ingressID // @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + // @ invariant d.DpAgreesWithSpec(dp) && dp.Valid() for d.running { + // @ ghost ioIngressID := path.ifsToIO_ifs(ingressID) // Multi recv event // @ ghost ioLock.Lock() // @ unfold SharedInv!< dp, ioSharedArg !>() @@ -876,14 +888,24 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ ghost numberOfReceivedPacketsProphecy := AllocProphecy() // @ ExtractMultiReadBio(dp, t, numberOfReceivedPacketsProphecy, s) // @ MultiUpdateElemWitness(t, numberOfReceivedPacketsProphecy, ioIngressID, s, ioSharedArg) - // @ ghost ioValSeq := 
MultiReadBioIO_val(t,numberOfReceivedPacketsProphecy) + // @ ghost ioValSeq := MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy) // @ ghost sN := MultiReadBioUpd(t, numberOfReceivedPacketsProphecy, s) // @ ghost tN := MultiReadBioNext(t, numberOfReceivedPacketsProphecy) // @ assert dp.dp3s_iospec_ordered(sN, tN) + // @ BeforeReadBatch: pkts, err := rd.ReadBatch(msgs /*@, ingressID, numberOfReceivedPacketsProphecy, t , dp @*/) + // @ assert old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)) == ioValSeq + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ ioValSeq[i] == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i]) + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i] // @ ghost *ioSharedArg.State = sN // @ ghost *ioSharedArg.Place = tN + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i]) // @ MultiElemWitnessConv(ioSharedArg.IBufY, ioIngressID, ioValSeq) // @ fold SharedInv!< dp, ioSharedArg !>() // @ ioLock.Unlock() @@ -905,6 +927,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ !msgs[i].HasWildcardPermAddr() // @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> // @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer()) + // @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i] // (VerifiedSCION) using regular for loop instead of range loop to avoid unnecessary // complications with permissions @@ -927,6 +951,15 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> // @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer()) // @ invariant processor.sInit() && processor.sInitD() === d + // @ invariant processor.getIngressID() == ingressID + // contracts for IO-spec + // @ invariant pkts <= len(ioValSeq) + // @ invariant d.DpAgreesWithSpec(dp) && dp.Valid() + // @ invariant ioIngressID == path.ifsToIO_ifs(ingressID) + // @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; + // @ invariant forall i int :: { &msgs[i] } i0 <= i && i < pkts ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i] + // @ invariant MultiElemWitnessWithIndex(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0) // @ decreases pkts - i0 for i0 := 0; i0 < pkts; i0++ { // @ assert &msgs[:pkts][i0] == &msgs[i0] @@ -959,12 +992,24 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ assert p.Buffers === m.Buffers // @ assert acc(&p.Buffers[0]) // @ assert p.N <= len(p.Buffers[0]) - // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, writePerm) + // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm) tmpBuf := p.Buffers[0][:p.N] + // @ ghost absPktTmpBuf := absIO_val(dp, tmpBuf, ingressID) + // @ ghost absPktBuf0 := absIO_val(dp, msgs[i0].Buffers[0], ingressID) + // @ assert msgs[i0] === p + // @ absIO_valWidenLemma(dp, p.Buffers[0], ingressID, p.N) + // @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf === absPktBuf0 + // @ MultiElemWitnessStep(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0) + // @ assert ioValSeq[i0].isIO_val_Pkt2 ==> + // @ ElemWitness(ioSharedArg.IBufY, ioIngressID, 
ioValSeq[i0].IO_val_Pkt2_2) + // @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf == ioValSeq[i0] + // @ assert path.ifsToIO_ifs(processor.getIngressID()) == ioIngressID + // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm) // @ assert sl.AbsSlice_Bytes(tmpBuf, 0, p.N) // @ assert sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)) - result, err /*@ , addrAliasesPkt @*/ := processor.processPkt(tmpBuf, srcAddr) + result, err /*@ , addrAliasesPkt, newAbsPkt @*/ := processor.processPkt(tmpBuf, srcAddr /*@, ioLock, ioSharedArg, dp @*/) // @ fold scmpErr.Mem() + switch { case err == nil: // @ unfold scmpErr.Mem() @@ -1012,6 +1057,13 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta continue } + // (VerifiedSCION) we currently have this assumption because we cannot think of a sound way to capture + // the behaviour of errors.As(...) in our specifications. Nonetheless, we checked extensively that, when + // processPkt does not return an error or returns an scmpError (and thus errors.As(err, &scmpErr) succeeds), + // result.OutPkt is always non-nil. For the other kinds of errors, the result is nil, but that branch is killed + // before this point. + // @ assume result.OutPkt != nil + // Write to OutConn; drop the packet if this would block. // Use WriteBatch because it's the only available function that // supports MSG_DONTWAIT. @@ -1025,8 +1077,26 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta writeMsgs[0].Addr = result.OutAddr } // @ sl.NilAcc_Bytes() + // @ assert absIO_val(dp, result.OutPkt, result.EgressID) == absIO_val(dp, writeMsgs[0].Buffers[0], result.EgressID) + // @ assert result.OutPkt != nil ==> newAbsPkt == absIO_val(dp, writeMsgs[0].Buffers[0], result.EgressID) // @ fold acc(writeMsgs[0].Mem(), R50) - _, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT) + + // @ ghost ioLock.Lock() + // @ unfold SharedInv!< dp, ioSharedArg !>() + // @ ghost t, s := *ioSharedArg.Place, *ioSharedArg.State + // @ ghost if(newAbsPkt.isIO_val_Pkt2) { + // @ ApplyElemWitness(s.obuf, ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) + // @ assert newAbsPkt.IO_val_Pkt2_2 in AsSet(s.obuf[newAbsPkt.IO_val_Pkt2_1]) + // @ assert dp.dp3s_iospec_bio3s_send_guard(s, t, newAbsPkt) + // @ } else { assert newAbsPkt.isIO_val_Unsupported } + // @ unfold dp.dp3s_iospec_ordered(s, t) + // @ unfold dp.dp3s_iospec_bio3s_send(s, t) + // @ io.TriggerBodyIoSend(newAbsPkt) + // @ ghost tN := io.dp3s_iospec_bio3s_send_T(t, newAbsPkt) + _, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT /*@, result.EgressID, t, newAbsPkt, dp @*/) + // @ ghost *ioSharedArg.Place = tN + // @ fold SharedInv!< dp, ioSharedArg !>() + // @ ghost ioLock.Unlock() // @ unfold acc(writeMsgs[0].Mem(), R50) // @ ghost if addrAliasesPkt && result.OutAddr != nil { // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15) @@ -1293,7 +1363,7 @@ type processResult struct { } // @ requires acc(d.Mem(), _) && d.getMacFactory() != nil -// @ ensures res.sInit() && res.sInitD() == d +// @ ensures res.sInit() && res.sInitD() == d && res.getIngressID() == ingressID // @ decreases func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcessor) { var verScionTmp gopacket.SerializeBuffer @@ -1322,6 +1392,7 @@ func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcess // @ preserves p.sInit() // @ ensures p.sInitD() == old(p.sInitD()) +// @ ensures p.getIngressID() == 
old(p.getIngressID()) // @ ensures p.sInitRawPkt() == nil // @ ensures p.sInitPath() == nil // @ ensures p.sInitHopField() == path.HopField{} @@ -1353,10 +1424,12 @@ func (p *scionPacketProcessor) reset() (err error) { // @ acc(d.Mem(), _) && // @ d.WellConfigured() && // @ d.getValSvc() != nil && -// @ d.getValForwardingMetrics() != nil +// @ d.getValForwardingMetrics() != nil && +// @ d.DpAgreesWithSpec(dp) // @ ensures p.sInit() // @ ensures acc(p.sInitD().Mem(), _) // @ ensures p.sInitD() == old(p.sInitD()) +// @ ensures p.getIngressID() == old(p.getIngressID()) // @ ensures p.sInitD().validResult(respr, addrAliasesPkt) // @ ensures acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), 1 - R15) // @ ensures addrAliasesPkt ==> ( @@ -1366,16 +1439,30 @@ func (p *scionPacketProcessor) reset() (err error) { // @ ensures respr.OutPkt !== rawPkt && respr.OutPkt != nil ==> // @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +// @ requires let absPkt := absIO_val(dp, rawPkt, p.getIngressID()) in +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.getIngressID()), absPkt.IO_val_Pkt2_2) +// @ ensures dp.Valid() +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) +// @ ensures newAbsPkt.isIO_val_Pkt2 ==> +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ensures reserr != nil && respr.OutPkt != nil ==> newAbsPkt.isIO_val_Unsupported // @ decreases 0 if sync.IgnoreBlockingForTermination() +// @ #backend[moreJoins(1)] func (p *scionPacketProcessor) processPkt(rawPkt []byte, - srcAddr *net.UDPAddr) (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { + srcAddr *net.UDPAddr /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { if err := p.reset(); err != nil { // @ fold p.sInitD().validResult(processResult{}, false) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, io.IO_val_Unit{} @*/ } // @ assert p.sInitD().getValForwardingMetrics() != nil // @ unfold p.sInit() + // @ assert !p.segmentChange // @ ghost d := p.d p.rawPkt = rawPkt p.srcAddr = srcAddr @@ -1389,7 +1476,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, if err != nil { // @ fold p.sInit() // @ fold p.sInitD().validResult(processResult{}, false) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, io.IO_val_Unit{} @*/ } /*@ ghost var ub []byte @@ -1438,7 +1525,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ defer fold p.sInit() // @ defer fold p.d.validResult(processResult{}, false) // @ ghost defer sl.CombineRange_Bytes(ub, start, end, writePerm) - return processResult{}, p.processIntraBFD(pld) /*@, false @*/ + return processResult{}, p.processIntraBFD(pld) /*@, false, io.IO_val_Unit{} @*/ } // @ establishMemUnsupportedPathTypeNextHeader() // @ defer fold p.sInit() @@ -1446,7 +1533,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ ghost defer ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) // @ ghost defer 
sl.CombineRange_Bytes(ub, start, end, writePerm) return processResult{}, serrors.WithCtx(unsupportedPathTypeNextHeader, - "type", pathType, "header", nextHdr(p.lastLayer /*@, ub @*/)) /*@, false @*/ + "type", pathType, "header", nextHdr(p.lastLayer /*@, ub @*/)) /*@, false, io.IO_val_Unit{} @*/ case onehop.PathType: if p.lastLayer.NextLayerType( /*@ ub @*/ ) == layers.LayerTypeBFD { // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) } @@ -1459,12 +1546,12 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ defer fold p.sInit() // @ defer fold p.d.validResult(processResult{}, false) // @ ghost defer ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) - return processResult{}, malformedPath /*@, false @*/ + return processResult{}, malformedPath /*@, false, io.IO_val_Unit{} @*/ } // @ defer fold p.sInit() // @ defer fold p.d.validResult(processResult{}, false) // @ ghost defer ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) - return processResult{}, p.processInterBFD(ohp, pld) /*@, false @*/ + return processResult{}, p.processInterBFD(ohp, pld) /*@, false, io.IO_val_Unit{} @*/ } // @ sl.CombineRange_Bytes(ub, start, end, writePerm) // @ ghost if lastLayerIdx >= 0 && !offsets[lastLayerIdx].isNil { @@ -1473,10 +1560,12 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ } // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) // @ unfold acc(p.d.Mem(), _) - v1, v2 /*@, aliasesPkt @*/ := p.processOHP() + // @ TemporaryAssumeForIO(reveal p.scionLayer.EqPathType(p.rawPkt)) + // @ assert !(reveal slayers.IsSupportedPkt(p.rawPkt)) + v1, v2 /*@, aliasesPkt, newAbsPkt @*/ := p.processOHP( /* @ dp @ */ ) // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) // @ fold p.sInit() - return v1, v2 /*@, aliasesPkt @*/ + return v1, v2 /*@, aliasesPkt, newAbsPkt @*/ case scion.PathType: // @ sl.CombineRange_Bytes(ub, start, end, writePerm) // @ ghost if lastLayerIdx >= 0 && !offsets[lastLayerIdx].isNil { @@ -1484,15 +1573,22 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) // @ } // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) - v1, v2 /*@ , addrAliasesPkt @*/ := p.processSCION( /*@ p.rawPkt, ub == nil, llStart, llEnd @*/ ) + // (VerifiedSCION) the following statements assume properties that follow directly + // from `decodeLayers`, but we cannot currently establish them because we cannot + // properly frame this yet around calls to the ghost slice operations. 
+ // @ TemporaryAssumeForIO((typeOf(p.scionLayer.GetPath(p.rawPkt)) == *scion.Raw) ==> slayers.ValidPktMetaHdr(p.rawPkt)) + // @ TemporaryAssumeForIO((typeOf(p.scionLayer.GetPath(p.rawPkt)) == *scion.Raw) ==> p.scionLayer.EqAbsHeader(p.rawPkt)) + // @ TemporaryAssumeForIO(p.scionLayer.EqPathType(p.rawPkt)) + // @ TemporaryAssumeForIOWitness(absIO_val(dp, p.rawPkt, p.ingressID), p.ingressID, ioSharedArg) + v1, v2 /*@ , addrAliasesPkt, newAbsPkt @*/ := p.processSCION( /*@ p.rawPkt, ub == nil, llStart, llEnd, ioLock, ioSharedArg, dp @*/ ) // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, v2 == nil, hasHbhLayer, hasE2eLayer) // @ fold p.sInit() - return v1, v2 /*@, addrAliasesPkt @*/ + return v1, v2 /*@, addrAliasesPkt, newAbsPkt @*/ case epic.PathType: // @ TODO() v1, v2 := p.processEPIC() // @ fold p.sInit() - return v1, v2 /*@, false @*/ + return v1, v2 /*@, false, io.IO_val_Unit{} @*/ default: // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) } // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) @@ -1500,7 +1596,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ fold p.d.validResult(processResult{}, false) // @ fold p.sInit() // @ establishMemUnsupportedPathType() - return processResult{}, serrors.WithCtx(unsupportedPathType, "type", pathType) /*@, false @*/ + return processResult{}, serrors.WithCtx(unsupportedPathType, "type", pathType) /*@, false, io.IO_val_Unit{} @*/ } } @@ -1617,6 +1713,7 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ requires acc(&p.path) // @ requires p.scionLayer.Mem(ub) // @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.segmentChange) && !p.segmentChange // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) // @ preserves acc(&p.lastLayer, R10) // @ preserves p.lastLayer != nil @@ -1624,14 +1721,15 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ acc(p.lastLayer.Mem(nil), R10) // @ preserves (p.lastLayer !== &p.scionLayer && !llIsNil) ==> // @ acc(p.lastLayer.Mem(ub[startLL:endLL]), R10) -// @ preserves acc(&p.ingressID, R20) +// @ requires acc(&p.ingressID, R20) // @ preserves acc(&p.infoField) // @ preserves acc(&p.hopField) -// @ preserves acc(&p.segmentChange) // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) // @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) +// @ ensures acc(&p.segmentChange) +// @ ensures acc(&p.ingressID, R20) // @ ensures acc(&p.d, R5) // @ ensures acc(&p.path) // @ ensures acc(&p.rawPkt, R1) @@ -1646,8 +1744,24 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ ensures respr.OutPkt !== ub && respr.OutPkt != nil ==> // @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires dp.Valid() +// @ requires (typeOf(p.scionLayer.GetPath(ub)) == *scion.Raw) ==> slayers.ValidPktMetaHdr(ub) +// @ requires (typeOf(p.scionLayer.GetPath(ub)) == *scion.Raw) ==> p.scionLayer.EqAbsHeader(ub) +// @ requires p.scionLayer.EqPathType(ub) +// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +// @ requires let absPkt := absIO_val(dp, 
p.rawPkt, p.ingressID) in +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) +// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ newAbsPkt.isIO_val_Unsupported +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { +func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { var ok bool // @ unfold acc(p.scionLayer.Mem(ub), R20) @@ -1658,9 +1772,9 @@ func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil // @ p.scionLayer.DowngradePerm(ub) // @ establishMemMalformedPath() // @ fold p.d.validResult(processResult{}, false) - return processResult{}, malformedPath /*@ , false @*/ + return processResult{}, malformedPath /*@ , false, io.IO_val_Unit{} @*/ } - return p.process( /*@ ub, llIsNil, startLL, endLL @*/ ) + return p.process( /*@ ub, llIsNil, startLL, endLL , ioLock, ioSharedArg, dp @*/ ) } // @ trusted @@ -1766,13 +1880,17 @@ type macBuffersT struct { } // @ trusted -// @ requires false +// @ requires false +// @ requires dp.Valid() +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported func (p *scionPacketProcessor) packSCMP( typ slayers.SCMPType, code slayers.SCMPCode, scmpP gopacket.SerializableLayer, cause error, -) (processResult, error) { + /* @ ghost dp io.DataPlaneSpec, @*/ +) (respr processResult, reserr error) { // check invoking packet was an SCMP error: if p.lastLayer.NextLayerType() == slayers.LayerTypeSCMP { @@ -1795,7 +1913,8 @@ func (p *scionPacketProcessor) packSCMP( // @ requires acc(&p.path, R20) // @ requires p.path === p.scionLayer.GetPath(ub) // @ requires acc(&p.hopField) && acc(&p.infoField) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1) +// @ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1) +// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1) // @ ensures acc(&p.d, R50) // @ ensures acc(p.scionLayer.Mem(ub), R6) // @ ensures acc(&p.path, R20) @@ -1813,16 +1932,26 @@ func (p *scionPacketProcessor) packSCMP( // @ unfolding acc(p.scionLayer.Mem(ub), R10) in // @ p.path.GetCurrINF(ubPath) < p.path.GetNumINF(ubPath)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ ensures dp.Valid() +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(dp, ub)) +// @ ensures respr.OutPkt == nil // @ decreases -func (p *scionPacketProcessor) 
parsePath( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { var err error // @ unfold acc(p.scionLayer.Mem(ub), R6) // @ defer fold acc(p.scionLayer.Mem(ub), R6) // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) // @ ghost ubPath := ub[startP:endP] - // @ sl.SplitRange_Bytes(ub, startP, endP, R1) - // @ ghost defer sl.CombineRange_Bytes(ub, startP, endP, R1) + // @ sl.SplitRange_Bytes(ub, startP, endP, R2) + // @ ghost defer sl.CombineRange_Bytes(ub, startP, endP, R2) p.hopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ) // @ fold p.d.validResult(processResult{}, false) if err != nil { @@ -1834,6 +1963,10 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // TODO(lukedirtwalker) parameter problem invalid path? return processResult{}, err } + // @ TemporaryAssumeForIO(slayers.ValidPktMetaHdr(ub)) + // @ TemporaryAssumeForIO(len(absPkt(dp, ub).CurrSeg.Future) > 0) + // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) + // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(dp, ub))) return processResult{}, nil } @@ -1844,8 +1977,12 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr error) { +func (p *scionPacketProcessor) validateHopExpiry( /*@ ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { expiration := util.SecsToTime(p.infoField.Timestamp). 
Add(path.ExpTimeToDuration(p.hopField.ExpTime)) expired := expiration.Before(time.Now()) @@ -1870,13 +2007,17 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, serrors.New("expired hop", "cons_dir", p.infoField.ConsDir, "if_id", p.ingressID, "curr_inf", p.path.PathMeta.CurrINF, "curr_hf", p.path.PathMeta.CurrHF), + /*@ dp, @*/ ) } -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) -// @ preserves acc(&p.ingressID, R20) +// @ requires acc(&p.ingressID, R21) +// @ requires acc(&p.hopField, R20) +// @ requires acc(&p.infoField, R20) // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) +// @ ensures acc(&p.ingressID, R21) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() @@ -1884,8 +2025,16 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr // @ p.ingressID == 0 || p.hopField.ConsIngress == p.ingressID) // @ ensures reserr == nil && !p.infoField.ConsDir ==> ( // @ p.ingressID == 0 || p.hopField.ConsEgress == p.ingressID) +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(oldPkt) +// @ requires p.EqAbsInfoField(oldPkt) +// @ ensures reserr == nil ==> AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateIngressID() (respr processResult, reserr error) { +func (p *scionPacketProcessor) validateIngressID( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { pktIngressID := p.hopField.ConsIngress errCode := slayers.SCMPCodeUnknownHopFieldIngress if !p.infoField.ConsDir { @@ -1900,8 +2049,12 @@ func (p *scionPacketProcessor) validateIngressID() (respr processResult, reserr &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, serrors.New("ingress interface invalid", "pkt_ingress", pktIngressID, "router_ingress", p.ingressID), + /*@ dp, @*/ ) } + // @ reveal p.EqAbsHopField(oldPkt) + // @ reveal p.EqAbsInfoField(oldPkt) + // @ assert reveal AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) // @ fold p.d.validResult(respr, false) return processResult{}, nil } @@ -1910,19 +2063,33 @@ func (p *scionPacketProcessor) validateIngressID() (respr processResult, reserr // @ requires acc(p.scionLayer.Mem(ubScionL), R19) // @ requires acc(&p.path, R20) // @ requires p.path === p.scionLayer.GetPath(ubScionL) -// @ preserves acc(&p.ingressID, R20) +// @ preserves acc(&p.ingressID, R21) // @ ensures acc(p.scionLayer.Mem(ubScionL), R19) // @ ensures acc(&p.path, R20) -// @ ensures acc(&p.d, R20) +// @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R20) +// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ubScionL) && p.scionLayer.EqAbsHeader(ubScionL) +// @ ensures 
acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R20)
+// @ ensures reserr == nil ==> dp.Valid() && slayers.ValidPktMetaHdr(ubScionL)
+// @ ensures reserr == nil ==> p.DstIsLocalIngressID(ubScionL)
+// @ ensures reserr == nil ==> p.LastHopLen(ubScionL, dp)
+// @ ensures reserr != nil && respr.OutPkt != nil ==>
+// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases
-func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ ) (respr processResult, reserr error) {
-	// @ ghost ubPath := p.scionLayer.UBPath(ubScionL)
+func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
 	// @ unfold acc(p.scionLayer.Mem(ubScionL), R20)
 	// @ defer fold acc(p.scionLayer.Mem(ubScionL), R20)
+	// @ ghost startP := p.scionLayer.PathStartIdx(ubScionL)
+	// @ ghost endP := p.scionLayer.PathEndIdx(ubScionL)
+	// @ ghost ubPath := ubScionL[startP:endP]
+	// @ sl.SplitRange_Bytes(ubScionL, startP, endP, R55)
+	// @ p.AbsPktToSubSliceAbsPkt(ubScionL, startP, endP, dp)
+	// @ ghost defer sl.CombineRange_Bytes(ubScionL, startP, endP, R55)
 	// @ unfold acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20)
 	// @ defer fold acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20)
 	// @ p.d.getLocalIA()
@@ -1935,48 +2102,65 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ )
 		// don't start with the first hop.
 		if p.path.IsFirstHop( /*@ ubPath @*/ ) && !srcIsLocal {
 			// @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP
-			return p.invalidSrcIA()
+			return p.invalidSrcIA( /*@ dp @*/ )
 		}
 		if dstIsLocal {
 			// @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP
-			return p.invalidDstIA()
+			return p.invalidDstIA( /*@ dp @*/ )
 		}
 	} else {
 		// Inbound
 		if srcIsLocal {
 			// @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP
-			return p.invalidSrcIA()
+			return p.invalidSrcIA( /*@ dp @*/ )
 		}
 		if p.path.IsLastHop( /*@ ubPath @*/ ) != dstIsLocal {
 			// @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP
-			return p.invalidDstIA()
+			return p.invalidDstIA( /*@ dp @*/ )
 		}
+		// @ ghost if(p.path.IsLastHopSpec(ubPath)) {
+		// @ 	p.path.LastHopLemma(ubPath, dp)
+		// @ 	p.SubSliceAbsPktToAbsPkt(ubScionL, startP, endP, dp)
+		// @ }
 	}
 	// @ fold p.d.validResult(processResult{}, false)
+
+	// @ assert (unfolding acc(p.scionLayer.Mem(ubScionL), R55) in
+	// @ 	(unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in
+	// @ 	p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0
+	// @ assert (unfolding acc(p.scionLayer.Mem(ubScionL), R55) in
+	// @ 	(unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in
+	// @ 	p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.path.IsLastHopSpec(ubPath)
+	// @ assert reveal p.DstIsLocalIngressID(ubScionL)
+	// @ assert reveal p.LastHopLen(ubScionL, dp)
 	return processResult{}, nil
 }
 
 // invalidSrcIA is a helper to return an SCMP error for an invalid SrcIA.
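The inbound/outbound checks in validateSrcDstIA above reduce to a small decision table. A minimal sketch in plain Go, assuming the router's conventions (outbound means ingressID == 0; srcIsLocal and dstIsLocal compare SrcIA/DstIA against the local IA); validSrcDstIA and its parameter names are hypothetical and not part of the codebase:

// validSrcDstIA restates the checks of validateSrcDstIA as a single predicate.
func validSrcDstIA(outbound, firstHop, lastHop, srcIsLocal, dstIsLocal bool) bool {
	if outbound {
		// A path at its first hop must originate in the local IA, and an
		// outbound packet must not name the local IA as its destination.
		return (!firstHop || srcIsLocal) && !dstIsLocal
	}
	// Inbound: the source must be remote, and the packet must be at its
	// last hop exactly when it is destined to the local IA.
	return !srcIsLocal && (lastHop == dstIsLocal)
}

When the predicate fails, the helpers that follow (invalidSrcIA, invalidDstIA) package the failure as an SCMP parameter problem.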
// @ trusted +// @ requires dp.Valid() // @ requires false -func (p *scionPacketProcessor) invalidSrcIA() (processResult, error) { +func (p *scionPacketProcessor) invalidSrcIA( /*@ ghost dp io.DataPlaneSpec @*/ ) (processResult, error) { return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidSourceAddress, &slayers.SCMPParameterProblem{Pointer: uint16(slayers.CmnHdrLen + addr.IABytes)}, invalidSrcIA, + /*@ dp, @*/ ) } // invalidDstIA is a helper to return an SCMP error for an invalid DstIA. // @ trusted +// @ requires dp.Valid() // @ requires false -func (p *scionPacketProcessor) invalidDstIA() (processResult, error) { +func (p *scionPacketProcessor) invalidDstIA( /*@ ghost dp io.DataPlaneSpec @*/ ) (processResult, error) { return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidDestinationAddress, &slayers.SCMPParameterProblem{Pointer: uint16(slayers.CmnHdrLen)}, invalidDstIA, + /*@ dp, @*/ ) } @@ -1988,7 +2172,7 @@ func (p *scionPacketProcessor) invalidDstIA() (processResult, error) { // @ requires acc(&p.path, R15) // @ requires acc(p.scionLayer.Mem(ub), R4) // @ requires p.path === p.scionLayer.GetPath(ub) -// @ requires acc(&p.ingressID, R20) +// @ requires acc(&p.ingressID, R21) // @ requires acc(&p.infoField, R4) && acc(&p.hopField, R4) // @ requires let ubPath := p.scionLayer.UBPath(ub) in // @ unfolding acc(p.scionLayer.Mem(ub), R10) in @@ -2001,7 +2185,7 @@ func (p *scionPacketProcessor) invalidDstIA() (processResult, error) { // @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R4) // @ ensures acc(&p.path, R15) // @ ensures acc(p.scionLayer.Mem(ub), R4) -// @ ensures acc(&p.ingressID, R20) +// @ ensures acc(&p.ingressID, R21) // @ ensures acc(&p.infoField, R4) && acc(&p.hopField, R4) // @ ensures acc(&p.d, R20) // @ ensures acc(&p.srcAddr, R20) @@ -2045,18 +2229,37 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ } // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves acc(&p.ingressID, R20) -// @ preserves acc(&p.segmentChange, R20) -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) +// @ requires acc(&p.segmentChange, R20) +// @ requires acc(&p.ingressID, R21) +// @ requires acc(&p.infoField, R20) +// @ requires acc(&p.hopField, R20) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) +// @ ensures acc(&p.ingressID, R21) +// @ ensures acc(&p.segmentChange, R20) // @ ensures acc(&p.d, R20) // @ ensures p.d.validResult(respr, false) // @ ensures reserr == nil ==> respr === processResult{} // @ ensures reserr != nil ==> sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(oldPkt) +// @ requires p.EqAbsInfoField(oldPkt) +// @ requires p.segmentChange ==> oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 +// @ requires !p.segmentChange ==> AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ requires p.segmentChange ==> AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ ensures reserr == nil && !p.segmentChange ==> AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) +// @ ensures reserr == nil && p.segmentChange ==> oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 +// @ ensures reserr == nil && p.segmentChange ==> 
p.ingressID != 0 && AbsValidateEgressIDConstraintXover(oldPkt, dp) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr error) { - pktEgressID := p.egressInterface() +func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { + pktEgressID := p.egressInterface( /*@ oldPkt @*/ ) + // @ reveal AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(pktEgressID)) // @ p.d.getInternalNextHops() // @ if p.d.internalNextHops != nil { unfold acc(accAddr(p.d.internalNextHops), _) } _, ih := p.d.internalNextHops[pktEgressID] @@ -2074,25 +2277,34 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e errCode, &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, cannotRoute, + /*@ dp, @*/ ) } + // @ TemporaryAssumeForIO(pktEgressID != 0 && + // @ (io.IO_ifs(pktEgressID) in domain(dp.GetNeighborIAs()))) // @ p.d.getLinkTypesMem() ingress, egress := p.d.linkTypes[p.ingressID], p.d.linkTypes[pktEgressID] + // @ p.d.LinkTypesLemma(dp) if !p.segmentChange { // Check that the interface pair is valid within a single segment. // No check required if the packet is received from an internal interface. + // @ assert reveal AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) switch { case p.ingressID == 0: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Core && egress == topology.Core: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Child && egress == topology.Parent: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Parent && egress == topology.Child: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil default: // malicious @@ -2102,19 +2314,23 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e slayers.SCMPCodeInvalidPath, // XXX(matzf) new code InvalidHop? &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, serrors.WithCtx(cannotRoute, "ingress_id", p.ingressID, "ingress_type", ingress, - "egress_id", pktEgressID, "egress_type", egress)) + "egress_id", pktEgressID, "egress_type", egress) /*@, dp, @*/) } } + // @ assert reveal AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID)) // Check that the interface pair is valid on a segment switch. // Having a segment change received from the internal interface is never valid. 
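+	// (VerifiedSCION) The switch below is the complete list of link-type
+	// pairs for which a segment change is allowed. A minimal sketch of the
+	// same rule as a pure predicate (validXoverLinkTypes is a hypothetical
+	// helper for illustration, not part of the dataplane API):
+	//
+	//	func validXoverLinkTypes(in, out topology.LinkType) bool {
+	//		switch {
+	//		case in == topology.Core && out == topology.Child,
+	//			in == topology.Child && out == topology.Core,
+	//			in == topology.Child && out == topology.Child:
+	//			return true
+	//		default:
+	//			return false
+	//		}
+	//	}
+	//
+	// Core-to-Core is deliberately absent: a segment change between two core
+	// links is rejected below with SCMPCodeInvalidSegmentChange.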
switch { case ingress == topology.Core && egress == topology.Child: + // @ assert reveal AbsValidateEgressIDConstraintXover(oldPkt, dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Child && egress == topology.Core: + // @ assert reveal AbsValidateEgressIDConstraintXover(oldPkt, dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Child && egress == topology.Child: + // @ assert reveal AbsValidateEgressIDConstraintXover(oldPkt, dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil default: @@ -2124,22 +2340,39 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e slayers.SCMPCodeInvalidSegmentChange, &slayers.SCMPParameterProblem{Pointer: p.currentInfoPointer( /*@ nil @*/ )}, serrors.WithCtx(cannotRoute, "ingress_id", p.ingressID, "ingress_type", ingress, - "egress_id", pktEgressID, "egress_type", egress)) + "egress_id", pktEgressID, "egress_type", egress) /*@, dp, @*/) } } -// @ preserves acc(&p.infoField) +// @ requires acc(&p.infoField) // @ requires acc(&p.path, R20) // @ requires acc(p.scionLayer.Mem(ub), R19) // @ requires p.path === p.scionLayer.GetPath(ub) -// @ preserves acc(&p.ingressID, R20) -// @ preserves acc(&p.hopField, R20) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.hopField, R20) +// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ preserves acc(&p.ingressID, R21) +// @ ensures acc(&p.hopField, R20) +// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ ensures acc(&p.infoField) // @ ensures acc(&p.path, R20) // @ ensures acc(p.scionLayer.Mem(ub), R19) // @ ensures err != nil ==> err.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ requires acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55) +// @ requires p.LastHopLen(ub, dp) +// @ requires p.EqAbsHopField(absPkt(dp, ub)) +// @ requires p.EqAbsInfoField(absPkt(dp, ub)) +// @ ensures acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55) +// @ ensures err == nil ==> dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures err == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ ensures err == nil ==> absPkt(dp, ub) == AbsUpdateNonConsDirIngressSegID(old(absPkt(dp, ub)), path.ifsToIO_ifs(p.ingressID)) +// @ ensures err == nil ==> p.LastHopLen(ub, dp) +// @ ensures err == nil ==> p.EqAbsHopField(absPkt(dp, ub)) +// @ ensures err == nil ==> p.EqAbsInfoField(absPkt(dp, ub)) // @ decreases -func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte @*/ ) (err error) { +func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (err error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost start := p.scionLayer.PathStartIdx(ub) // @ ghost end := p.scionLayer.PathEndIdx(ub) @@ -2151,16 +2384,31 @@ func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte // means this comes from this AS itself, so nothing has to be done. // TODO(lukedirtwalker): For packets destined to peer links this shouldn't // be updated. 
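+	// (VerifiedSCION) For reference, UpdateSegID is in essence an XOR of the
+	// SegID with the first two bytes of the hop-field MAC, roughly:
+	//
+	//	inf.SegID = inf.SegID ^ binary.BigEndian.Uint16(mac[:2])
+	//
+	// Because XOR is self-inverse, applying the update twice restores the
+	// original SegID; the ghost reveals and the upd_uinfo assertion below
+	// expose exactly this relation to the IO-specification.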
+ // @ reveal p.EqAbsInfoField(absPkt(dp, ub)) + // @ reveal p.EqAbsHopField(absPkt(dp, ub)) if !p.infoField.ConsDir && p.ingressID != 0 { - p.infoField.UpdateSegID(p.hopField.Mac) + p.infoField.UpdateSegID(p.hopField.Mac /*@, p.hopField.ToIO_HF() @*/) + // @ reveal p.LastHopLen(ub, dp) + // @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) // (VerifiedSCION) the following property is guaranteed by the type system, but Gobra cannot infer it yet // @ assume 0 <= p.path.GetCurrINF(ubPath) - // @ sl.SplitRange_Bytes(ub, start, end, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ub, start, end, writePerm) - if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubPath @*/); err != nil { + // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) + // @ p.AbsPktToSubSliceAbsPkt(ub, start, end, dp) + // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) + if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubPath , dp@*/); err != nil { + // @ ghost sl.CombineRange_Bytes(ub, start, end, writePerm) return serrors.WrapStr("update info field", err) } - } + // @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm) + // @ p.SubSliceAbsPktToAbsPkt(ub, start, end, dp) + // @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm) + // @ absPktFutureLemma(dp, ub) + // @ assert absPkt(dp, ub).CurrSeg.UInfo == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) + // @ assert reveal p.EqAbsInfoField(absPkt(dp, ub)) + // @ assert reveal p.EqAbsHopField(absPkt(dp, ub)) + // @ assert reveal p.LastHopLen(ub, dp) + } + // @ assert absPkt(dp, ub) == reveal AbsUpdateNonConsDirIngressSegID(old(absPkt(dp, ub)), path.ifsToIO_ifs(p.ingressID)) return nil } @@ -2202,26 +2450,36 @@ func (p *scionPacketProcessor) currentHopPointer( /*@ ghost ubScionL []byte @*/ scion.MetaLen + path.InfoLen*p.path.NumINF + path.HopLen*int(p.path.PathMeta.CurrHF)) } +// @ requires acc(&p.infoField, R20) +// @ requires acc(&p.hopField, R20) // @ preserves acc(&p.mac, R20) && p.mac != nil && p.mac.Mem() -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) // @ preserves acc(&p.macBuffers.scionInput, R20) // @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures len(p.cachedMac) == path.MACBufferSize // @ ensures sl.AbsSlice_Bytes(p.cachedMac, 0, len(p.cachedMac)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(oldPkt) +// @ requires p.EqAbsInfoField(oldPkt) +// @ ensures reserr == nil ==> AbsVerifyCurrentMACConstraint(oldPkt, dp) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr error) { +func (p *scionPacketProcessor) 
verifyCurrentMAC( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { fullMac := path.FullMAC(p.mac, p.infoField, p.hopField, p.macBuffers.scionInput) - // @ fold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R20) - // @ defer unfold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R20) - // @ sl.SplitRange_Bytes(fullMac, 0, path.MacLen, R20) - // @ ghost defer sl.CombineRange_Bytes(fullMac, 0, path.MacLen, R20) + // @ fold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R21) + // @ defer unfold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R21) + // @ sl.SplitRange_Bytes(fullMac, 0, path.MacLen, R21) + // @ ghost defer sl.CombineRange_Bytes(fullMac, 0, path.MacLen, R21) if subtle.ConstantTimeCompare(p.hopField.Mac[:path.MacLen], fullMac[:path.MacLen]) == 0 { // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") return p.packSCMP( @@ -2234,12 +2492,19 @@ func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr e "cons_dir", p.infoField.ConsDir, "if_id", p.ingressID, "curr_inf", p.path.PathMeta.CurrINF, "curr_hf", p.path.PathMeta.CurrHF, "seg_id", p.infoField.SegID), + /*@ dp, @*/ ) } // Add the full MAC to the SCION packet processor, // such that EPIC does not need to recalculate it. p.cachedMac = fullMac - + // @ reveal p.EqAbsInfoField(oldPkt) + // @ reveal p.EqAbsHopField(oldPkt) + // (VerifiedSCION) Assumptions for Cryptography: + // @ absInf := p.infoField.ToIntermediateAbsInfoField() + // @ absHF := p.hopField.ToIO_HF() + // @ AssumeForIO(dp.hf_valid(absInf.ConsDir, absInf.AInfo, absInf.UInfo, absHF)) + // @ reveal AbsVerifyCurrentMACConstraint(oldPkt, dp) // @ fold p.d.validResult(processResult{}, false) return processResult{}, nil } @@ -2260,8 +2525,12 @@ func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr e // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> !addrAliasesUb // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , addrAliasesUb bool @*/) { +func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte, ghost dp io.DataPlaneSpec @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , addrAliasesUb bool @*/) { // (VerifiedSCION) the parameter used to be p.scionLayer, // instead of &p.scionLayer. 
a, err /*@ , addrAliases @*/ := p.d.resolveLocalDst(&p.scionLayer /*@, ubScionL @*/) @@ -2275,7 +2544,7 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( r, err := p.packSCMP( slayers.SCMPTypeDestinationUnreachable, slayers.SCMPCodeNoRoute, - &slayers.SCMPDestinationUnreachable{}, err) + &slayers.SCMPDestinationUnreachable{}, err /*@, dp, @*/) return nil, r, err /*@ , false @*/ default: // @ fold p.d.validResult(respr, addrAliases) @@ -2286,30 +2555,47 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( // @ requires acc(&p.path, R20) // @ requires p.scionLayer.Mem(ub) // @ requires p.path === p.scionLayer.GetPath(ub) -// @ preserves acc(&p.infoField) -// @ preserves acc(&p.hopField, R20) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.infoField) +// @ requires acc(&p.hopField, R20) +// @ ensures acc(&p.infoField) +// @ ensures acc(&p.hopField, R20) +// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(absPkt(dp, ub)) +// @ requires p.EqAbsInfoField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) >= 0 +// @ ensures reserr == nil ==> absPkt(dp, ub) == AbsProcessEgress(old(absPkt(dp, ub))) // @ decreases -func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr error) { +func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (reserr error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) // @ assert ub[startP:endP] === ubPath - // @ unfold p.scionLayer.Mem(ub) - // @ sl.SplitRange_Bytes(ub, startP, endP, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ unfold acc(p.scionLayer.Mem(ub), 1-R55) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ reveal p.EqAbsInfoField(absPkt(dp, ub)) + // @ reveal p.EqAbsHopField(absPkt(dp, ub)) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ unfold acc(p.scionLayer.Mem(ub), R55) // we are the egress router and if we go in construction direction we // need to update the SegID. 
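+	// (VerifiedSCION) This mirrors updateNonConsDirIngressSegID, so the XOR
+	// is applied exactly once per hop:
+	//
+	//	direction   SegID is updated by
+	//	ConsDir     the egress router (here)
+	//	!ConsDir    the ingress router (updateNonConsDirIngressSegID)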
if p.infoField.ConsDir { - p.infoField.UpdateSegID(p.hopField.Mac) + p.infoField.UpdateSegID(p.hopField.Mac /*@, p.hopField.ToIO_HF() @*/) + // @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) // @ assume 0 <= p.path.GetCurrINF(ubPath) - if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF /*@ ) @*/) /*@ , ubPath @*/); err != nil { + if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF /*@ ) @*/) /*@ , ubPath, dp @*/); err != nil { // TODO parameter problem invalid path + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ p.path.DowngradePerm(ubPath) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) // @ unfold p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) @@ -2318,58 +2604,99 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr } } if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) // @ unfold p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) // @ fold p.scionLayer.NonInitMem() // TODO parameter problem invalid path return serrors.WrapStr("incrementing path", err) } - // @ fold p.scionLayer.Mem(ub) + // @ fold acc(p.scionLayer.Mem(ub), R55) + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) + // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp) + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ absPktFutureLemma(dp, ub) + // @ TemporaryAssumeForIO(absPkt(dp, ub) == AbsProcessEgress(old(absPkt(dp, ub)))) + // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return nil } // @ requires acc(&p.path, R20) // @ requires p.scionLayer.Mem(ub) // @ requires p.path == p.scionLayer.GetPath(ub) -// @ preserves acc(&p.segmentChange) && acc(&p.hopField) && acc(&p.infoField) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ preserves acc(&p.segmentChange) +// @ preserves acc(&p.hopField) +// @ preserves acc(&p.infoField) +// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) -// @ ensures reserr == nil ==> (p.scionLayer.Mem(ub) && p.scionLayer.UBPath(ub) === old(p.scionLayer.UBPath(ub)) && p.scionLayer.GetPath(ub) === old(p.scionLayer.GetPath(ub))) +// @ ensures reserr == nil ==> p.scionLayer.Mem(ub) +// @ ensures reserr == nil ==> p.scionLayer.UBPath(ub) === old(p.scionLayer.UBPath(ub)) +// @ ensures reserr == nil ==> p.scionLayer.GetPath(ub) === old(p.scionLayer.GetPath(ub)) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() // @ ensures p.segmentChange // @ ensures respr === processResult{} // @ ensures reserr != nil ==> reserr.ErrorMem() +// contract for IO-spec +// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires p.GetIsXoverSpec(ub) +// @ ensures reserr == nil ==> len(old(absPkt(dp, ub)).CurrSeg.Future) == 1 +// @ ensures reserr == nil ==> old(absPkt(dp, ub)).LeftSeg != none[io.IO_seg2] +// @ ensures reserr == nil ==> len(get(old(absPkt(dp, ub)).LeftSeg).Future) > 0 +// @ ensures reserr == nil ==> 
len(get(old(absPkt(dp, ub)).LeftSeg).History) == 0 +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> absPkt(dp, ub) == AbsDoXover(old(absPkt(dp, ub))) // @ decreases -func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { p.segmentChange = true - // @ unfold p.scionLayer.Mem(ub) - // @ ghost startP := int(slayers.CmnHdrLen + p.scionLayer.AddrHdrLen(nil, true)) - // @ ghost endP := int(p.scionLayer.HdrLen * slayers.LineLen) + // @ ghost startP := p.scionLayer.PathStartIdx(ub) + // @ ghost endP := p.scionLayer.PathEndIdx(ub) // @ ghost ubPath := ub[startP:endP] - // @ sl.SplitRange_Bytes(ub, startP, endP, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ub, startP, endP, writePerm) + + // @ unfold acc(p.scionLayer.Mem(ub), 1-R55) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ TemporaryAssumeForIO(len(old(absPkt(dp, ub)).CurrSeg.Future) == 1) + // @ reveal p.EqAbsInfoField(absPkt(dp, ub)) + // @ reveal p.EqAbsHopField(absPkt(dp, ub)) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ unfold acc(p.scionLayer.Mem(ub), R55) if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { // TODO parameter problem invalid path // TODO(joao): we currently expose a lot of internal information from slayers here. Can we avoid it? + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ unfold p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) // @ fold p.scionLayer.NonInitMem() return processResult{}, serrors.WrapStr("incrementing path", err) } + // @ fold acc(p.scionLayer.Mem(ub), R55) var err error if p.hopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ); err != nil { + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold p.scionLayer.Mem(ub) // @ p.scionLayer.DowngradePerm(ub) // TODO parameter problem invalid path return processResult{}, err } if p.infoField, err = p.path.GetCurrentInfoField( /*@ ubPath @*/ ); err != nil { + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold p.scionLayer.Mem(ub) // @ p.scionLayer.DowngradePerm(ub) // TODO parameter problem invalid path return processResult{}, err } - // @ fold p.scionLayer.Mem(ub) + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ TemporaryAssumeForIO(old(absPkt(dp, ub)).LeftSeg != none[io.IO_seg2]) + // @ TemporaryAssumeForIO(len(get(old(absPkt(dp, ub)).LeftSeg).Future) > 0) + // @ TemporaryAssumeForIO(len(get(old(absPkt(dp, ub)).LeftSeg).History) == 0) + // @ TemporaryAssumeForIO(slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)) + // @ TemporaryAssumeForIO(absPkt(dp, ub) == AbsDoXover(old(absPkt(dp, ub)))) + // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return processResult{}, nil } @@ -2403,28 +2730,50 @@ func (p *scionPacketProcessor) ingressInterface( /*@ ghost ubPath []byte @*/ ) u return hop.ConsEgress } -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) +// @ requires acc(&p.infoField, R21) +// @ requires acc(&p.hopField, R21) +// @ 
ensures acc(&p.infoField, R21)
+// @ ensures acc(&p.hopField, R21)
+// contracts for IO-spec
+// @ requires len(oldPkt.CurrSeg.Future) > 0
+// @ requires p.EqAbsInfoField(oldPkt)
+// @ requires p.EqAbsHopField(oldPkt)
+// @ ensures p.EqAbsInfoField(oldPkt)
+// @ ensures p.EqAbsHopField(oldPkt)
+// @ ensures AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(egress))
 // @ decreases
-func (p *scionPacketProcessor) egressInterface() uint16 {
+func (p *scionPacketProcessor) egressInterface( /*@ ghost oldPkt io.IO_pkt2 @*/ ) /*@ (egress @*/ uint16 /*@ ) @*/ {
+	// @ reveal p.EqAbsInfoField(oldPkt)
+	// @ reveal p.EqAbsHopField(oldPkt)
 	if p.infoField.ConsDir {
+		// @ assert reveal AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(p.hopField.ConsEgress))
 		return p.hopField.ConsEgress
 	}
+	// @ assert reveal AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(p.hopField.ConsIngress))
 	return p.hopField.ConsIngress
 }

 // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _)
-// @ preserves acc(&p.infoField, R20)
-// @ preserves acc(&p.hopField, R20)
-// @ preserves acc(&p.ingressID, R20)
+// @ requires acc(&p.infoField, R20)
+// @ requires acc(&p.hopField, R20)
+// @ preserves acc(&p.ingressID, R21)
+// @ ensures acc(&p.infoField, R20)
+// @ ensures acc(&p.hopField, R20)
 // @ ensures acc(&p.d, R20)
 // @ ensures p.d.validResult(respr, false)
 // @ ensures respr.OutPkt != nil ==>
 // @ 	reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
+// contracts for IO-spec
+// @ requires dp.Valid()
+// @ requires len(oldPkt.CurrSeg.Future) > 0
+// @ requires p.EqAbsInfoField(oldPkt)
+// @ requires p.EqAbsHopField(oldPkt)
+// @ ensures reserr != nil && respr.OutPkt != nil ==>
+// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases 0 if sync.IgnoreBlockingForTermination()
-func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr error) {
-	egressID := p.egressInterface()
+func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
+	egressID := p.egressInterface( /*@ oldPkt @*/ )
 	// @ p.d.getBfdSessionsMem()
 	// @ ghost if p.d.bfdSessions != nil { unfold acc(accBfdSession(p.d.bfdSessions), _) }
 	if v, ok := p.d.bfdSessions[egressID]; ok {
@@ -2446,7 +2795,7 @@ func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr e
 		}
 		// @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP
-		return p.packSCMP(typ, 0, scmpP, serrors.New("bfd session down"))
+		return p.packSCMP(typ, 0, scmpP, serrors.New("bfd session down") /*@, dp @*/)
 		}
 	}
 	// @ fold p.d.validResult(processResult{}, false)
@@ -2458,25 +2807,42 @@ func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr e
 // @ requires acc(&p.path, R20)
 // @ requires acc(p.scionLayer.Mem(ub), R10)
 // @ requires p.path === p.scionLayer.GetPath(ub)
 // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _)
-// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub))
+// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub))
+// @ requires acc(&p.ingressID, R21)
+// @ requires acc(&p.hopField)
 // @ preserves acc(&p.lastLayer, R19)
 // @ preserves p.lastLayer != nil
 // @ preserves (&p.scionLayer !== p.lastLayer && llIsNil) ==>
 // @ 	acc(p.lastLayer.Mem(nil), R15)
 // @ preserves (&p.scionLayer !== p.lastLayer && !llIsNil) ==>
 // @ 	acc(p.lastLayer.Mem(ub[startLL:endLL]), R15)
-// @ preserves acc(&p.ingressID, R20)
 // @ preserves acc(&p.infoField, R20)
-// @ preserves acc(&p.hopField)
+// @ ensures acc(&p.hopField)
+// @ ensures acc(&p.ingressID, R21)
+// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub))
 // @ ensures acc(&p.path, R20)
 // @ ensures acc(p.scionLayer.Mem(ub), R10)
-// @ ensures acc(&p.d, R20)
+// @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _)
 // @ ensures p.d.validResult(respr, false)
 // @ ensures respr.OutPkt != nil ==>
 // @ 	reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
+// contracts for IO-spec
+// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
+// @ requires p.DstIsLocalIngressID(ub)
+// @ requires p.LastHopLen(ub, dp)
+// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0
+// @ requires p.EqAbsHopField(absPkt(dp, ub))
+// @ ensures reserr == nil ==> p.DstIsLocalIngressID(ub)
+// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
+// @ ensures reserr == nil ==> p.LastHopLen(ub, dp)
+// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0
+// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub))
+// @ ensures reserr == nil ==> absPkt(dp, ub) == old(absPkt(dp, ub))
+// @ ensures reserr != nil && respr.OutPkt != nil ==>
+// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases
-func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) {
+func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
 	// @ ghost ubPath := p.scionLayer.UBPath(ub)
 	// @ ghost startP := p.scionLayer.PathStartIdx(ub)
 	// @ ghost endP := p.scionLayer.PathEndIdx(ub)
@@ -2495,13 +2861,27 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh
 	// @ defer fold acc(p.scionLayer.Mem(ub), R20)
 	// (VerifiedSCION) the following is guaranteed by the type system, but Gobra cannot prove it yet
 	// @ assume 0 <= p.path.GetCurrHF(ubPath)
-	// @ sl.SplitRange_Bytes(ub, startP, endP, writePerm)
+	// @ reveal p.LastHopLen(ub, dp)
+	// @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm)
+	// @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp)
+	// @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm)
 	if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubPath @*/); err != nil {
 		// @ sl.CombineRange_Bytes(ub, startP, endP, writePerm)
 		// @ fold p.d.validResult(processResult{}, false)
 		return processResult{}, serrors.WrapStr("update hop field", err)
 	}
-	// @ sl.CombineRange_Bytes(ub, startP, endP, writePerm)
+	// @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm)
+	// @ assert p.DstIsLocalIngressID(ub)
+	// @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopfield
+	// @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp)
+	// @ absPktFutureLemma(dp, ub)
+	// @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) // postcondition of SetHopfield
+	// @ TemporaryAssumeForIO(absPkt(dp, ub) == old(absPkt(dp, ub)))
+	// @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm)
+	// @ assert dp.Valid()
+	// @ assert slayers.ValidPktMetaHdr(ub)
+	// @ assert reveal p.LastHopLen(ub, dp)
+	// @ assert p.scionLayer.EqAbsHeader(ub)
 	/*@
 	ghost var ubLL []byte
	ghost if &p.scionLayer === p.lastLayer {
@@ -2511,11 +2891,11 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh
 		sl.NilAcc_Bytes()
 	} else {
 		ubLL = ub[startLL:endLL]
-		sl.SplitRange_Bytes(ub, startLL, endLL, writePerm)
-		ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, writePerm)
+		sl.SplitRange_Bytes(ub, startLL, endLL, R1)
+		ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1)
 	}
 	@*/
-	return p.handleSCMPTraceRouteRequest(p.ingressID /*@ , ubLL @*/)
+	return p.handleSCMPTraceRouteRequest(p.ingressID /*@ , ubLL, dp @*/)
 }

 // @ preserves acc(&p.infoField, R20)
@@ -2530,28 +2910,43 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) {
 // @ requires 0 <= startLL && startLL <= endLL && endLL <= len(ub)
 // @ requires acc(&p.path, R20)
-// @ requires acc(p.scionLayer.Mem(ub), R14)
+// @ requires acc(p.scionLayer.Mem(ub), R13)
 // @ requires p.path === p.scionLayer.GetPath(ub)
 // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _)
-// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub))
+// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub))
+// @ requires acc(&p.infoField, R20)
+// @ requires acc(&p.hopField)
 // @ preserves acc(&p.lastLayer, R19)
 // @ preserves p.lastLayer != nil
 // @ preserves (&p.scionLayer !== p.lastLayer && llIsNil) ==>
 // @ 	acc(p.lastLayer.Mem(nil), R15)
 // @ preserves (&p.scionLayer !== p.lastLayer && !llIsNil) ==>
 // @ 	acc(p.lastLayer.Mem(ub[startLL:endLL]), R15)
-// @ preserves acc(&p.ingressID, R20)
-// @ preserves acc(&p.infoField, R20)
-// @ preserves acc(&p.hopField)
+// @ preserves acc(&p.ingressID, R21)
+// @ ensures acc(&p.infoField, R20)
+// @ ensures acc(&p.hopField)
+// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub))
 // @ ensures acc(&p.path, R20)
-// @ ensures acc(p.scionLayer.Mem(ub), R14)
+// @ ensures acc(p.scionLayer.Mem(ub), R13)
 // @ ensures acc(&p.d, R20)
 // @ ensures p.d.validResult(respr, false)
 // @ ensures respr.OutPkt != nil ==>
 // @ 	reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
+// contracts for IO-spec
+// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
+// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0
+// @ requires p.EqAbsHopField(absPkt(dp, ub))
+// @ requires p.EqAbsInfoField(absPkt(dp, ub))
+// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
+// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0
+// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub))
+// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(dp, ub))
+// @ ensures reserr == nil ==> absPkt(dp, ub) == old(absPkt(dp, ub))
+// @ ensures reserr != nil && respr.OutPkt != nil ==>
+// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases
-func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) {
+func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
 	// @ ghost ubPath := p.scionLayer.UBPath(ub)
 	// @ ghost startP := p.scionLayer.PathStartIdx(ub)
 	// @ ghost endP := p.scionLayer.PathEndIdx(ub)
@@ -2562,7 +2957,7 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho
 		// @ fold p.d.validResult(processResult{}, false)
 		return processResult{}, nil
 	}
-
egressID := p.egressInterface() + egressID := p.egressInterface( /*@ absPkt(dp, ub) @*/ ) // @ p.d.getExternalMem() // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if _, ok := p.d.external[egressID]; !ok { @@ -2575,13 +2970,22 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho // (VerifiedSCION) the following is guaranteed by the type system, // but Gobra cannot prove it yet // @ assume 0 <= p.path.GetCurrHF(ubPath) - // @ sl.SplitRange_Bytes(ub, startP, endP, writePerm) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubPath @*/); err != nil { // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("update hop field", err) } - // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopfield + // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp) + // @ absPktFutureLemma(dp, ub) + // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) // postcondition of SetHopfield + // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(dp, ub))) + // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ TemporaryAssumeForIO(absPkt(dp, ub) == old(absPkt(dp, ub))) /*@ ghost var ubLL []byte ghost if &p.scionLayer === p.lastLayer { @@ -2591,14 +2995,14 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho sl.NilAcc_Bytes() } else { ubLL = ub[startLL:endLL] - sl.SplitRange_Bytes(ub, startLL, endLL, writePerm) - ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, writePerm) + sl.SplitRange_Bytes(ub, startLL, endLL, R1) + ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) } @*/ - return p.handleSCMPTraceRouteRequest(egressID /*@ , ubLL @*/) + return p.handleSCMPTraceRouteRequest(egressID /*@ , ubLL, dp @*/) } -// @ preserves acc(&p.infoField, R20) +// @ preserves acc(&p.infoField, R21) // @ ensures res == &p.hopField.IngressRouterAlert || res == &p.hopField.EgressRouterAlert // @ decreases func (p *scionPacketProcessor) egressRouterAlertFlag() (res *bool) { @@ -2610,18 +3014,22 @@ func (p *scionPacketProcessor) egressRouterAlertFlag() (res *bool) { // @ requires acc(&p.lastLayer, R20) // @ requires p.lastLayer != nil && acc(p.lastLayer.Mem(ubLastLayer), R15) -// @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves sl.AbsSlice_Bytes(ubLastLayer, 0, len(ubLastLayer)) +// @ requires acc(&p.d, R21) && acc(p.d.Mem(), _) +// @ preserves acc(sl.AbsSlice_Bytes(ubLastLayer, 0, len(ubLastLayer)), R1) // @ ensures acc(&p.lastLayer, R20) // @ ensures acc(p.lastLayer.Mem(ubLastLayer), R15) -// @ ensures acc(&p.d, R20) +// @ ensures acc(&p.d, R21) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases 
func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( - interfaceID uint16 /*@ , ghost ubLastLayer []byte @*/) (respr processResult, reserr error) { + interfaceID uint16 /*@ , ghost ubLastLayer []byte, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error) { if p.lastLayer.NextLayerType( /*@ ubLastLayer @*/ ) != slayers.LayerTypeSCMP { log.Debug("Packet with router alert, but not SCMP") @@ -2631,8 +3039,8 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( scionPld /*@ , start, end @*/ := p.lastLayer.LayerPayload( /*@ ubLastLayer @*/ ) // @ assert scionPld === ubLastLayer[start:end] || scionPld == nil // @ if scionPld == nil { sl.NilAcc_Bytes() } else { - // @ sl.SplitRange_Bytes(ubLastLayer, start, end, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ubLastLayer, start, end, writePerm) + // @ sl.SplitRange_Bytes(ubLastLayer, start, end, R1) + // @ ghost defer sl.CombineRange_Bytes(ubLastLayer, start, end, R1) // @ } // @ gopacket.AssertInvariantNilDecodeFeedback() var scmpH /*@@@*/ slayers.SCMP @@ -2652,8 +3060,8 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( // @ fold scmpP.NonInitMem() // @ unfold scmpH.Mem(scionPld) // @ unfold scmpH.BaseLayer.Mem(scionPld, 4) - // @ sl.SplitRange_Bytes(scionPld, 4, len(scionPld), writePerm) - // @ ghost defer sl.CombineRange_Bytes(scionPld, 4, len(scionPld), writePerm) + // @ sl.SplitRange_Bytes(scionPld, 4, len(scionPld), R1) + // @ ghost defer sl.CombineRange_Bytes(scionPld, 4, len(scionPld), R1) if err := scmpP.DecodeFromBytes(scmpH.Payload, gopacket.NilDecodeFeedback); err != nil { log.Debug("Parsing SCMPTraceroute", "err", err) // @ fold p.d.validResult(processResult{}, false) @@ -2669,7 +3077,7 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( Interface: uint64(interfaceID), } // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP - return p.packSCMP(slayers.SCMPTypeTracerouteReply, 0, &scmpP, nil) + return p.packSCMP(slayers.SCMPTypeTracerouteReply, 0, &scmpP, nil /*@, dp @*/) } // @ preserves acc(p.scionLayer.Mem(ubScionL), R20) @@ -2678,8 +3086,12 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( // @ ensures respr.OutPkt != nil ==> reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr == nil ==> int(p.scionLayer.GetPayloadLen(ubScionL)) == len(p.scionLayer.GetPayload(ubScionL)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { // @ unfold acc(p.scionLayer.Mem(ubScionL), R20) // @ defer fold acc(p.scionLayer.Mem(ubScionL), R20) if int(p.scionLayer.PayloadLen) == len(p.scionLayer.Payload) { @@ -2693,6 +3105,7 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( &slayers.SCMPParameterProblem{Pointer: 0}, serrors.New("bad packet size", "header", p.scionLayer.PayloadLen, "actual", len(p.scionLayer.Payload)), + /*@ dp, @*/ ) } @@ -2706,6 +3119,8 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ requires p.scionLayer.Mem(ub) // @ requires p.path == p.scionLayer.GetPath(ub) // @ 
requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.ingressID, R20) +// @ requires acc(&p.segmentChange) && !p.segmentChange // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) // @ preserves acc(&p.lastLayer, R10) // @ preserves p.lastLayer != nil @@ -2713,14 +3128,14 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ acc(p.lastLayer.Mem(nil), R10) // @ preserves (p.lastLayer !== &p.scionLayer && !llIsNil) ==> // @ acc(p.lastLayer.Mem(ub[startLL:endLL]), R10) -// @ preserves acc(&p.ingressID, R20) // @ preserves acc(&p.infoField) // @ preserves acc(&p.hopField) -// @ preserves acc(&p.segmentChange) // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) // @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) +// @ ensures acc(&p.segmentChange) +// @ ensures acc(&p.ingressID, R20) // @ ensures acc(&p.d, R5) // @ ensures acc(&p.path, R10) // @ ensures acc(&p.rawPkt, R1) @@ -2735,118 +3150,192 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires dp.Valid() +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) && p.scionLayer.EqPathType(ub) +// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +// @ requires let absPkt := absIO_val(dp, ub, p.ingressID) in +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) +// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ newAbsPkt.isIO_val_Unsupported +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool @*/) { - if r, err := p.parsePath( /*@ ub @*/ ); err != nil { +// @ #backend[stateConsolidationMode(6)] +func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { + // @ ghost var oldPkt io.IO_pkt2 + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ absIO_valLemma(dp, ub, p.ingressID) + // @ oldPkt = absIO_val(dp, ub, p.ingressID).IO_val_Pkt2_2 + // @ } else { + // @ absPktFutureLemma(dp, ub) + // @ oldPkt = absPkt(dp, ub) + // @ } + // @ nextPkt := oldPkt + if r, err := p.parsePath( /*@ ub , dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.validateHopExpiry(); err != nil { + if r, err := p.validateHopExpiry( /*@ dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, 
absReturnErr(dp, r) @*/ } - if r, err := p.validateIngressID(); err != nil { + if r, err := p.validateIngressID( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.validatePktLen( /*@ ub @*/ ); err != nil { + // @ assert AbsValidateIngressIDConstraint(nextPkt, path.ifsToIO_ifs(p.ingressID)) + if r, err := p.validatePktLen( /*@ ub, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } if r, err := p.validateTransitUnderlaySrc( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.validateSrcDstIA( /*@ ub @*/ ); err != nil { + if r, err := p.validateSrcDstIA( /*@ ub, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if err := p.updateNonConsDirIngressSegID( /*@ ub @*/ ); err != nil { + if err := p.updateNonConsDirIngressSegID( /*@ ub, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, absReturnErr(dp, processResult{}) @*/ } - if r, err := p.verifyCurrentMAC(); err != nil { + // @ assert absPkt(dp, ub) == AbsUpdateNonConsDirIngressSegID(oldPkt, path.ifsToIO_ifs(p.ingressID)) + // @ nextPkt = absPkt(dp, ub) + // @ AbsValidateIngressIDLemma(oldPkt, nextPkt, path.ifsToIO_ifs(p.ingressID)) + if r, err := p.verifyCurrentMAC( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.handleIngressRouterAlert( /*@ ub, llIsNil, startLL, endLL @*/ ); err != nil { + // @ assert AbsVerifyCurrentMACConstraint(nextPkt, dp) + if r, err := p.handleIngressRouterAlert( /*@ ub, llIsNil, startLL, endLL, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - + // @ assert nextPkt == absPkt(dp, ub) // Inbound: pkts destined to the local IA. 
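+	// (VerifiedSCION) Inbound means DstIA equals the local IA. LocalDstLemma,
+	// used just below, justifies that such a packet must have entered through
+	// an external interface (p.ingressID != 0) and is on the last hop of its
+	// current segment (len(nextPkt.CurrSeg.Future) == 1), so it is delivered
+	// over the internal connection instead of being forwarded further.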
// @ p.d.getLocalIA() if /*@ unfolding acc(p.scionLayer.Mem(ub), R50) in (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in @*/ p.scionLayer.DstIA /*@ ) @*/ == p.d.localIA { - a, r, err /*@, aliasesUb @*/ := p.resolveInbound( /*@ ub @*/ ) + // @ assert p.DstIsLocalIngressID(ub) + // @ assert unfolding acc(p.scionLayer.Mem(ub), R50) in (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in p.scionLayer.DstIA) == p.d.localIA + // @ p.LocalDstLemma(ub, dp) + // @ assert p.ingressID != 0 + // @ assert len(nextPkt.CurrSeg.Future) == 1 + a, r, err /*@, aliasesUb @*/ := p.resolveInbound( /*@ ub, dp @*/ ) if err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, aliasesUb @*/ + return r, err /*@, aliasesUb, absReturnErr(dp, r) @*/ } // @ p.d.getInternal() // @ unfold p.d.validResult(r, aliasesUb) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, aliasesUb) // @ assert ub === p.rawPkt - return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, aliasesUb @*/ + // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) + // @ } + // @ newAbsPkt = reveal absIO_val(dp, p.rawPkt, 0) + return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, aliasesUb, newAbsPkt @*/ } - // Outbound: pkts leaving the local IA. // BRTransit: pkts leaving from the same BR different interface. - // @ unfold acc(p.scionLayer.Mem(ub), R3) // @ ghost ubPath := p.scionLayer.UBPath(ub) if p.path.IsXover( /*@ ubPath @*/ ) { + // @ assert p.GetIsXoverSpec(ub) // @ fold acc(p.scionLayer.Mem(ub), R3) - if r, err := p.doXover( /*@ ub @*/ ); err != nil { - // @ fold p.d.validResult(r, false) - return r, err /*@, false @*/ + if r, err := p.doXover( /*@ ub, dp @*/ ); err != nil { + // @ fold p.d.validResult(processResult{}, false) + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.validateHopExpiry(); err != nil { + // @ assert absPkt(dp, ub) == AbsDoXover(nextPkt) + // @ AbsValidateIngressIDXoverLemma(nextPkt, AbsDoXover(nextPkt), path.ifsToIO_ifs(p.ingressID)) + // @ nextPkt = absPkt(dp, ub) + if r, err := p.validateHopExpiry( /*@ dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, serrors.WithCtx(err, "info", "after xover") /*@, false @*/ + return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(dp, r) @*/ } // verify the new block - if r, err := p.verifyCurrentMAC(); err != nil { - // fold acc(p.scionLayer.Mem(ub), R3) + if r, err := p.verifyCurrentMAC( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, serrors.WithCtx(err, "info", "after xover") /*@, false @*/ + return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(dp, r) @*/ } + // @ assert AbsVerifyCurrentMACConstraint(nextPkt, dp) } // @ fold acc(p.scionLayer.Mem(ub), R3) - if r, err := p.validateEgressID(); err != nil { + // @ assert p.segmentChange ==> nextPkt.RightSeg != none[io.IO_seg2] + if r, err := p.validateEgressID( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } + // @ assert !p.segmentChange ==> AbsValidateEgressIDConstraint(nextPkt, (p.ingressID != 0), dp) + // @ assert p.segmentChange ==> p.ingressID != 0 && 
AbsValidateEgressIDConstraintXover(nextPkt, dp) // handle egress router alert before we check if it's up because we want to // send the reply anyway, so that trace route can pinpoint the exact link // that failed. - if r, err := p.handleEgressRouterAlert( /*@ ub, llIsNil, startLL, endLL @*/ ); err != nil { + if r, err := p.handleEgressRouterAlert( /*@ ub, llIsNil, startLL, endLL , dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.validateEgressUp(); err != nil { + // @ assert nextPkt == absPkt(dp, ub) + if r, err := p.validateEgressUp( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - egressID := p.egressInterface() + // @ assert nextPkt == absPkt(dp, ub) + egressID := p.egressInterface( /*@ nextPkt @*/ ) + // @ assert AbsEgressInterfaceConstraint(nextPkt, path.ifsToIO_ifs(egressID)) // @ p.d.getExternalMem() // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if c, ok := p.d.external[egressID]; ok { - if err := p.processEgress( /*@ ub @*/ ); err != nil { + if err := p.processEgress( /*@ ub, dp @*/ ); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, absReturnErr(dp, processResult{}) @*/ } // @ p.d.InDomainExternalInForwardingMetrics2(egressID) + // @ assert absPkt(dp, ub) == AbsProcessEgress(nextPkt) + // @ nextPkt = absPkt(dp, ub) + // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ ghost if(!p.segmentChange) { + // enter/exit event + // @ ExternalEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) + // @ } else { + // xover event + // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) + // @ } + // @ } + // @ newAbsPkt = reveal absIO_val(dp, p.rawPkt, egressID) // @ fold p.d.validResult(processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, false) - return processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, nil /*@, false @*/ + return processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, nil /*@, false, newAbsPkt @*/ } - // ASTransit: pkts leaving from another AS BR. 
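+	// (VerifiedSCION) ASTransit covers packets whose egress interface belongs
+	// to a sibling border router of the same AS: the egress ID is resolved via
+	// internalNextHops below and the packet leaves on the internal connection.
+	// The IO-spec records this as an enter event (or an xover event after a
+	// segment change) with no egress interface, analogous to the inbound case.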
 	// @ p.d.getInternalNextHops()
 	// @ ghost if p.d.internalNextHops != nil { unfold acc(accAddr(p.d.internalNextHops), _) }
 	if a, ok := p.d.internalNextHops[egressID]; ok {
 		// @ p.d.getInternal()
+		// @ ghost if(path.ifsToIO_ifs(p.ingressID) != none[io.IO_ifs]) {
+		// @ 	TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub))
+		// @ 	ghost if(slayers.IsSupportedPkt(ub)) {
+		// @ 		if(!p.segmentChange) {
+		// enter event
+		// @ 			InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp)
+		// @ 		} else {
+		// xover event
+		// @ 			XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp)
+		// @ 		}
+		// @ 	}
+		// @ 	newAbsPkt = reveal absIO_val(dp, p.rawPkt, 0)
+		// @ } else {
+		// @ 	ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4497")
+		// @ }
 		// @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, false)
-		return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, false @*/
+		return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, false, newAbsPkt @*/
 	}
 	errCode := slayers.SCMPCodeUnknownHopFieldEgress
 	if !p.infoField.ConsDir {
@@ -2859,8 +3348,9 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool,
 		errCode,
 		&slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )},
 		cannotRoute,
+		/*@ dp, @*/
 	)
-	return tmp, err /*@, false @*/
+	return tmp, err /*@, false, absReturnErr(dp, tmp) @*/
 }

 // @ requires acc(&p.rawPkt, R15)
@@ -2888,8 +3378,16 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool,
 // @ ensures respr.OutPkt !== p.rawPkt && respr.OutPkt != nil ==>
 // @ 	sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
+// contracts for IO-spec
+// @ requires p.scionLayer.EqPathType(p.rawPkt)
+// @ requires !slayers.IsSupportedPkt(p.rawPkt)
+// @ requires dp.Valid()
+// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{})
+// @ ensures respr.OutPkt != nil ==>
+// @ 	newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) &&
+// @ 	newAbsPkt.isIO_val_Unsupported
 // @ decreases 0 if sync.IgnoreBlockingForTermination()
-func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) {
+func (p *scionPacketProcessor) processOHP( /*@ ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) {
 	// @ ghost ubScionL := p.rawPkt
 	// @ p.scionLayer.ExtractAcc(ubScionL)
 	s := p.scionLayer
@@ -2905,7 +3403,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /
 		// @ establishMemMalformedPath()
 		// @ fold p.scionLayer.Mem(ubScionL)
 		// @ fold p.d.validResult(processResult{}, false)
-		return processResult{}, malformedPath /*@ , false @*/
+		return processResult{}, malformedPath /*@ , false, absReturnErr(dp, processResult{}) @*/
 	}
 	if /*@ unfolding acc(s.Path.Mem(ubPath), R50) in @*/ !ohp.Info.ConsDir {
 		// TODO parameter problem -> invalid path
@@ -2914,7 +3412,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /
 		// @ fold p.d.validResult(processResult{}, false)
 		return processResult{}, serrors.WrapStr(
 			"OneHop path in reverse construction direction is not allowed",
			malformedPath, "srcIA", s.SrcIA, "dstIA", s.DstIA) /*@ , false,
absReturnErr(dp, processResult{}) @*/ } // OHP leaving our IA @@ -2927,7 +3425,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad source IA", cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/), - "localIA", p.d.localIA, "srcIA", s.SrcIA) /*@ , false @*/ + "localIA", p.d.localIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ p.d.getNeighborIAs() neighborIA, ok := p.d.neighborIAs[ /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/] @@ -2937,7 +3435,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WithCtx(cannotRoute, - "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/)) /*@ , false @*/ + "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/)) /*@ , false, absReturnErr(dp, processResult{}) @*/ } if !neighborIA.Equal(s.DstIA) { // @ establishCannotRoute() @@ -2945,7 +3443,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad destination IA", cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/), - "neighborIA", neighborIA, "dstIA", s.DstIA) /*@ , false @*/ + "neighborIA", neighborIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ unfold s.Path.Mem(ubPath) // @ unfold ohp.FirstHop.Mem() @@ -2970,9 +3468,9 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // TODO parameter problem -> invalid MAC // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.New("MAC", "expected", fmt.Sprintf("%x", macCopy), - "actual", fmt.Sprintf("%x", ohp.FirstHop.Mac), "type", "ohp") /*@ , false @*/ + "actual", fmt.Sprintf("%x", ohp.FirstHop.Mac), "type", "ohp") /*@ , false, absReturnErr(dp, processResult{}) @*/ } - ohp.Info.UpdateSegID(ohp.FirstHop.Mac) + ohp.Info.UpdateSegID(ohp.FirstHop.Mac /*@, ohp.FirstHop.ToIO_HF() @*/) // @ fold ohp.FirstHop.Mem() // @ fold s.Path.Mem(ubPath) // @ fold p.scionLayer.Mem(ubScionL) @@ -2981,7 +3479,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // changes made to 'updateSCIONLayer'. 
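	// (VerifiedSCION) updateSCIONLayer re-serializes the modified SCION header
	// into the raw packet. A minimal sketch of the call pattern, assuming
	// gopacket.SerializeBuffer semantics (illustrative, not the verbatim helper):
	//
	//	buffer.Clear()                // reset the scratch buffer
	//	s.SerializeTo(buffer, opts)   // write the updated header
	//	copy(rawPkt, buffer.Bytes())  // patch it back into the datagram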
if err := updateSCIONLayer(p.rawPkt, &p.scionLayer /* s */, p.buffer); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ unfold p.scionLayer.Mem(ubScionL) // @ defer fold p.scionLayer.Mem(ubScionL) @@ -3002,14 +3500,15 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // domain of forwardingMetrics is the same as the one for external // @ p.d.InDomainExternalInForwardingMetrics(ohp.FirstHop.ConsEgress) // @ fold p.d.validResult(processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, false) + // @ TemporaryAssumeForIO(!slayers.IsSupportedPkt(p.rawPkt)) return processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, - nil /*@ , false @*/ + nil /*@ , false, reveal absIO_val(dp, respr.OutPkt, respr.EgressID) @*/ } // TODO parameter problem invalid interface // @ establishCannotRoute() // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WithCtx(cannotRoute, "type", "ohp", - "egress", ohp.FirstHop.ConsEgress, "consDir", ohp.Info.ConsDir) /*@ , false @*/ + "egress", ohp.FirstHop.ConsEgress, "consDir", ohp.Info.ConsDir) /*@ , false, absReturnErr(dp, processResult{}) @*/ } // OHP entering our IA @@ -3020,7 +3519,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad destination IA", cannotRoute, "type", "ohp", "ingress", p.ingressID, - "localIA", p.d.localIA, "dstIA", s.DstIA) /*@ , false @*/ + "localIA", p.d.localIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ p.d.getNeighborIAs() neighborIA := p.d.neighborIAs[p.ingressID] @@ -3030,7 +3529,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad source IA", cannotRoute, "type", "ohp", "ingress", p.ingressID, - "neighborIA", neighborIA, "srcIA", s.SrcIA) /*@ , false @*/ + "neighborIA", neighborIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ unfold s.Path.Mem(ubPath) @@ -3054,7 +3553,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.scionLayer.Mem(ubScionL) if err := updateSCIONLayer(p.rawPkt, &p.scionLayer /* s */, p.buffer); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, absReturnErr(dp, processResult{}) @*/ } // (VerifiedSCION) the parameter was changed from 's' to '&p.scionLayer' due to the // changes made to 'resolveLocalDst'. 
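	// (VerifiedSCION) For intuition (a hedged reading, not a restated contract):
	// 'resolveLocalDst' resolves the destination address in the SCION header to the
	// local underlay address 'a' to which the packet is handed over below.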
@@ -3064,12 +3563,13 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /
 	// @ 	apply acc(a.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15)
 	// @ }
 	// @ fold p.d.validResult(processResult{}, false)
-		return processResult{}, err /*@ , false @*/
+		return processResult{}, err /*@ , false, absReturnErr(dp, processResult{}) @*/
 	}
 	// @ p.d.getInternal()
 	// @ assert p.d.internal != nil ==> acc(p.d.internal.Mem(), _)
 	// @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, addrAliases)
-	return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@ , addrAliases @*/
+	// @ TemporaryAssumeForIO(!slayers.IsSupportedPkt(p.rawPkt))
+	return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@ , addrAliases, reveal absIO_val(dp, respr.OutPkt, 0) @*/
 }
 
 // @ requires acc(d.Mem(), _)
@@ -3427,7 +3927,7 @@ func (p *scionPacketProcessor) prepareSCMP(
 	if infoField.ConsDir {
 		hopField := /*@ unfolding acc(revPath.HopFields[revPath.PathMeta.CurrHF].Mem(), _) in @*/
 			revPath.HopFields[revPath.PathMeta.CurrHF]
-		infoField.UpdateSegID(hopField.Mac)
+		infoField.UpdateSegID(hopField.Mac /*@, hopField.ToIO_HF() @*/)
 	}
 	// @ fold revPath.Base.Mem()
 	// @ fold revPath.Mem(rawPath)
@@ -3532,6 +4032,11 @@ func (p *scionPacketProcessor) prepareSCMP(
 // @ ensures reterr == nil && 0 <= idx ==> retl === opts[idx]
 // @ ensures reterr == nil ==> retl != nil
 // @ ensures reterr == nil ==> base.Mem(data)
+// @ ensures reterr == nil && typeOf(base.GetPath(data)) == *scion.Raw ==>
+// @ 	slayers.ValidPktMetaHdr(data)
+// @ ensures reterr == nil && typeOf(base.GetPath(data)) == *scion.Raw ==>
+// @ 	base.EqAbsHeader(data)
+// @ ensures reterr == nil ==> base.EqPathType(data)
 // @ ensures forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==>
 // @ 	(processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data)))
 // @ ensures reterr == nil ==> forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==>
@@ -3544,7 +4049,9 @@ func (p *scionPacketProcessor) prepareSCMP(
 // @ ensures reterr != nil ==> (forall i int :: { &opts[i] } 0 <= i && i < len(opts) ==> opts[i].NonInitMem())
 // @ ensures reterr != nil ==> reterr.ErrorMem()
 // @ decreases
-func decodeLayers(data []byte, base gopacket.DecodingLayer,
+// (VerifiedSCION) Originally, `base` was declared with type `gopacket.DecodingLayer`. That is
+// unnecessarily general for a private function that is only called once, with an argument of
+// type `*SCION`, and it leads to more annoying postconditions.
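+// As a sketch (assuming the single call site in 'processPkt' keeps its current shape), the
+// intended use is:
+//
+//	lastLayer, err := decodeLayers(p.rawPkt, &p.scionLayer, &p.hbhLayer, &p.e2eLayer)
+//
+// Fixing `base` to *slayers.SCION lets the contract state SCION-specific postconditions
+// (e.g. EqPathType, or ValidPktMetaHdr when the path is a *scion.Raw) directly, instead
+// of dispatching on the dynamic type of a gopacket.DecodingLayer.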
+func decodeLayers(data []byte, base *slayers.SCION, opts ...gopacket.DecodingLayer) (retl gopacket.DecodingLayer, reterr error /*@ , ghost processed seq[bool], ghost offsets seq[offsetPair], ghost idx int @*/) { // @ processed = seqs.NewSeqBool(len(opts)) @@ -3554,15 +4061,20 @@ func decodeLayers(data []byte, base gopacket.DecodingLayer, if err := base.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil { return nil, err /*@ , processed, offsets, idx @*/ } - last := base + var last gopacket.DecodingLayer = base optsSlice := ([](gopacket.DecodingLayer))(opts) // @ ghost oldData := data // @ ghost oldStart := 0 // @ ghost oldEnd := len(data) - // @ invariant acc(sl.AbsSlice_Bytes(oldData, 0, len(oldData)), R40) + // @ invariant acc(sl.AbsSlice_Bytes(oldData, 0, len(oldData)), R39) // @ invariant base.Mem(oldData) + // @ invariant typeOf(base.GetPath(oldData)) == *scion.Raw ==> + // @ slayers.ValidPktMetaHdr(oldData) + // @ invariant typeOf(base.GetPath(oldData)) == *scion.Raw ==> + // @ base.EqAbsHeader(oldData) + // @ invariant base.EqPathType(oldData) // @ invariant 0 < len(opts) ==> 0 <= i0 && i0 <= len(opts) // @ invariant forall i int :: {&opts[i]} 0 <= i && i < len(opts) ==> acc(&opts[i], R10) // @ invariant forall i, j int :: {&opts[i], &opts[j]} 0 <= i && i < j && j < len(opts) ==> opts[i] !== opts[j] diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra new file mode 100644 index 000000000..afd145aa0 --- /dev/null +++ b/router/io-spec-abstract-transitions.gobra @@ -0,0 +1,235 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package router + +import ( + "sync" + "github.com/scionproto/scion/pkg/slayers/path" + "github.com/scionproto/scion/pkg/slayers" + io "verification/io" + sl "github.com/scionproto/scion/verification/utils/slices" + . "verification/utils/definitions" +) + +ghost +opaque +requires len(oldPkt.CurrSeg.Future) > 0 +ensures len(newPkt.CurrSeg.Future) > 0 +ensures len(newPkt.CurrSeg.Future) == len(oldPkt.CurrSeg.Future) +decreases +pure func AbsUpdateNonConsDirIngressSegID(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs]) (newPkt io.IO_pkt2) { + return ingressID == none[io.IO_ifs] ? oldPkt : io.IO_pkt2( + io.IO_Packet2{ + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg}) +} + +ghost +opaque +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func AbsValidateIngressIDConstraint(pkt io.IO_pkt2, ingressID option[io.IO_ifs]) bool { + return let currseg := pkt.CurrSeg in + ingressID != none[io.IO_ifs] ==> + ingressID == (currseg.ConsDir ? 
currseg.Future[0].InIF2 : currseg.Future[0].EgIF2) +} + +ghost +opaque +requires pkt.RightSeg != none[io.IO_seg2] +requires len(get(pkt.RightSeg).Past) > 0 +decreases +pure func AbsValidateIngressIDConstraintXover(pkt io.IO_pkt2, ingressID option[io.IO_ifs]) bool { + return let rightseg := get(pkt.RightSeg) in + ingressID != none[io.IO_ifs] ==> + ingressID == (rightseg.ConsDir ? rightseg.Past[0].InIF2 : rightseg.Past[0].EgIF2) +} + +ghost +opaque +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func AbsEgressInterfaceConstraint(pkt io.IO_pkt2, egressID option[io.IO_ifs]) bool { + return let currseg := pkt.CurrSeg in + egressID == (currseg.ConsDir ? currseg.Future[0].EgIF2 : currseg.Future[0].InIF2) +} + +ghost +opaque +requires dp.Valid() +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func AbsValidateEgressIDConstraint(pkt io.IO_pkt2, enter bool, dp io.DataPlaneSpec) bool { + return let currseg := pkt.CurrSeg in + let nextIf := (currseg.ConsDir ? currseg.Future[0].EgIF2 : currseg.Future[0].InIF2) in + (enter ==> dp.dp2_check_interface_top(currseg.ConsDir, dp.Asid(), currseg.Future[0])) && + nextIf != none[io.IO_ifs] && + (get(nextIf) in domain(dp.GetNeighborIAs())) +} + +ghost +opaque +requires len(oldPkt.CurrSeg.Future) > 0 +ensures len(newPkt.CurrSeg.Future) >= 0 +decreases +pure func AbsProcessEgress(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { + return io.IO_pkt2( + io.IO_Packet2{ + io.establishGuardTraversedsegInc(oldPkt.CurrSeg, oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg}) +} + +ghost +opaque +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires len(oldPkt.CurrSeg.Future) == 1 +requires len(get(oldPkt.LeftSeg).Future) > 0 +requires len(get(oldPkt.LeftSeg).History) == 0 +ensures len(newPkt.CurrSeg.Future) > 0 +ensures newPkt.RightSeg != none[io.IO_seg2] +ensures len(get(newPkt.RightSeg).Past) > 0 +decreases +pure func AbsDoXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { + return io.IO_pkt2( + io.IO_Packet2{ + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, false))}) +} + +ghost +opaque +requires dp.Valid() +requires len(pkt.CurrSeg.Future) > 0 +requires pkt.RightSeg != none[io.IO_seg2] +requires len(get(pkt.RightSeg).Past) > 0 +decreases +pure func AbsValidateEgressIDConstraintXover(pkt io.IO_pkt2, dp io.DataPlaneSpec) bool { + return let currseg := pkt.CurrSeg in + let rightseg := get(pkt.RightSeg) in + let nextIf := (currseg.ConsDir ? 
currseg.Future[0].EgIF2 : currseg.Future[0].InIF2) in + dp.xover_up2down2_link_type_dir(dp.Asid(), rightseg.ConsDir, rightseg.Past[0], + currseg.ConsDir, currseg.Future[0]) && + nextIf != none[io.IO_ifs] && + (get(nextIf) in domain(dp.GetNeighborIAs())) +} + +ghost +opaque +requires dp.Valid() +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func AbsVerifyCurrentMACConstraint(pkt io.IO_pkt2, dp io.DataPlaneSpec) bool { + return let currseg := pkt.CurrSeg in + let d := currseg.ConsDir in + let ts := currseg.AInfo in + let hf := currseg.Future[0] in + let uinfo := currseg.UInfo in + dp.hf_valid(d, ts, uinfo, hf) +} + +ghost +requires dp.Valid() +requires ingressID != none[io.IO_ifs] +requires egressID == none[io.IO_ifs] +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires AbsVerifyCurrentMACConstraint(newPkt, dp) +requires len(newPkt.CurrSeg.Future) == 1 || AbsValidateEgressIDConstraint(newPkt, true, dp) +preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +ensures dp.Valid() +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases +func InternalEnterEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { + reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsVerifyCurrentMACConstraint(newPkt, dp) + if(len(newPkt.CurrSeg.Future) != 1) { + reveal AbsValidateEgressIDConstraint(newPkt, true, dp) + } + AtomicEnter(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) +} + +ghost +requires dp.Valid() +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), dp) +requires AbsValidateEgressIDConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), (ingressID != none[io.IO_ifs]), dp) +requires AbsEgressInterfaceConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), egressID) +requires newPkt == AbsProcessEgress(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) +preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +ensures dp.Valid() +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases +func ExternalEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { + reveal dp.Valid() + nextPkt := reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsVerifyCurrentMACConstraint(nextPkt, dp) + reveal AbsEgressInterfaceConstraint(nextPkt, egressID) + reveal AbsValidateEgressIDConstraint(nextPkt, (ingressID != none[io.IO_ifs]), dp) + reveal AbsProcessEgress(nextPkt) + if(ingressID == none[io.IO_ifs]){ + AtomicExit(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) + } else { + AtomicEnter(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) + } +} + +ghost +requires dp.Valid() +requires ingressID != none[io.IO_ifs] +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires 
AbsValidateIngressIDConstraint(oldPkt, ingressID)
+requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), dp)
+requires len(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).CurrSeg.Future) == 1
+requires AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg != none[io.IO_seg2]
+requires len(get(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg).Future) > 0
+requires len(get(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg).History) == 0
+requires AbsVerifyCurrentMACConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp)
+requires AbsValidateEgressIDConstraintXover(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp)
+requires AbsEgressInterfaceConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), egressID)
+requires egressID == none[io.IO_ifs] ==> newPkt == AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID))
+requires egressID != none[io.IO_ifs] ==> newPkt == AbsProcessEgress(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)))
+preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>;
+ensures dp.Valid()
+ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt)
+decreases
+func XoverEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) {
+	reveal dp.Valid()
+	intermediatePkt1 := reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)
+	intermediatePkt2 := reveal AbsDoXover(intermediatePkt1)
+	reveal AbsValidateIngressIDConstraint(oldPkt, ingressID)
+	reveal AbsVerifyCurrentMACConstraint(intermediatePkt1, dp)
+	reveal AbsVerifyCurrentMACConstraint(intermediatePkt2, dp)
+	reveal AbsValidateEgressIDConstraintXover(intermediatePkt2, dp)
+	reveal AbsEgressInterfaceConstraint(intermediatePkt2, egressID)
+	if(egressID != none[io.IO_ifs]){
+		reveal AbsProcessEgress(intermediatePkt2)
+	}
+	AtomicXoverUp2Down(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp)
+}
diff --git a/router/io-spec-atomic-events.gobra b/router/io-spec-atomic-events.gobra
new file mode 100644
index 000000000..b93ae6ede
--- /dev/null
+++ b/router/io-spec-atomic-events.gobra
@@ -0,0 +1,160 @@
+// Copyright 2022 ETH Zurich
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +gobra
+
+// This file contains the definitions of the operations that perform the atomic transitions
+// of state in the IO spec. They all take a *sync.Mutex whose lock invariant stands in for a
+// logical invariant, because Gobra does not support logical invariants natively. As such, we
+// can only get access to the invariant after locking the mutex, which is a blocking operation.
+// Even though all these operations terminate, Gobra cannot currently prove this; thus, we
+// assume termination for all methods in this file.
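+//
+// Schematically, every operation in this file follows the same pattern (shown only for
+// intuition; all names are the ones defined in this package):
+//
+//	ghost ioLock.Lock()                       // blocks until the invariant is available
+//	unfold SharedInv!< dp, ioSharedArg !>()   // open the shared invariant
+//	// perform exactly one IO-spec event (e.g. Enter, Exit, Xover_up2down) and
+//	// update the ghost output buffer as well as the current place
+//	fold SharedInv!< dp, ioSharedArg !>()     // re-establish the invariant
+//	ghost ioLock.Unlock()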
+ +package router + +import ( + "sync" + io "verification/io" +) + +ghost +requires dp.Valid() +requires ingressID != none[io.IO_ifs] +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires dp.dp2_enter_guard( + oldPkt, + oldPkt.CurrSeg, + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + dp.Asid(), + oldPkt.CurrSeg.Future[0], + get(ingressID), + oldPkt.CurrSeg.Future[1:]) +requires dp.dp3s_forward( + io.IO_pkt2( + io.IO_Packet2{ + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg}), + newPkt, + egressID) +preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases _ +func AtomicEnter(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { + ghost ioLock.Lock() + unfold SharedInv!< dp, ioSharedArg !>() + t, s := *ioSharedArg.Place, *ioSharedArg.State + ApplyElemWitness(s.ibuf, ioSharedArg.IBufY, ingressID, oldPkt) + ghost pkt_internal := io.IO_val(io.IO_Internal_val1{oldPkt, get(ingressID), newPkt, egressID}) + assert dp.dp3s_iospec_bio3s_enter_guard(s, t, pkt_internal) + unfold dp.dp3s_iospec_ordered(s, t) + unfold dp.dp3s_iospec_bio3s_enter(s, t) + io.TriggerBodyIoEnter(pkt_internal) + tN := io.CBio_IN_bio3s_enter_T(t, pkt_internal) + io.Enter(t, pkt_internal) //Event + UpdateElemWitness(s.obuf, ioSharedArg.OBufY, egressID, newPkt) + ghost *ioSharedArg.State = io.dp3s_add_obuf(s, egressID, newPkt) + ghost *ioSharedArg.Place = tN + fold SharedInv!< dp, ioSharedArg !>() + ghost ioLock.Unlock() +} + +ghost +requires dp.Valid() +requires ingressID == none[io.IO_ifs] +requires egressID != none[io.IO_ifs] +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires dp.dp3s_forward_ext(oldPkt, newPkt, get(egressID)) +preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases _ +func AtomicExit(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { + ghost ioLock.Lock() + unfold SharedInv!< dp, ioSharedArg !>() + t, s := *ioSharedArg.Place, *ioSharedArg.State + ApplyElemWitness(s.ibuf, ioSharedArg.IBufY, ingressID, oldPkt) + ghost pkt_internal := io.IO_val(io.IO_Internal_val2{oldPkt, newPkt, get(egressID)}) + assert dp.dp3s_iospec_bio3s_exit_guard(s, t, pkt_internal) + unfold dp.dp3s_iospec_ordered(s, t) + unfold dp.dp3s_iospec_bio3s_exit(s, t) + io.TriggerBodyIoExit(pkt_internal) + tN := io.dp3s_iospec_bio3s_exit_T(t, pkt_internal) + io.Exit(t, pkt_internal) //Event + UpdateElemWitness(s.obuf, ioSharedArg.OBufY, egressID, newPkt) + ghost *ioSharedArg.State = io.dp3s_add_obuf(s, egressID, newPkt) + ghost *ioSharedArg.Place = tN + fold SharedInv!< dp, ioSharedArg !>() + ghost ioLock.Unlock() +} + +ghost +requires dp.Valid() +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires len(oldPkt.CurrSeg.Future) > 0 +requires len(get(oldPkt.LeftSeg).Future) > 0 +requires ingressID != none[io.IO_ifs] +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires dp.xover_up2down2_link_type_dir( + dp.Asid(), + oldPkt.CurrSeg.ConsDir, + oldPkt.CurrSeg.Future[0], + get(oldPkt.LeftSeg).ConsDir, + 
get(oldPkt.LeftSeg).Future[0]) +requires dp.dp2_xover_common_guard( + oldPkt, + oldPkt.CurrSeg, + get(oldPkt.LeftSeg), + io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + io.IO_pkt2(io.IO_Packet2{ + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir))}), + oldPkt.CurrSeg.Future[0], + get(oldPkt.LeftSeg).Future[0], + get(oldPkt.LeftSeg).Future[1:], + dp.Asid(), + get(ingressID)) +requires dp.dp3s_forward_xover( + io.IO_pkt2(io.IO_Packet2{ + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir))}), + newPkt, + egressID) +preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases _ +func AtomicXoverUp2Down(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { + ghost ioLock.Lock() + unfold SharedInv!< dp, ioSharedArg !>() + t, s := *ioSharedArg.Place, *ioSharedArg.State + ApplyElemWitness(s.ibuf, ioSharedArg.IBufY, ingressID, oldPkt) + ghost pkt_internal := io.IO_val(io.IO_Internal_val1{oldPkt, get(ingressID), newPkt, egressID}) + assert dp.dp3s_iospec_bio3s_xover_up2down_guard(s, t, pkt_internal) + unfold dp.dp3s_iospec_ordered(s, t) + unfold dp.dp3s_iospec_bio3s_xover_up2down(s, t) + io.TriggerBodyIoXoverUp2Down(pkt_internal) + tN := io.dp3s_iospec_bio3s_xover_up2down_T(t, pkt_internal) + io.Xover_up2down(t, pkt_internal) //Event + UpdateElemWitness(s.obuf, ioSharedArg.OBufY, egressID, newPkt) + ghost *ioSharedArg.State = io.dp3s_add_obuf(s, egressID, newPkt) + ghost *ioSharedArg.Place = tN + fold SharedInv!< dp, ioSharedArg !>() + ghost ioLock.Unlock() +} \ No newline at end of file diff --git a/router/io-spec-non-proven-lemmas.gobra b/router/io-spec-non-proven-lemmas.gobra new file mode 100644 index 000000000..6edcde280 --- /dev/null +++ b/router/io-spec-non-proven-lemmas.gobra @@ -0,0 +1,261 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package router + +import ( + "sync" + "github.com/scionproto/scion/pkg/slayers/path" + "github.com/scionproto/scion/pkg/slayers/path/scion" + "github.com/scionproto/scion/pkg/slayers" + "verification/dependencies/encoding/binary" + io "verification/io" + sl "github.com/scionproto/scion/verification/utils/slices" + . 
"verification/utils/definitions" +) + +ghost +preserves dp.Valid() +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) +ensures slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw) ==> + absIO_val(dp, raw, ingressID).isIO_val_Pkt2 && + absIO_val(dp, raw, ingressID).IO_val_Pkt2_2 == absPkt(dp, raw) && + len(absPkt(dp, raw).CurrSeg.Future) > 0 +decreases +func absIO_valLemma(dp io.DataPlaneSpec, raw []byte, ingressID uint16) { + if(slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw)){ + absIO := reveal absIO_val(dp, raw, ingressID) + assert absIO.isIO_val_Pkt2 + assert absIO_val(dp, raw, ingressID).IO_val_Pkt2_2 == absPkt(dp, raw) + absPktFutureLemma(dp, raw) + } +} + +ghost +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires slayers.ValidPktMetaHdr(raw) +ensures dp.Valid() +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures slayers.ValidPktMetaHdr(raw) +ensures len(absPkt(dp, raw).CurrSeg.Future) > 0 +decreases +func absPktFutureLemma(dp io.DataPlaneSpec, raw []byte) { + reveal slayers.ValidPktMetaHdr(raw) + headerOffset := slayers.GetAddressOffset(raw) + assert forall k int :: {&raw[headerOffset:headerOffset+scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> &raw[headerOffset:headerOffset+scion.MetaLen][k] == &raw[headerOffset + k] + hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) + metaHdr := scion.DecodedFrom(hdr) + currINFIdx := int(metaHdr.CurrINF) + currHFIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) + offset := scion.HopFieldOffset(numINF, 0, headerOffset) + pkt := reveal absPkt(dp, raw) + assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffset) + assert len(pkt.CurrSeg.Future) > 0 +} + +ghost +requires len(oldPkt.CurrSeg.Future) > 0 +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) +ensures AbsValidateIngressIDConstraint(newPkt, ingressID) +decreases +func AbsValidateIngressIDLemma(oldPkt io.IO_pkt2, newPkt io.IO_pkt2, ingressID option[io.IO_ifs]) { + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + reveal AbsValidateIngressIDConstraint(newPkt, ingressID) +} + +ghost +requires len(oldPkt.CurrSeg.Future) == 1 +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires len(get(oldPkt.LeftSeg).Future) > 0 +requires len(get(oldPkt.LeftSeg).History) == 0 +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires newPkt == AbsDoXover(oldPkt) +ensures AbsValidateIngressIDConstraintXover(newPkt, ingressID) +decreases +func AbsValidateIngressIDXoverLemma(oldPkt io.IO_pkt2, newPkt io.IO_pkt2, ingressID option[io.IO_ifs]) { + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsDoXover(oldPkt) + reveal AbsValidateIngressIDConstraintXover(newPkt, ingressID) +} + +ghost +opaque +requires acc(p.scionLayer.Mem(ub), R50) +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +decreases +pure func (p *scionPacketProcessor) DstIsLocalIngressID(ub []byte) bool { + return (unfolding 
acc(p.scionLayer.Mem(ub), R50) in + (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in + p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0 +} + +ghost +opaque +requires acc(p.scionLayer.Mem(ub), R50) +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +requires slayers.ValidPktMetaHdr(ub) +requires dp.Valid() +decreases +pure func (p *scionPacketProcessor) LastHopLen(ub []byte, dp io.DataPlaneSpec) bool { + return (unfolding acc(p.scionLayer.Mem(ub), R50) in + (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in + p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> + len(absPkt(dp, ub).CurrSeg.Future) == 1 +} + +//TODO: Does not work with --disableNL --unsafeWildcardoptimization +ghost +requires acc(p.scionLayer.Mem(ub), R50) +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +requires slayers.ValidPktMetaHdr(ub) +requires p.DstIsLocalIngressID(ub) +requires p.LastHopLen(ub, dp) +requires (unfolding acc(p.scionLayer.Mem(ub), R50) in + (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in + p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) +ensures acc(p.scionLayer.Mem(ub), R50) +ensures acc(&p.d, R55) && acc(p.d.Mem(), _) +ensures acc(&p.ingressID, R55) +ensures dp.Valid() +ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +ensures slayers.ValidPktMetaHdr(ub) +ensures p.ingressID != 0 +ensures len(absPkt(dp, ub).CurrSeg.Future) == 1 +decreases +func (p* scionPacketProcessor) LocalDstLemma(ub []byte, dp io.DataPlaneSpec) { + reveal p.DstIsLocalIngressID(ub) + reveal p.LastHopLen(ub, dp) +} + +ghost +requires acc(p.scionLayer.Mem(ub), R55) +requires acc(&p.path, R55) && p.path == p.scionLayer.GetPath(ub) +decreases +pure func (p* scionPacketProcessor) GetIsXoverSpec(ub []byte) bool { + return let ubPath := p.scionLayer.UBPath(ub) in + unfolding acc(p.scionLayer.Mem(ub), R55) in + p.path.GetIsXoverSpec(ubPath) +} + +// TODO prove +ghost +requires 0 <= start && start <= end && end <= len(ub) +requires acc(p.scionLayer.Mem(ub), R55) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55) +requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +requires p.path === p.scionLayer.GetPath(ub) +requires dp.Valid() +requires slayers.ValidPktMetaHdr(ub) +requires start == p.scionLayer.PathStartIdx(ub) +requires end == p.scionLayer.PathEndIdx(ub) +requires p.scionLayer.EqAbsHeader(ub) +ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +ensures acc(p.scionLayer.Mem(ub), R55) +ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55) +ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +ensures dp.Valid() +ensures slayers.ValidPktMetaHdr(ub) +ensures start == p.scionLayer.PathStartIdx(ub) +ensures end == p.scionLayer.PathEndIdx(ub) +ensures scion.validPktMetaHdr(ub[start:end]) +ensures p.path.EqAbsHeader(ub[start:end]) +ensures absPkt(dp, ub) == p.path.absPkt(dp, ub[start:end]) +decreases +func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end int, dp io.DataPlaneSpec) +/* { + reveal validPktMetaHdr(ub) + reveal p.scionLayer.EqAbsHeader(ub) + unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.AbsSlice_Bytes(ub[start:end], 0, 
len(ub[start:end])), R56) + assert reveal scion.validPktMetaHdr(ub[start:end]) + unfold acc(p.scionLayer.Mem(ub), R56) + assert p.scionLayer.Path.(*scion.Raw).EqAbsHeader(ub[start:end]) + assert p.path.EqAbsHeader(ub[start:end]) + fold acc(p.scionLayer.Mem(ub), R56) + assert reveal absPkt(dp, ub) == reveal p.path.absPkt(dp, ub[start:end]) + fold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) + fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +}*/ + +// TODO prove +ghost +requires 0 <= start && start <= end && end <= len(ub) +requires acc(p.scionLayer.Mem(ub), R55) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55) +requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +requires p.path === p.scionLayer.GetPath(ub) +requires dp.Valid() +requires scion.validPktMetaHdr(ub[start:end]) +requires start == p.scionLayer.PathStartIdx(ub) +requires end == p.scionLayer.PathEndIdx(ub) +requires p.path.EqAbsHeader(ub[start:end]) +ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +ensures acc(p.scionLayer.Mem(ub), R55) +ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55) +ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +ensures dp.Valid() +ensures slayers.ValidPktMetaHdr(ub) +ensures start == p.scionLayer.PathStartIdx(ub) +ensures end == p.scionLayer.PathEndIdx(ub) +ensures scion.validPktMetaHdr(ub[start:end]) +ensures p.scionLayer.EqAbsHeader(ub) +ensures absPkt(dp, ub) == p.path.absPkt(dp, ub[start:end]) +decreases +func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end int, dp io.DataPlaneSpec) + +ghost +opaque +requires acc(&p.hopField, R55) +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func (p* scionPacketProcessor) EqAbsHopField(pkt io.IO_pkt2) bool { + return let absHop := p.hopField.ToIO_HF() in + let currHF := pkt.CurrSeg.Future[0] in + absHop.InIF2 == currHF.InIF2 && + absHop.EgIF2 == currHF.EgIF2 && + absHop.HVF == currHF.HVF +} + +ghost +opaque +requires acc(&p.infoField, R55) +decreases +pure func (p* scionPacketProcessor) EqAbsInfoField(pkt io.IO_pkt2) bool { + return let absInf := p.infoField.ToIntermediateAbsInfoField() in + let currseg := pkt.CurrSeg in + absInf.AInfo == currseg.AInfo && + absInf.UInfo == currseg.UInfo && + absInf.ConsDir == currseg.ConsDir && + absInf.Peer == currseg.Peer +} \ No newline at end of file diff --git a/router/io-spec.gobra b/router/io-spec.gobra index 6de64f1bb..dc1377a00 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -17,507 +17,48 @@ package router import ( - sl "github.com/scionproto/scion/verification/utils/slices" - "github.com/scionproto/scion/verification/io" - "github.com/scionproto/scion/verification/dependencies/encoding/binary" + "github.com/scionproto/scion/pkg/slayers" "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" "github.com/scionproto/scion/private/topology" -) - -ghost -decreases -pure func numInfoFields(seg1Len int, seg2Len int, seg3Len int) int { - return seg3Len > 0 ? 3 : (seg2Len > 0 ? 
2 : 1) -} - -ghost -decreases -pure func hopFieldOffset(numINF int, currHF int) int { - return path.InfoFieldOffset(numINF) + path.HopLen * currHF -} - -ghost -decreases -pure func pktLen(seg1Len int, seg2Len int, seg3Len int) int { - return hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) + - path.HopLen * (seg1Len + seg2Len + seg3Len) -} - - -ghost -decreases -pure func lengthOfCurrSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) int { - return seg1Len > currHF ? seg1Len : ((seg1Len + seg2Len) > currHF ? seg2Len : seg3Len) -} - -ghost -requires 0 <= currHF -ensures res <= currHF -decreases -pure func lengthOfPrevSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) (res int) { - return seg1Len > currHF ? 0 : ((seg1Len + seg2Len) > currHF ? seg1Len : seg1Len + seg2Len) -} - -// returns the ASid of a hopfield -ghost -requires 1 <= numINF -requires 0 <= currHFIdx -requires hopFieldOffset(numINF, currHFIdx) + path.HopLen <= len(raw) -requires dp.Valid() -requires let idx := hopFieldOffset(numINF, currHFIdx) in - acc(&raw[idx+2], _) && acc(&raw[idx+3], _) && acc(&raw[idx+4], _) && acc(&raw[idx+5], _) -decreases -pure func asidFromIfs( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - consDir bool, - asid io.IO_as) (res option[io.IO_as]) { - return let idx := hopFieldOffset(numINF, currHFIdx) in - let ifs := consDir ? binary.BigEndian.Uint16(raw[idx+4:idx+6]) : binary.BigEndian.Uint16(raw[idx+2:idx+4]) in - let asIfPair := io.AsIfsPair{asid, io.IO_ifs(ifs)} in - (asIfPair in domain(dp.GetLinks()) ? - some(dp.Lookup(asIfPair).asid) : none[io.IO_as]) -} - -// returns a list of ASids of hopfields that are before the current hopfield in a segment -ghost -requires 1 <= numINF -requires 0 <= prevSegLen && prevSegLen <= currHFIdx -requires hopFieldOffset(numINF, currHFIdx) + path.HopLen <= len(raw) -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures res != none[seq[io.IO_as]] ==> len(get(res)) == currHFIdx - prevSegLen + 1 -decreases currHFIdx - prevSegLen -pure func asidsBefore( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - prevSegLen int, - consDir bool, - asid io.IO_as) (res option[seq[io.IO_as]]) { - return let next_asid := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in asidFromIfs(dp, raw, numINF, currHFIdx, !consDir, asid)) in - match next_asid{ - case none[io.IO_as]: - none[seq[io.IO_as]] - default: - currHFIdx == prevSegLen ? 
some(seq[io.IO_as]{get(next_asid)}) : - let next_asid_seq := asidsBefore(dp, raw, numINF, currHFIdx-1, prevSegLen, consDir, get(next_asid)) in - match next_asid_seq{ - case none[seq[io.IO_as]]: - none[seq[io.IO_as]] - default: - some(get(next_asid_seq) ++ seq[io.IO_as]{get(next_asid)}) - } - } -} - -// returns a list of ASids of hopfields that are after the current hopfield in a segment -ghost -requires 1 <= numINF -requires 0 <= currHFIdx && currHFIdx < segLen -requires hopFieldOffset(numINF, segLen) <= len(raw) -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures res != none[seq[io.IO_as]] ==> len(get(res)) == segLen - currHFIdx -decreases segLen - currHFIdx + 1 -pure func asidsAfter( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - segLen int, - consDir bool, - asid io.IO_as) (res option[seq[io.IO_as]]) { - return let next_asid := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in asidFromIfs(dp, raw, numINF, currHFIdx, consDir, asid)) in - match next_asid{ - case none[io.IO_as]: - none[seq[io.IO_as]] - default: - currHFIdx == segLen - 1 ? some(seq[io.IO_as]{get(next_asid)}) : - let next_asid_seq := asidsAfter(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(next_asid)) in - match next_asid_seq{ - case none[seq[io.IO_as]]: - none[seq[io.IO_as]] - default: - some(seq[io.IO_as]{get(next_asid)} ++ get(next_asid_seq)) - } - } -} - -// returns a list of ASids of hopfields for CurrSeg in the abstract packet -ghost -requires 1 <= numINF -requires 0 <= prevSegLen && prevSegLen <= currHFIdx -requires currHFIdx < segLen -requires hopFieldOffset(numINF, segLen) <= len(raw) -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures res != none[seq[io.IO_as]] ==> len(get(res)) == segLen - prevSegLen -decreases -pure func asidForCurrSeg( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - segLen int, - prevSegLen int, - consDir bool, - asid io.IO_as) (res option[seq[io.IO_as]]) { - return segLen == 0 ? some(seq[io.IO_as]{}) : - let left := asidsBefore(dp, raw, numINF, currHFIdx, prevSegLen, consDir, asid) in - let right := asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) in - (left == none[seq[io.IO_as]] || right == none[seq[io.IO_as]]) ? - none[seq[io.IO_as]] : - some(get(left) ++ get(right)[1:]) -} - -// returns a list of ASids of hopfields for LeftSeg in the abstract packet -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= len(raw) -requires currINFIdx <= numINF + 1 -requires 1 <= currINFIdx && currINFIdx < 4 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func asidsForLeftSeg(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as) (res option[seq[io.IO_as]]) { - return let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - (currINFIdx == 1 && seg2Len > 0) ? - asidForCurrSeg(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir, asid) : - (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? 
- asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir, asid) : - some(seq[io.IO_as]{}) -} - -// returns a list of ASids of hopfields for RightSeg in the abstract packet -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= len(raw) -requires currINFIdx <= numINF + 1 -requires -1 <= currINFIdx && currINFIdx < 2 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures (currINFIdx == 0 && res != none[seq[io.IO_as]]) ==> len(get(res)) == seg1Len -ensures (currINFIdx == 1 && seg2Len > 0 && res != none[seq[io.IO_as]]) ==> len(get(res)) == seg2Len -decreases -pure func asidsForRightSeg(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as) (res option[seq[io.IO_as]]) { - return (currINFIdx == 1 && seg2Len > 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir, asid) : - (currINFIdx == 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir, asid) : - some(seq[io.IO_as]{}) -} - -// returns a list of ASids of hopfields for MidSeg in the abstract packet -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= len(raw) -requires currINFIdx <= numINF + 1 -requires 2 <= currINFIdx && currINFIdx < 5 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -requires (currINFIdx == 4 && seg2Len > 0) ==> asid != none[io.IO_as] -requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> asid != none[io.IO_as] -decreases -pure func asidsForMidSeg(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid option[io.IO_as]) (res option[seq[io.IO_as]]) { - return (currINFIdx == 4 && seg2Len > 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, 1) in - asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir, get(asid)) : - (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, 2) in - asidForCurrSeg(dp, raw, numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir, get(asid)) : - some(seq[io.IO_as]{}) -} - -ghost -requires idx + path.HopLen <= len(raw) -requires 0 <= idx -requires acc(&raw[idx+2], _) && acc(&raw[idx+3], _) && acc(&raw[idx+4], _) && acc(&raw[idx+5], _) -ensures len(res.HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases -pure func hopField(raw []byte, idx int, beta set[io.IO_msgterm], asid io.IO_as, ainfo io.IO_ainfo) (res io.IO_HF) { - return let inif2 := binary.BigEndian.Uint16(raw[idx+2:idx+4]) in - let egif2 := binary.BigEndian.Uint16(raw[idx+4:idx+6]) in - let op_inif2 := inif2 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(inif2)) in - let op_egif2 := egif2 == 0 ? 
none[io.IO_ifs] : some(io.IO_ifs(egif2)) in - let ts := io.IO_msgterm(io.MsgTerm_Num{ainfo}) in - let l := io.IO_msgterm(io.MsgTerm_L{seq[io.IO_msgterm]{ts, io.if2term(op_inif2), io.if2term(op_egif2), - io.IO_msgterm(io.MsgTerm_FS{beta})}}) in - let hvf := io.mac(io.macKey(io.asidToKey(asid)), l) in - io.IO_HF(io.IO_HF_{ - InIF2 : op_inif2, - EgIF2 : op_egif2, - HVF : hvf, - }) -} - -ghost -requires 0 <= offset -requires 0 <= currHFIdx && currHFIdx <= len(asid) -requires offset + path.HopLen * len(asid) <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures len(res) == len(asid) - currHFIdx -ensures forall k int :: {res[k]} 0 <= k && k < len(res) ==> - len(res[k].HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases len(asid) - currHFIdx -pure func hopFieldsConsDir( - raw []byte, - offset int, - currHFIdx int, - beta set[io.IO_msgterm], - asid seq[io.IO_as], - ainfo io.IO_ainfo) (res seq[io.IO_HF]) { - return currHFIdx == len(asid) ? seq[io.IO_HF]{} : - let hf := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo)) in - seq[io.IO_HF]{hf} ++ hopFieldsConsDir(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf.HVF}), asid, ainfo) -} - -ghost -requires 0 <= offset -requires -1 <= currHFIdx && currHFIdx < len(asid) -requires offset + path.HopLen * currHFIdx + path.HopLen <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures len(res) == currHFIdx + 1 -ensures forall k int :: {res[k]} 0 <= k && k < len(res) ==> - len(res[k].HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases currHFIdx + 1 -pure func hopFieldsNotConsDir( - raw []byte, - offset int, - currHFIdx int, - beta set[io.IO_msgterm], - asid seq[io.IO_as], - ainfo io.IO_ainfo) (res seq[io.IO_HF]) { - return currHFIdx == -1 ? seq[io.IO_HF]{} : - let hf := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo)) in - hopFieldsNotConsDir(raw, offset, currHFIdx -1, (beta union set[io.IO_msgterm]{hf.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf} -} - -ghost -requires -1 <= currHFIdx && currHFIdx < len(hopfields) -decreases currHFIdx + 1 -pure func segPast(hopfields seq[io.IO_HF], currHFIdx int) seq[io.IO_HF] { - return currHFIdx == -1 ? - seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHFIdx]} ++ segPast(hopfields, currHFIdx - 1) -} - -ghost -requires 0 <= currHFIdx && currHFIdx <= len(hopfields) -decreases len(hopfields) - currHFIdx -pure func segFuture(hopfields seq[io.IO_HF], currHFIdx int) seq[io.IO_HF] { - return currHFIdx == len(hopfields) ? seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHFIdx]} ++ segFuture(hopfields, currHFIdx + 1) -} - -ghost -requires -1 <= currHFIdx && currHFIdx < len(hopfields) -decreases currHFIdx + 1 -pure func segHistory(hopfields seq[io.IO_HF], currHFIdx int) seq[io.IO_ahi] { - return currHFIdx == -1 ? seq[io.IO_ahi]{} : - seq[io.IO_ahi]{hopfields[currHFIdx].Toab()} ++ segHistory(hopfields, currHFIdx - 1) -} - -ghost -requires 0 <= offset -requires 0 < len(asid) -requires offset + path.HopLen * len(asid) <= len(raw) -requires 0 <= currHFIdx && currHFIdx <= len(asid) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func segment(raw []byte, - offset int, - currHFIdx int, - asid seq[io.IO_as], - ainfo io.IO_ainfo, - consDir bool, - peer bool) io.IO_seg2 { - return let hopfields := consDir ? 
- hopFieldsConsDir(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : - hopFieldsNotConsDir(raw, offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo) in - let uinfo := uInfo(hopfields, currHFIdx, consDir) in - io.IO_seg2(io.IO_seg3_{ - AInfo :ainfo, - UInfo : uinfo, - ConsDir : consDir, - Peer : peer, - Past : segPast(hopfields, currHFIdx - 1), - Future : segFuture(hopfields, currHFIdx), - History : segHistory(hopfields, currHFIdx - 1), - }) -} - -ghost -requires path.InfoFieldOffset(currINFIdx) + path.InfoLen <= offset -requires 0 < len(asid) -requires offset + path.HopLen * len(asid) <= len(raw) -requires 0 <= currHFIdx && currHFIdx <= len(asid) -requires 0 <= currINFIdx && currINFIdx < 3 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func currSeg(raw []byte, offset int, currINFIdx int, currHFIdx int, asid seq[io.IO_as]) io.IO_seg3 { - return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - let ainfo := timestamp(raw, currINFIdx) in - let consDir := path.ConsDir(raw, currINFIdx) in - let peer := path.Peer(raw, currINFIdx) in - segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer) -} - -ghost -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len) <= len(raw) -requires 1 <= currINFIdx && currINFIdx < 4 -requires (currINFIdx == 1 && seg2Len > 0) ==> len(asid) == seg2Len -requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func leftSeg( - raw []byte, - currINFIdx int, - seg1Len int, - seg2Len int, - seg3Len int, - asid seq[io.IO_as]) option[io.IO_seg3] { - return let offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) in - (currINFIdx == 1 && seg2Len > 0) ? - some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid)) : - ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) : - none[io.IO_seg3]) -} - -ghost -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len) <= len(raw) -requires -1 <= currINFIdx && currINFIdx < 2 -requires (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg2Len -requires (currINFIdx == 0 && seg2Len > 0) ==> len(asid) == seg1Len -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func rightSeg( - raw []byte, - currINFIdx int, - seg1Len int, - seg2Len int, - seg3Len int, - asid seq[io.IO_as]) option[io.IO_seg3] { - return let offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) in - (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ? - some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid)) : - (currINFIdx == 0 && seg2Len > 0) ? 
- some(currSeg(raw, offset, currINFIdx, seg1Len, asid)) : - none[io.IO_seg3] -} + "github.com/scionproto/scion/pkg/addr" + "golang.org/x/net/ipv4" -ghost -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len) <= len(raw) -requires 2 <= currINFIdx && currINFIdx < 5 -requires (currINFIdx == 4 && seg2Len > 0) ==> len(asid) == seg1Len -requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func midSeg( - raw []byte, - currINFIdx int, - seg1Len int, - seg2Len int, - seg3Len int, - asid seq[io.IO_as]) option[io.IO_seg3] { - return let offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) in - (currINFIdx == 4 && seg2Len > 0) ? - some(currSeg(raw, offset, 0, seg1Len, asid)) : - ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) : - none[io.IO_seg3]) -} + "verification/dependencies/encoding/binary" + "verification/io" + sl "verification/utils/slices" + . "verification/utils/definitions" +) ghost +opaque requires dp.Valid() -requires len(raw) > 4 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -requires unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - let hdr := binary.BigEndian.Uint32(raw[0:4]) in - let metaHdr := scion.DecodedFrom(hdr) in - let seg1 := int(metaHdr.SegLen[0]) in - let seg2 := int(metaHdr.SegLen[1]) in - let seg3 := int(metaHdr.SegLen[2]) in - let base := scion.Base{metaHdr, - numInfoFields(seg1, seg2, seg3), - seg1 + seg2 + seg3} in - metaHdr.InBounds() && - 0 < metaHdr.SegLen[0] && - base.ValidCurrInfSpec() && - base.ValidCurrHfSpec() && - len(raw) >= pktLen(seg1, seg2, seg3) -decreases -pure func absPkt(dp io.DataPlaneSpec, raw []byte, asid io.IO_as) option[io.IO_pkt2] { - return let hdr := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in binary.BigEndian.Uint32(raw[0:4]) in +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires slayers.ValidPktMetaHdr(raw) +decreases +pure func absPkt(dp io.DataPlaneSpec, raw []byte) (res io.IO_pkt2) { + return let _ := reveal slayers.ValidPktMetaHdr(raw) in + let headerOffset := slayers.GetAddressOffset(raw) in + let _ := Asserting(forall k int :: {&raw[headerOffset:headerOffset+scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> + &raw[headerOffset:headerOffset+scion.MetaLen][k] == &raw[headerOffset + k]) in + let hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) in let metaHdr := scion.DecodedFrom(hdr) in let currINFIdx := int(metaHdr.CurrINF) in let currHFIdx := int(metaHdr.CurrHF) in let seg1Len := int(metaHdr.SegLen[0]) in let seg2Len := int(metaHdr.SegLen[1]) in let seg3Len := int(metaHdr.SegLen[2]) in - let segLen := lengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in - let prevSegLen := lengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in - let numINF := numInfoFields(seg1Len, seg2Len, seg3Len) in - let offset := hopFieldOffset(numINF, 0) in - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - let currAsidSeq := asidForCurrSeg(dp, raw, numINF, currHFIdx, prevSegLen+segLen, prevSegLen, consDir, dp.Asid()) in - currAsidSeq == none[seq[io.IO_as]] ? 
none[io.IO_pkt2] : - let last := get(currAsidSeq)[segLen-1] in - let first := get(currAsidSeq)[0] in - let leftAsidSeq := asidsForLeftSeg(dp, raw, numINF, currINFIdx + 1, seg1Len, seg2Len, seg3Len, last) in - let rightAsidSeq := asidsForRightSeg(dp, raw, numINF, currINFIdx - 1, seg1Len, seg2Len, seg3Len, first) in - (leftAsidSeq == none[seq[io.IO_as]] || rightAsidSeq == none[seq[io.IO_as]]) ? none[io.IO_pkt2] : - let midAsid := ((currINFIdx == 0 && seg2Len > 0 && seg3Len > 0) ? some(get(leftAsidSeq)[len(get(leftAsidSeq))-1]) : - (currINFIdx == 2 && seg2Len > 0) ? some(get(rightAsidSeq)[0]) : none[io.IO_as]) in - let midAsidSeq := asidsForMidSeg(dp, raw, numINF, currINFIdx + 2, seg1Len, seg2Len, seg3Len, midAsid) in - midAsidSeq == none[seq[io.IO_as]] ? none[io.IO_pkt2] : - some(io.IO_pkt2(io.IO_Packet2{ - CurrSeg : currSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, get(currAsidSeq)), - LeftSeg : leftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, get(leftAsidSeq)), - MidSeg : midSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, get(midAsidSeq)), - RightSeg : rightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, get(rightAsidSeq)), - })) -} - - -ghost -requires 0 <= offset -requires path.InfoFieldOffset(offset) + 8 < len(raw) -requires acc(&raw[path.InfoFieldOffset(offset) + 4], _) -requires acc(&raw[path.InfoFieldOffset(offset) + 5], _) -requires acc(&raw[path.InfoFieldOffset(offset) + 6], _) -requires acc(&raw[path.InfoFieldOffset(offset) + 7], _) -decreases -pure func timestamp(raw []byte, offset int) io.IO_ainfo { - return let idx := path.InfoFieldOffset(offset) + 4 in - io.IO_ainfo(binary.BigEndian.Uint32(raw[idx : idx + 4])) + let segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in + let prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in + let numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) in + let offset := scion.HopFieldOffset(numINF, 0, headerOffset) in + io.IO_pkt2(io.IO_Packet2{ + CurrSeg : scion.CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffset), + LeftSeg : scion.LeftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, headerOffset), + MidSeg : scion.MidSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, headerOffset), + RightSeg : scion.RightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, headerOffset), + }) } ghost @@ -535,66 +76,47 @@ requires forall idx int :: {hopfields[idx]} 0 <= idx && idx < len(hopfields) ==> len(hopfields[idx].HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 decreases pure func uInfo(hopfields seq[io.IO_HF], currHFIdx int, consDir bool) set[io.IO_msgterm] { - return currHFIdx == len(hopfields) ? - hvfSet(hopfields[currHFIdx-1]) : - (currHFIdx == 0 ? + return currHFIdx + 1 >= len(hopfields) ? + hvfSet(hopfields[len(hopfields)-1]) : + (consDir ? hvfSet(hopfields[currHFIdx]) : - (consDir ? - hvfSet(hopfields[currHFIdx]) : - hvfSet(hopfields[currHFIdx-1]))) + hvfSet(hopfields[currHFIdx+1])) } ghost -decreases -pure func ifsToIO_ifs(ifs uint16) option[io.IO_ifs]{ - return ifs == 0 ? 
none[io.IO_ifs] : some(io.IO_ifs(ifs)) -} - -ghost -opaque -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func validPktMetaHdr(raw []byte) bool { - return len(raw) > 4 && - unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - let hdr := binary.BigEndian.Uint32(raw[0:4]) in - let metaHdr := scion.DecodedFrom(hdr) in - let seg1 := int(metaHdr.SegLen[0]) in - let seg2 := int(metaHdr.SegLen[1]) in - let seg3 := int(metaHdr.SegLen[2]) in - let base := scion.Base{metaHdr, - numInfoFields(seg1, seg2, seg3), - seg1 + seg2 + seg3} in - metaHdr.InBounds() && - 0 < metaHdr.SegLen[0] && - base.ValidCurrInfSpec() && - base.ValidCurrHfSpec() && - len(raw) >= pktLen(seg1, seg2, seg3) -} - -ghost -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) ensures val.isIO_val_Unsupported -ensures val.IO_val_Unsupported_1 == ifsToIO_ifs(ingressID) +ensures val.IO_val_Unsupported_1 == path.ifsToIO_ifs(ingressID) decreases pure func absIO_val_Unsupported(raw []byte, ingressID uint16) (val io.IO_val) { return io.IO_val(io.IO_val_Unsupported{ - ifsToIO_ifs(ingressID), + path.ifsToIO_ifs(ingressID), io.Unit(io.Unit_{}), }) } ghost +opaque requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) ensures val.isIO_val_Pkt2 || val.isIO_val_Unsupported decreases pure func absIO_val(dp io.DataPlaneSpec, raw []byte, ingressID uint16) (val io.IO_val) { - return (reveal validPktMetaHdr(raw) && absPkt(dp, raw, dp.asid()) != none[io.IO_pkt2]) ? - io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw, dp.asid()))}) : + return (reveal slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw)) ? + io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(dp, raw)}) : absIO_val_Unsupported(raw, ingressID) } +ghost +requires dp.Valid() +requires respr.OutPkt != nil ==> + acc(sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)), R56) +decreases +pure func absReturnErr(dp io.DataPlaneSpec, respr processResult) (val io.IO_val) { + return respr.OutPkt == nil ? 
io.IO_val_Unit{} :
+		absIO_val(dp, respr.OutPkt, respr.EgressID)
+}
+
 ghost
 requires acc(&d.localIA, _)
 decreases
@@ -625,7 +147,8 @@ ghost
 requires acc(&d.linkTypes, _) && (d.linkTypes != nil ==> acc(d.linkTypes, _))
 decreases
 pure func (d *DataPlane) dpSpecWellConfiguredLinkTypes(dp io.DataPlaneSpec) bool {
-	return forall ifs uint16 :: {ifs in domain(d.linkTypes)} ifs in domain(d.linkTypes) ==>
+	return !(0 in domain(d.linkTypes)) &&
+		forall ifs uint16 :: {ifs in domain(d.linkTypes)} ifs in domain(d.linkTypes) ==>
 		io.IO_ifs(ifs) in domain(dp.GetLinkTypes()) &&
 		absLinktype(d.linkTypes[ifs]) == dp.GetLinkType(io.IO_ifs(ifs))
 }
@@ -640,3 +163,32 @@ pure func (d *DataPlane) DpAgreesWithSpec(dp io.DataPlaneSpec) bool {
 		d.dpSpecWellConfiguredNeighborIAs(dp) &&
 		d.dpSpecWellConfiguredLinkTypes(dp)
 }
+
+ghost
+requires acc(d.Mem(), _)
+requires d.DpAgreesWithSpec(dp)
+ensures acc(&d.linkTypes, _) && (d.linkTypes != nil ==> acc(d.linkTypes, _))
+ensures d.dpSpecWellConfiguredLinkTypes(dp)
+decreases
+func (d *DataPlane) LinkTypesLemma(dp io.DataPlaneSpec) {
+	reveal d.DpAgreesWithSpec(dp)
+	unfold acc(d.Mem(), _)
+}
+
+ghost
+requires dp.Valid()
+requires acc(msg.Mem(), R50)
+decreases
+pure func MsgToAbsVal(dp io.DataPlaneSpec, msg *ipv4.Message, ingressID uint16) (res io.IO_val) {
+	return unfolding acc(msg.Mem(), R50) in
+		absIO_val(dp, msg.Buffers[0], ingressID)
+}
+
+// This assumption will be dropped as soon as we can establish that the contents
+// of the underlying buffer did not change between the call to `decodeLayers` and
+// `p.processSCION` in the function `processPkt` in the router.
+ghost
+ensures absPkt.isIO_val_Pkt2 ==>
+	ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(ingressID), absPkt.IO_val_Pkt2_2)
+decreases
+func TemporaryAssumeForIOWitness(absPkt io.IO_val, ingressID uint16, ioSharedArg SharedArg)
\ No newline at end of file
diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra
new file mode 100644
index 000000000..61580235e
--- /dev/null
+++ b/router/widen-lemma.gobra
@@ -0,0 +1,972 @@
+// Copyright 2022 ETH Zurich
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +gobra
+
+package router
+
+import (
+	sl "verification/utils/slices"
+	"verification/io"
+	. "verification/utils/definitions"
+	"verification/dependencies/encoding/binary"
+	"github.com/scionproto/scion/pkg/slayers/path"
+	"github.com/scionproto/scion/pkg/slayers/path/scion"
+)
+
+// Some things in this file can be simplified. Nonetheless, the important definition here
+// is absIO_valWidenLemma. Everything else can be seen as an implementation detail.
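+//
+// All of these lemmas rest on a single observation: a prefix slice aliases
+// its underlying buffer, i.e. &raw[:length][i] == &raw[i] for 0 <= i < length.
+// Hence any pure abstraction function that only reads bytes below `length`
+// computes the same value on `raw` as on `raw[:length]`, and each lemma below
+// merely threads this fact through one abstraction function. As a sketch, the
+// recurring unfold/assert/fold pattern (the same one consDirWidenLemma uses
+// further down; nothing here is an extra proof obligation):
+//
+//   unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+//   unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56)
+//   assert &raw[i] == &raw[:length][i] // prefix aliasing, for 0 <= i < length
+//   assert raw[i] == raw[:length][i]   // hence the byte values agree
+//   fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+//   fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56)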
+// TODO: prove Lemma +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) +preserves dp.Valid() +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) +ensures absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==> + absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID) +decreases +func absIO_valWidenLemma(dp io.DataPlaneSpec, raw []byte, ingressID uint16, length int) +/* { + var ret1 io.IO_val + var ret2 io.IO_val + + if (validPktMetaHdr(raw[:length]) && absPkt(dp, raw[:length]) != none[io.IO_pkt2]) { + validPktMetaHdrWidenLemma(raw, length) + assert validPktMetaHdr(raw) + absPktWidenLemma(dp, raw, length) + assert absPkt(dp, raw) != none[io.IO_pkt2] + + ret1 = io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw))}) + ret2 = io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw[:length]))}) + assert ret1 == reveal absIO_val(dp, raw, ingressID) + assert ret2 == reveal absIO_val(dp, raw[:length], ingressID) + assert ret1 == ret2 + assert absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==> + absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID) + } else { + assert !(reveal absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2) + } +} + +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +requires validPktMetaHdr(raw[:length]) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures validPktMetaHdr(raw) +decreases +func validPktMetaHdrWidenLemma(raw []byte, length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + reveal validPktMetaHdr(raw[:length]) + ret1 := reveal validPktMetaHdr(raw) + ret2 := reveal validPktMetaHdr(raw[:length]) + assert ret1 == ret2 + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) +} + +ghost +requires 0 <= length && length <= len(raw) +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) +requires validPktMetaHdr(raw) +requires validPktMetaHdr(raw[:length]) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) +ensures validPktMetaHdr(raw) +ensures validPktMetaHdr(raw[:length]) +ensures absPkt(dp, raw) == absPkt(dp, raw[:length]) +decreases +func absPktWidenLemma(dp io.DataPlaneSpec, raw []byte, length int) { + + // declarations + var last1 io.IO_as + var last2 io.IO_as + var first1 io.IO_as + var first2 io.IO_as + var leftAsidSeq1 option[seq[io.IO_as]] + var leftAsidSeq2 option[seq[io.IO_as]] + var rightAsidSeq1 option[seq[io.IO_as]] + var rightAsidSeq2 option[seq[io.IO_as]] + var midAsidSeq1 option[seq[io.IO_as]] + var midAsidSeq2 option[seq[io.IO_as]] + var midAsid1 option[io.IO_as] + var midAsid2 option[io.IO_as] + var ret1 option[io.IO_pkt2] + var ret2 option[io.IO_pkt2] + var lm bool + + // abspkt step by step + _ := reveal validPktMetaHdr(raw) + _ := reveal validPktMetaHdr(raw[:length]) + hdr1 := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in 
binary.BigEndian.Uint32(raw[0:4]) + hdr2 := unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) + assert unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) == unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) + assert hdr1 == hdr2 + + metaHdr1 := scion.DecodedFrom(hdr1) + metaHdr2 := scion.DecodedFrom(hdr2) + assert metaHdr1 == metaHdr2 + + currINFIdx1 := int(metaHdr1.CurrINF) + currINFIdx2 := int(metaHdr2.CurrINF) + assert currINFIdx1 == currINFIdx2 + + currHFIdx1 := int(metaHdr1.CurrHF) + currHFIdx2 := int(metaHdr2.CurrHF) + assert currHFIdx1 == currHFIdx2 + + seg1Len1 := int(metaHdr1.SegLen[0]) + seg1Len2 := int(metaHdr2.SegLen[0]) + assert seg1Len1 == seg1Len2 + + seg2Len1 := int(metaHdr1.SegLen[1]) + seg2Len2 := int(metaHdr2.SegLen[1]) + assert seg2Len1 == seg2Len2 + + seg3Len1 := int(metaHdr1.SegLen[2]) + seg3Len2 := int(metaHdr2.SegLen[2]) + assert seg3Len1 == seg3Len2 + + segLen1 := lengthOfCurrSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) + segLen2 := lengthOfCurrSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) + assert segLen1 == segLen2 + + prevSegLen1 := lengthOfPrevSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) + prevSegLen2 := lengthOfPrevSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) + assert prevSegLen1 == prevSegLen2 + + numINF1 := numInfoFields(seg1Len1, seg2Len1, seg3Len1) + numINF2 := numInfoFields(seg1Len2, seg2Len2, seg3Len2) + assert numINF1 == numINF2 + + offset1 := hopFieldOffset(numINF1, 0) + offset2 := hopFieldOffset(numINF2, 0) + assert offset1 == offset2 + + consDir1 := path.ConsDir(raw, currINFIdx1) + consDir2 := path.ConsDir(raw[:length], currINFIdx2) + consDirWidenLemma(raw, length, currINFIdx1) + assert consDir1 == consDir2 + + asidForCurrSegWidenLemma(dp, raw, numINF1, currHFIdx1, prevSegLen1+segLen1, prevSegLen1, consDir1, dp.Asid(), length) + currAsidSeq2 := asidForCurrSeg(dp, raw, numINF1, currHFIdx1, prevSegLen1+segLen1, prevSegLen1, consDir1, dp.Asid()) + currAsidSeq1 := asidForCurrSeg(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2+segLen2, prevSegLen2, consDir2, dp.Asid()) + assert currAsidSeq1 == currAsidSeq2 + + if (currAsidSeq1 == none[seq[io.IO_as]]) { + ret := none[io.IO_pkt2] + assert ret == reveal absPkt(dp, raw) + assert ret == reveal absPkt(dp, raw[:length]) + } else { + + last1 = get(currAsidSeq1)[segLen1-1] + last2 = get(currAsidSeq2)[segLen1-1] + assert last1 == last2 + + first1 = get(currAsidSeq1)[0] + first2 = get(currAsidSeq2)[0] + assert first1 == first2 + + asidsForLeftSegWidenLemma(dp, raw, numINF1, currINFIdx1+1, seg1Len1, seg2Len1, seg3Len1, last1, length) + leftAsidSeq1 = asidsForLeftSeg(dp, raw, numINF1, currINFIdx1 + 1, seg1Len1, seg2Len1, seg3Len1, last1) + leftAsidSeq2 = asidsForLeftSeg(dp, raw[:length], numINF2, currINFIdx2 + 1, seg1Len2, seg2Len2, seg3Len2, last2) + assert leftAsidSeq1 == leftAsidSeq2 + + asidsForRightSegWidenLemma(dp, raw, numINF1, currINFIdx1-1, seg1Len1, seg2Len1, seg3Len1, first1, length) + rightAsidSeq1 = asidsForRightSeg(dp, raw, numINF1, currINFIdx1 - 1, seg1Len1, seg2Len1, seg3Len1, first1) + rightAsidSeq2 = asidsForRightSeg(dp, raw[:length], numINF2, currINFIdx2 - 1, seg1Len2, seg2Len2, seg3Len2, first2) + assert rightAsidSeq1 == rightAsidSeq2 + + if (leftAsidSeq1 == none[seq[io.IO_as]] || rightAsidSeq1 == none[seq[io.IO_as]]) { + ret := none[io.IO_pkt2] + assert ret == reveal absPkt(dp, raw) + assert ret == 
reveal absPkt(dp, raw[:length]) + } else { + assert leftAsidSeq2 != none[seq[io.IO_as]] && rightAsidSeq2 != none[seq[io.IO_as]] + + midAsid1 = ((currINFIdx1 == 0 && seg2Len1 > 0 && seg3Len1 > 0) ? some(get(leftAsidSeq1)[len(get(leftAsidSeq1))-1]) : (currINFIdx1 == 2 && seg2Len1 > 0) ? some(get(rightAsidSeq1)[0]) : none[io.IO_as]) + midAsid2 = ((currINFIdx2 == 0 && seg2Len2 > 0 && seg3Len2 > 0) ? some(get(leftAsidSeq2)[len(get(leftAsidSeq2))-1]) : (currINFIdx2 == 2 && seg2Len2 > 0) ? some(get(rightAsidSeq2)[0]) : none[io.IO_as]) + assert midAsid1 == midAsid2 + + asidsForMidSegWidenLemma(dp, raw, numINF1, currINFIdx1+2, seg1Len1, seg2Len1, seg3Len1, midAsid1, length) + midAsidSeq1 = asidsForMidSeg(dp, raw, numINF1, currINFIdx1 + 2, seg1Len1, seg2Len1, seg3Len1, midAsid1) + midAsidSeq2 = asidsForMidSeg(dp, raw[:length], numINF2, currINFIdx2 + 2, seg1Len2, seg2Len2, seg3Len2, midAsid2) + assert midAsidSeq1 == midAsidSeq2 + if (midAsidSeq1 == none[seq[io.IO_as]]) { + ret := none[io.IO_pkt2] + assert ret == reveal absPkt(dp, raw) + assert ret == reveal absPkt(dp, raw[:length]) + } else { + currSegWidenLemma(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, get(currAsidSeq1), length) + leftSegWidenLemma(raw, currINFIdx1 + 1, seg1Len1, seg2Len1, seg3Len1, get(leftAsidSeq1), length) + midSegWidenLemma(raw, currINFIdx1 + 2, seg1Len1, seg2Len1, seg3Len1, get(midAsidSeq1), length) + rightSegWidenLemma(raw, currINFIdx1 - 1, seg1Len1, seg2Len1, seg3Len1, get(rightAsidSeq1), length) + ret1 = some(io.IO_pkt2(io.IO_Packet2{ + CurrSeg : currSeg(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, get(currAsidSeq1)), + LeftSeg : leftSeg(raw, currINFIdx1 + 1, seg1Len1, seg2Len1 , seg3Len1, get(leftAsidSeq1)), + MidSeg : midSeg(raw, currINFIdx1 + 2, seg1Len1, seg2Len1 , seg3Len1, get(midAsidSeq1)), + RightSeg : rightSeg(raw, currINFIdx1 - 1, seg1Len1, seg2Len1 , seg3Len1, get(rightAsidSeq1)), + })) + ret2 = some(io.IO_pkt2(io.IO_Packet2{ + CurrSeg : currSeg(raw[:length], offset2+prevSegLen2, currINFIdx2, currHFIdx2-prevSegLen2, get(currAsidSeq2)), + LeftSeg : leftSeg(raw[:length], currINFIdx2 + 1, seg1Len2, seg2Len2 , seg3Len2, get(leftAsidSeq2)), + MidSeg : midSeg(raw[:length], currINFIdx2 + 2, seg1Len2, seg2Len2 , seg3Len2, get(midAsidSeq2)), + RightSeg : rightSeg(raw[:length], currINFIdx2 - 1, seg1Len2, seg2Len2 , seg3Len2, get(rightAsidSeq2)), + })) + reveal absPkt(dp, raw) + reveal absPkt(dp, raw[:length]) + assert ret1 == absPkt(dp, raw) + assert ret2 == absPkt(dp, raw[:length]) + assert ret1 == ret2 + } + } + } +} + +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) +requires 0 <= currINFIdx +requires path.InfoFieldOffset(currINFIdx) < length +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) +ensures path.ConsDir(raw, currINFIdx) == path.ConsDir(raw[:length], currINFIdx) +decreases +func consDirWidenLemma(raw []byte, length int, currINFIdx int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + assert &raw[path.InfoFieldOffset(currINFIdx)] == &raw[:length][path.InfoFieldOffset(currINFIdx)] + assert raw[path.InfoFieldOffset(currINFIdx)] == raw[:length][path.InfoFieldOffset(currINFIdx)] + assert (raw[path.InfoFieldOffset(currINFIdx)] & 0x1 == 0x1) == (raw[:length][path.InfoFieldOffset(currINFIdx)] & 
0x1 == 0x1) + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) +} + +ghost +requires 0 <= length && length <= len(raw) +requires 1 <= numINF1 +requires 0 <= prevSegLen1 && prevSegLen1 <= currHFIdx1 +requires currHFIdx1 < segLen1 +requires hopFieldOffset(numINF1, segLen1) <= length +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) == + asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) +decreases +func asidForCurrSegWidenLemma( + dp io.DataPlaneSpec, + raw []byte, + numINF1 int, + currHFIdx1 int, + segLen1 int, + prevSegLen1 int, + consDir1 bool, + asid1 io.IO_as, + length int) { + + var ret1 option[seq[io.IO_as]] + var ret2 option[seq[io.IO_as]] + var left1 option[seq[io.IO_as]] + var left2 option[seq[io.IO_as]] + var right1 option[seq[io.IO_as]] + var right2 option[seq[io.IO_as]] + + + if (segLen1 == 0) { + assert segLen1 == 0 + ret1 = some(seq[io.IO_as]{}) + ret2 = some(seq[io.IO_as]{}) + assert ret1 == asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 + } else { + asidsBeforeWidenLemma(dp, raw, numINF1, numINF1, currHFIdx1, currHFIdx1, prevSegLen1, prevSegLen1, consDir1, consDir1, asid1, asid1, length) + left1 = asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + left2 = asidsBefore(dp, raw[:length], numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert left1 == left2 + newP := (R52 + R53)/2 + asidsAfterWidenLemma(dp, raw, numINF1, currHFIdx1, segLen1, consDir1, asid1, length, newP) + right1 = asidsAfter(dp, raw, numINF1, currHFIdx1, segLen1, consDir1, asid1) + right2 = asidsAfter(dp, raw[:length], numINF1, currHFIdx1, segLen1, consDir1, asid1) + assert right1 == right2 + if (left1 == none[seq[io.IO_as]] || right1 == none[seq[io.IO_as]]) { + assert (left2 == none[seq[io.IO_as]] || right2 == none[seq[io.IO_as]]) + ret1 = none[seq[io.IO_as]] + ret2 = none[seq[io.IO_as]] + assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 + } else { + assert (left2 != none[seq[io.IO_as]] && right2 != none[seq[io.IO_as]]) + ret1 = some(get(left1) ++ get(right1)[1:]) + ret2 = some(get(left2) ++ get(right2)[1:]) + assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 + } + } + assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 +} + +ghost +requires 1 <= numINF1 +requires 0 <= prevSegLen1 && prevSegLen1 <= currHFIdx1 +requires length <= len(raw) +requires hopFieldOffset(numINF1, currHFIdx1) + path.HopLen <= length 
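+// (The bound above guarantees that the prefix still contains the whole hop
+// field at currHFIdx1, so the widened reads performed below stay within
+// raw[:length].)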
+requires dp.Valid() +requires consDir1 == consDir2 +requires prevSegLen1 == prevSegLen2 +requires currHFIdx1 == currHFIdx2 +requires numINF1 == numINF2 +requires asid1 == asid2 +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) +ensures forall i int :: { &raw[i] } 0 <= i && i < len(raw) ==> old(unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) in raw[i]) == (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) in raw[i]) +ensures forall i int :: { &raw[:length][i] } 0 <= i && i < len(raw[:length]) ==> old(unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) in raw[:length][i]) == (unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) in raw[:length][i]) +ensures asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) == + asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) +decreases currHFIdx1 - prevSegLen1 +func asidsBeforeWidenLemma( + dp io.DataPlaneSpec, + raw []byte, + numINF1 int, + numINF2 int, + currHFIdx1 int, + currHFIdx2 int, + prevSegLen1 int, + prevSegLen2 int, + consDir1 bool, + consDir2 bool, + asid1 io.IO_as, + asid2 io.IO_as, + length int) { + + var ret1 option[seq[io.IO_as]] + var ret2 option[seq[io.IO_as]] + var nextAsid1 option[io.IO_as] + var nextAsid2 option[io.IO_as] + var nextAsidSeq1 option[seq[io.IO_as]] + var nextAsidSeq2 option[seq[io.IO_as]] + + if (currHFIdx1 == prevSegLen1) { + assert currHFIdx2 == prevSegLen2 + ret1 = some(seq[io.IO_as]{asid1}) + ret2 = some(seq[io.IO_as]{asid2}) + assert ret1 == ret2 + } else { + assert currHFIdx2 != prevSegLen2 + nextAsid1 = asidFromIfs(dp, raw, numINF1, currHFIdx1, !consDir1, asid1) + nextAsid2 = asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, !consDir2, asid2) + asidFromIfsWidenLemma(dp, raw, numINF1, numINF2, currHFIdx1, currHFIdx2, !consDir1, !consDir2, asid1, asid2, length) + assert nextAsid1 == nextAsid2 + if (nextAsid1 == none[io.IO_as]) { + assert nextAsid2 == none[io.IO_as] + ret1 = none[seq[io.IO_as]] + ret2 = none[seq[io.IO_as]] + assert ret1 == ret2 + assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert ret2 == asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) + } else { + assert nextAsid2 != none[io.IO_as] + asidsBeforeWidenLemma(dp, raw, numINF1, numINF2, currHFIdx1-1, currHFIdx2-1, prevSegLen1, prevSegLen2, consDir1, consDir2, get(nextAsid1), get(nextAsid2), length) + nextAsidSeq1 = asidsBefore(dp, raw, numINF1, currHFIdx1-1, prevSegLen1, consDir1, get(nextAsid1)) + nextAsidSeq2 = asidsBefore(dp, raw[:length], numINF2, currHFIdx2-1, prevSegLen2, consDir2, get(nextAsid2)) + assert nextAsidSeq1 == nextAsidSeq2 + if (nextAsidSeq1 == none[seq[io.IO_as]]) { + assert nextAsidSeq2 == none[seq[io.IO_as]] + ret1 = none[seq[io.IO_as]] + ret2 = none[seq[io.IO_as]] + assert ret1 == ret2 + assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert ret2 == asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) + } else { + ret1 = some(get(nextAsidSeq1) ++ seq[io.IO_as]{asid1}) + ret2 = some(get(nextAsidSeq2) ++ seq[io.IO_as]{asid2}) + assert ret1 == ret2 + assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert ret2 == asidsBefore(dp, 
raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) + } + } + } +} + +ghost +requires 1 <= numINF1 +requires 0 <= currHFIdx1 +requires numINF1 == numINF2 +requires currHFIdx1 == currHFIdx2 +requires consDir1 == consDir2 +requires asid1 == asid2 +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF1, currHFIdx1) + path.HopLen <= length +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) +ensures asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) == + asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) +decreases +func asidFromIfsWidenLemma( + dp io.DataPlaneSpec, + raw []byte, + numINF1 int, + numINF2 int, + currHFIdx1 int, + currHFIdx2 int, + consDir1 bool, + consDir2 bool, + asid1 io.IO_as, + asid2 io.IO_as, + length int) { + var ret1 option[io.IO_as] + var ret2 option[io.IO_as] + + idx1 := hopFieldOffset(numINF1, currHFIdx1) + idx2 := hopFieldOffset(numINF2, currHFIdx1) + assert idx1 == idx2 + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) + assert forall i int :: { &raw[idx1+2+i] } { &raw[idx1+2:idx1+4][i] } 0 <= i && i < 2 ==> + &raw[idx1+2+i] == &raw[idx1+2:idx1+4][i] + assert forall i int :: { &raw[:length][idx2+2+i] } { &raw[:length][idx2+2:idx2+4][i] } 0 <= i && i < 2 ==> + &raw[:length][idx2+2+i] == &raw[:length][idx2+2:idx2+4][i] + assert forall i int :: { &raw[idx1+4+i] } { &raw[idx1+4:idx1+6][i] } 0 <= i && i < 2 ==> + &raw[idx1+4+i] == &raw[idx1+4:idx1+6][i] + assert forall i int :: { &raw[:length][idx2+4+i] } { &raw[idx2+4:idx2+6][i] } 0 <= i && i < 2 ==> + &raw[:length][idx2+4+i] == &raw[:length][idx2+4:idx2+6][i] + ifs1 := consDir1 ? binary.BigEndian.Uint16(raw[idx1+4:idx1+6]) : binary.BigEndian.Uint16(raw[idx1+2:idx1+4]) + ifs2 := consDir2 ? 
binary.BigEndian.Uint16(raw[:length][idx2+4:idx2+6]) : binary.BigEndian.Uint16(raw[:length][idx2+2:idx2+4]) + assert ifs1 == ifs2 + asIfPair1 := io.AsIfsPair{asid1, io.IO_ifs(ifs1)} + asIfPair2 := io.AsIfsPair{asid2, io.IO_ifs(ifs2)} + assert asIfPair1 == asIfPair2 + if (asIfPair1 in domain(dp.GetLinks())) { + assert asIfPair2 in domain(dp.GetLinks()) + ret1 = some(dp.Lookup(asIfPair1).asid) + ret2 = some(dp.Lookup(asIfPair2).asid) + assert ret1 == ret2 + assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) + assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) + } else { + assert !(asIfPair2 in domain(dp.GetLinks())) + ret1 = none[io.IO_as] + ret2 = none[io.IO_as] + assert ret1 == ret2 + assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) + assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) + } + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) + assert ret1 == ret2 + assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) + assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) +} + +// --- The file has been simplified past this point + +ghost +requires R53 < p +requires 1 <= numINF +requires 0 <= currHFIdx && currHFIdx < segLen +requires length <= len(raw) +requires hopFieldOffset(numINF, segLen) <= length +requires dp.Valid() +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p) +ensures asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) == + asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) +decreases segLen - currHFIdx + 1 +func asidsAfterWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currHFIdx int, segLen int, consDir bool, asid io.IO_as, length int, p perm) { + if currHFIdx != segLen - 1 { + nextAsid1 := asidFromIfs(dp, raw, numINF, currHFIdx, consDir, asid) + nextAsid2 := asidFromIfs(dp, raw[:length], numINF, currHFIdx, consDir, asid) + asidFromIfsWidenLemma(dp, raw, numINF, numINF, currHFIdx, currHFIdx, consDir, consDir, asid, asid, length) + assert nextAsid1 == nextAsid2 + if nextAsid1 == none[io.IO_as] { + ret := none[seq[io.IO_as]] + assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) + assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) + } else { + newP := (p + R53)/2 + asidsAfterWidenLemma(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(nextAsid1), length, newP) + nextAsidSeq1 := asidsAfter(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(nextAsid1)) + nextAsidSeq2 := asidsAfter(dp, raw[:length], numINF, currHFIdx+1, segLen, consDir, get(nextAsid2)) + assert nextAsidSeq1 == nextAsidSeq2 + if nextAsidSeq1 == none[seq[io.IO_as]] { + ret := none[seq[io.IO_as]] + assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) + assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) + } else { + ret := some(seq[io.IO_as]{asid} ++ get(nextAsidSeq1)) + assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) + assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) + } + } + } +} + +ghost +requires dp.Valid() +requires 1 <= numINF +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF, seg1Len + seg2Len + 
seg3Len) <= length +requires currINFIdx <= numINF + 1 +requires 1 <= currINFIdx && currINFIdx < 4 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func asidsForLeftSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as, length int) { + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + consDirWidenLemma(raw, length, currINFIdx) + assert consDir1 == consDir2 + + if currINFIdx == 1 && seg2Len > 0 { + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir2, asid) + assert ret1 == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir2, asid) + assert ret1 == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := some(seq[io.IO_as]{}) + assert ret == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires dp.Valid() +requires 1 <= numINF +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= length +requires currINFIdx <= numINF + 1 +requires -1 <= currINFIdx && currINFIdx < 2 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func asidsForRightSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int,seg3Len int, asid io.IO_as, length int) { + if currINFIdx == 1 && seg2Len > 0 { + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + consDirWidenLemma(raw, length, currINFIdx) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len+seg2Len-1, 
seg1Len+seg2Len, seg1Len, consDir2, asid) + assert ret1 == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 0 { + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + consDirWidenLemma(raw, length, currINFIdx) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len-1, seg1Len, 0, consDir2, asid) + assert ret1 == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := some(seq[io.IO_as]{}) + assert ret == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires dp.Valid() +requires 1 <= numINF +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= length +requires currINFIdx <= numINF + 1 +requires 2 <= currINFIdx && currINFIdx < 5 +requires (currINFIdx == 4 && seg2Len > 0) ==> asid != none[io.IO_as] +requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> asid != none[io.IO_as] +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func asidsForMidSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid option[io.IO_as], length int) { + if currINFIdx == 4 && seg2Len > 0 { + consDir1 := path.ConsDir(raw, 1) + consDir2 := path.ConsDir(raw[:length], 1) + consDirWidenLemma(raw, length, 1) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, get(asid), length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, get(asid)) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len-1, seg1Len, 0, consDir2, get(asid)) + assert ret1 == reveal asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { + consDir1 := path.ConsDir(raw, 2) + consDir2 := path.ConsDir(raw[:length], 2) + consDirWidenLemma(raw, length, 2) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, get(asid), length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir1, get(asid)) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir2, get(asid)) + assert ret1 == reveal asidsForMidSeg(dp, raw, numINF, 
currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := some(seq[io.IO_as]{}) + assert ret == reveal asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires path.InfoFieldOffset(currINFIdx) + path.InfoLen <= offset +requires 0 < len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * len(asid) <= length +requires 0 <= currHFIdx && currHFIdx <= len(asid) +requires 0 <= currINFIdx && currINFIdx < 3 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures currSeg(raw, offset, currINFIdx, currHFIdx, asid) == + currSeg(raw[:length], offset, currINFIdx, currHFIdx, asid) +decreases +func currSegWidenLemma(raw []byte, offset int, currINFIdx int, currHFIdx int, asid seq[io.IO_as], length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) + + ainfo1 := path.Timestamp(raw, currINFIdx) + ainfo2 := path.Timestamp(raw[:length], currINFIdx) + assert ainfo1 == ainfo2 + + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + assert consDir1 == consDir2 + + peer1 := path.Peer(raw, currINFIdx) + peer2 := path.Peer(raw[:length], currINFIdx) + assert peer1 == peer2 + + segmentWidenLemma(raw, offset, currHFIdx, asid, ainfo1, consDir1, peer1, length) + ret1 := segment(raw, offset, currHFIdx, asid, ainfo1, consDir1, peer1) + ret2 := segment(raw[:length], offset, currHFIdx, asid, ainfo2, consDir2, peer2) + assert ret1 == reveal currSeg(raw, offset, currINFIdx, currHFIdx, asid) + assert ret2 == reveal currSeg(raw[:length], offset, currINFIdx, currHFIdx, asid) + assert ret1 == ret2 + + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) +} + +ghost +requires 0 <= offset +requires 0 < len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * len(asid) <= length +requires 0 <= currHFIdx && currHFIdx <= len(asid) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer) == segment(raw[:length], offset, currHFIdx, asid, ainfo, consDir, peer) +decreases +func segmentWidenLemma(raw []byte, offset int, currHFIdx int, asid seq[io.IO_as], ainfo io.IO_ainfo, consDir bool, peer bool, length int) { + newP := (R52 + R53)/2 + assert R53 < newP && newP < R52 + hopFieldsConsDirWidenLemma(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo, length, newP) + hopFieldsNotConsDirWidenLemma(raw, offset, len(asid)-1, set[io.IO_msgterm]{}, asid, ainfo, length, newP) + hopfields1 := consDir ? hopFieldsConsDir(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : hopFieldsNotConsDir(raw, offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo) + hopfields2 := consDir ? 
hopFieldsConsDir(raw[:length], offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : hopFieldsNotConsDir(raw[:length], offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo) + assert hopfields1 == hopfields2 + + uinfo := uInfo(hopfields1, currHFIdx, consDir) + + ret1 := io.IO_seg2(io.IO_seg3_{ + AInfo :ainfo, + UInfo : uinfo, + ConsDir : consDir, + Peer : peer, + Past : segPast(hopfields1, currHFIdx - 1), + Future : segFuture(hopfields1, currHFIdx), + History : segHistory(hopfields1, currHFIdx - 1), + }) + ret2 := io.IO_seg2(io.IO_seg3_{ + AInfo :ainfo, + UInfo : uinfo, + ConsDir : consDir, + Peer : peer, + Past : segPast(hopfields2, currHFIdx - 1), + Future : segFuture(hopfields2, currHFIdx), + History : segHistory(hopfields2, currHFIdx - 1), + }) + assert ret1 == segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer) + assert ret2 == segment(raw[:length], offset, currHFIdx, asid, ainfo, consDir, peer) + assert ret1 == ret2 +} + +ghost +requires R53 < p +requires 0 <= offset +requires 0 <= currHFIdx && currHFIdx <= len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * len(asid) <= length +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p) +ensures hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo) == + hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) +decreases len(asid) - currHFIdx +func hopFieldsConsDirWidenLemma(raw []byte, offset int, currHFIdx int, beta set[io.IO_msgterm], asid seq[io.IO_as], ainfo io.IO_ainfo, length int, p perm) { + if currHFIdx == len(asid) { + ret := seq[io.IO_HF]{} + assert ret == hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret == hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + } else { + hopFieldWidenLemma(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo, length) + hf1 := hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + hf2 := hopField(raw[:length], offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + assert hf1 == hf2 + + newP := (p + R53)/2 + assert R53 < newP && newP < p + hopFieldsConsDirWidenLemma(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo, length, newP) + ret1 := seq[io.IO_HF]{hf1} ++ hopFieldsConsDir(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo) + ret2 := seq[io.IO_HF]{hf2} ++ hopFieldsConsDir(raw[:length], offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf2.HVF}), asid, ainfo) + assert ret1 == hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret2 == hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + assert ret1 == ret2 + } +} + +ghost +requires 0 <= length && length <= len(raw) +requires idx + path.HopLen <= length +requires 0 <= idx +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) +ensures hopField(raw, idx, beta, asid, ainfo) == hopField(raw[:length], idx, beta, asid, ainfo) +decreases +func hopFieldWidenLemma(raw []byte, idx int, beta set[io.IO_msgterm], asid io.IO_as, ainfo io.IO_ainfo, length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) + + assert forall i int :: { &raw[idx+2+i] } { &raw[idx+2:idx+4][i] } 0 <= i && i < 2 ==> &raw[idx+2+i] == &raw[idx+2:idx+4][i] + assert forall i int :: { &raw[idx+4+i] } { 
&raw[idx+4:idx+6][i] } 0 <= i && i < 2 ==> &raw[idx+4+i] == &raw[idx+4:idx+6][i] + inif21 := binary.BigEndian.Uint16(raw[idx+2:idx+4]) + inif22 := binary.BigEndian.Uint16(raw[:length][idx+2:idx+4]) + assert inif21 == inif22 + + egif2 := binary.BigEndian.Uint16(raw[idx+4:idx+6]) + op_inif2 := inif21 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(inif21)) + op_egif2 := egif2 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(egif2)) + ts := io.IO_msgterm(io.MsgTerm_Num{ainfo}) + l := io.IO_msgterm(io.MsgTerm_L{seq[io.IO_msgterm]{ts, io.if2term(op_inif2), io.if2term(op_egif2), io.IO_msgterm(io.MsgTerm_FS{beta})}}) + hvf := io.mac(io.macKey(io.asidToKey(asid)), l) + + ret1 := io.IO_HF(io.IO_HF_{ + InIF2 : op_inif2, + EgIF2 : op_egif2, + HVF : hvf, + }) + ret2 := io.IO_HF(io.IO_HF_{ + InIF2 : op_inif2, + EgIF2 : op_egif2, + HVF : hvf, + }) + assert ret1 == hopField(raw, idx, beta, asid, ainfo) + assert ret2 == hopField(raw[:length], idx, beta, asid, ainfo) + assert ret1 == ret2 + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) +} + +ghost +requires R53 < p +requires 0 <= offset +requires -1 <= currHFIdx && currHFIdx < len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * currHFIdx + path.HopLen <= length +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p) +ensures hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo) == + hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) +decreases currHFIdx + 1 +func hopFieldsNotConsDirWidenLemma(raw []byte, offset int, currHFIdx int, beta set[io.IO_msgterm], asid seq[io.IO_as], ainfo io.IO_ainfo, length int, p perm) { + if currHFIdx == -1 { + ret := seq[io.IO_HF]{} + assert ret == hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret == hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + } else { + hopFieldWidenLemma(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo, length) + hf1 := hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + hf2 := hopField(raw[:length], offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + assert hf1 == hf2 + + newP := (p + R53)/2 + assert R53 < newP && newP < p + hopFieldsNotConsDirWidenLemma(raw, offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo, length, newP) + ret1 := hopFieldsNotConsDir(raw, offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf1} + ret2 := hopFieldsNotConsDir(raw[:length], offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf2.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf2} + assert ret1 == hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret2 == hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + assert ret1 == ret2 + } +} + +ghost +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires pktLen(seg1Len, seg2Len, seg3Len) <= length +requires 1 <= currINFIdx && currINFIdx < 4 +requires (currINFIdx == 1 && seg2Len > 0) ==> len(asid) == seg2Len +requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + 
leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func leftSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) { + offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) + if currINFIdx == 1 && seg2Len > 0 { + currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid, length) + ret1 := some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid)) + ret2 := some(currSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, 0, asid)) + assert ret1 == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { + currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid, length) + ret1 := some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) + ret2 := some(currSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) + assert ret1 == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := none[io.IO_seg3] + assert ret == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires pktLen(seg1Len, seg2Len, seg3Len) <= length +requires -1 <= currINFIdx && currINFIdx < 2 +requires (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg2Len +requires (currINFIdx == 0 && seg2Len > 0) ==> len(asid) == seg1Len +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func rightSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) { + offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) + if currINFIdx == 1 && seg2Len > 0 && seg3Len > 0 { + currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid, length) + ret1 := some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid)) + ret2 := some(currSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid)) + assert ret1 == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 0 && seg2Len > 0 { + currSegWidenLemma(raw, offset, currINFIdx, seg1Len, asid, length) + ret1 := some(currSeg(raw, offset, currINFIdx, seg1Len, asid)) + ret2 := some(currSeg(raw[:length], offset, currINFIdx, seg1Len, asid)) + assert ret1 == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := none[io.IO_seg3] + assert ret == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, 
seg3Len, asid) + } +} + +ghost +requires 0 <= seg2Len +requires 0 < seg1Len +requires 0 <= length && length <= len(raw) +requires 0 <= seg3Len +requires 2 <= currINFIdx && currINFIdx < 5 +requires pktLen(seg1Len, seg2Len, seg3Len) <= length +requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len +requires (currINFIdx == 4 && seg2Len > 0) ==> len(asid) == seg1Len +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func midSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) { + offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) + if currINFIdx == 4 && seg2Len > 0 { + currSegWidenLemma(raw, offset, 0, seg1Len, asid, length) + ret1 := some(currSeg(raw, offset, 0, seg1Len, asid)) + ret2 := some(currSeg(raw[:length], offset, 0, seg1Len, asid)) + assert ret1 == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { + currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid, length) + ret1 := some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) + ret2 := some(currSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) + assert ret1 == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := none[io.IO_seg3] + assert ret == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} +*/ \ No newline at end of file diff --git a/verification/io/router.gobra b/verification/io/router.gobra index d34d2de3a..0bb6d82b8 100644 --- a/verification/io/router.gobra +++ b/verification/io/router.gobra @@ -72,7 +72,7 @@ pure func asidToKey(asid IO_as) IO_key{ ghost decreases -pure func upd_uinfo(segid set[IO_msgterm], hf IO_HF) set[IO_msgterm]{ +pure func upd_uinfo(segid set[IO_msgterm], hf IO_HF) set[IO_msgterm] { return let setHVF := set[IO_msgterm]{hf.HVF} in (segid union setHVF) setminus (segid intersection setHVF) } @@ -129,7 +129,6 @@ pure func (dp DataPlaneSpec) dp3s_forward_ext(m IO_pkt3, newpkt IO_pkt3, nextif let hf1, fut := currseg.Future[0], currseg.Future[1:] in let traversedseg := newpkt.CurrSeg in dp.dp2_forward_ext_guard(dp.Asid(), m, nextif, currseg, traversedseg, newpkt, fut, hf1) && - dp.dp2_check_interface_top(currseg.ConsDir, dp.Asid(), hf1) && (nextif in domain(dp.GetNeighborIAs())) && let a2 := dp.GetNeighborIA(nextif) in let i2 := dp.Lookup(AsIfsPair{dp.Asid(), nextif}).ifs in diff --git a/verification/io/router_events.gobra b/verification/io/router_events.gobra index 51c4dc33a..f20449359 100644 --- a/verification/io/router_events.gobra +++ b/verification/io/router_events.gobra @@ -91,6 +91,7 @@ pure func (dp DataPlaneSpec) dp2_enter_guard(m IO_pkt2, currseg IO_seg2, travers return m.CurrSeg == currseg && currseg.Future == seq[IO_HF]{hf1} ++ fut && dp.dp2_check_interface(currseg.ConsDir, asid, hf1, recvif) && + (dp.dp2_check_interface_top(currseg.ConsDir, asid, 
hf1) || fut == seq[IO_HF]{}) && update_uinfo(!currseg.ConsDir, currseg, traversedseg, hf1) && same_segment2(currseg, traversedseg) && same_other2(currseg, traversedseg) && diff --git a/verification/io/xover.gobra b/verification/io/xover.gobra index 206d53d55..eff09c6bf 100644 --- a/verification/io/xover.gobra +++ b/verification/io/xover.gobra @@ -103,3 +103,5 @@ decreases pure func (dp DataPlaneSpec) xover_up2down2_link_type_dir(asid IO_as, d1 bool, hf1 IO_HF, d2 bool, hf2 IO_HF) bool { return dp.xover_up2down2_link_type(asid, swap_if_dir2(hf1, d1), swap_if_dir2(hf2, d2)) } + + From 7b47f91f9601ad985741ff0a713ee37332900280 Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Sat, 13 Apr 2024 10:10:25 +0200 Subject: [PATCH 17/57] Update names of functions according to the changes in the IO-spec (#314) * minor renaming and merging of functions * further renaming --- router/io-spec-abstract-transitions.gobra | 4 +- router/io-spec-atomic-events.gobra | 20 +++----- router/io-spec.gobra | 4 +- verification/io/bios.gobra | 2 +- verification/io/io-spec.gobra | 61 +++++++++++------------ verification/io/other_defs.gobra | 10 ++-- verification/io/router.gobra | 4 +- verification/io/router_events.gobra | 4 +- verification/io/xover.gobra | 15 +++--- 9 files changed, 59 insertions(+), 65 deletions(-) diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra index afd145aa0..2d36d4054 100644 --- a/router/io-spec-abstract-transitions.gobra +++ b/router/io-spec-abstract-transitions.gobra @@ -127,7 +127,7 @@ pure func AbsValidateEgressIDConstraintXover(pkt io.IO_pkt2, dp io.DataPlaneSpec return let currseg := pkt.CurrSeg in let rightseg := get(pkt.RightSeg) in let nextIf := (currseg.ConsDir ? 
currseg.Future[0].EgIF2 : currseg.Future[0].InIF2) in - dp.xover_up2down2_link_type_dir(dp.Asid(), rightseg.ConsDir, rightseg.Past[0], + dp.xover2_link_type_dir(dp.Asid(), rightseg.ConsDir, rightseg.Past[0], currseg.ConsDir, currseg.Future[0]) && nextIf != none[io.IO_ifs] && (get(nextIf) in domain(dp.GetNeighborIAs())) @@ -231,5 +231,5 @@ func XoverEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt if(egressID != none[io.IO_ifs]){ reveal AbsProcessEgress(intermediatePkt2) } - AtomicXoverUp2Down(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) + AtomicXover(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) } diff --git a/router/io-spec-atomic-events.gobra b/router/io-spec-atomic-events.gobra index b93ae6ede..4b7309caa 100644 --- a/router/io-spec-atomic-events.gobra +++ b/router/io-spec-atomic-events.gobra @@ -108,13 +108,7 @@ requires len(oldPkt.CurrSeg.Future) > 0 requires len(get(oldPkt.LeftSeg).Future) > 0 requires ingressID != none[io.IO_ifs] requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) -requires dp.xover_up2down2_link_type_dir( - dp.Asid(), - oldPkt.CurrSeg.ConsDir, - oldPkt.CurrSeg.Future[0], - get(oldPkt.LeftSeg).ConsDir, - get(oldPkt.LeftSeg).Future[0]) -requires dp.dp2_xover_common_guard( +requires dp.dp2_xover_guard( oldPkt, oldPkt.CurrSeg, get(oldPkt.LeftSeg), @@ -140,18 +134,18 @@ requires dp.dp3s_forward_xover( preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases _ -func AtomicXoverUp2Down(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { +func AtomicXover(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { ghost ioLock.Lock() unfold SharedInv!< dp, ioSharedArg !>() t, s := *ioSharedArg.Place, *ioSharedArg.State ApplyElemWitness(s.ibuf, ioSharedArg.IBufY, ingressID, oldPkt) ghost pkt_internal := io.IO_val(io.IO_Internal_val1{oldPkt, get(ingressID), newPkt, egressID}) - assert dp.dp3s_iospec_bio3s_xover_up2down_guard(s, t, pkt_internal) + assert dp.dp3s_iospec_bio3s_xover_guard(s, t, pkt_internal) unfold dp.dp3s_iospec_ordered(s, t) - unfold dp.dp3s_iospec_bio3s_xover_up2down(s, t) - io.TriggerBodyIoXoverUp2Down(pkt_internal) - tN := io.dp3s_iospec_bio3s_xover_up2down_T(t, pkt_internal) - io.Xover_up2down(t, pkt_internal) //Event + unfold dp.dp3s_iospec_bio3s_xover(s, t) + io.TriggerBodyIoXover(pkt_internal) + tN := io.dp3s_iospec_bio3s_xover_T(t, pkt_internal) + io.Xover(t, pkt_internal) //Event UpdateElemWitness(s.obuf, ioSharedArg.OBufY, egressID, newPkt) ghost *ioSharedArg.State = io.dp3s_add_obuf(s, egressID, newPkt) ghost *ioSharedArg.Place = tN diff --git a/router/io-spec.gobra b/router/io-spec.gobra index dc1377a00..dde5bc198 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -136,10 +136,10 @@ pure func (d *DataPlane) dpSpecWellConfiguredNeighborIAs(dp io.DataPlaneSpec) bo ghost decreases pure func absLinktype(link topology.LinkType) io.IO_Link { - return link == topology.Core ? io.IO_Link(io.IO_PeerOrCore{}) : + return link == topology.Core ? io.IO_Link(io.IO_Core{}) : link == topology.Parent ? io.IO_Link(io.IO_CustProv{}) : link == topology.Child ? io.IO_Link(io.IO_ProvCust{}) : - link == topology.Peer ? io.IO_Link(io.IO_PeerOrCore{}) : + link == topology.Peer ? 
io.IO_Link(io.IO_Core{}) : io.IO_Link(io.IO_NoLink{}) } diff --git a/verification/io/bios.gobra b/verification/io/bios.gobra index f85bc30db..190e6d5c5 100644 --- a/verification/io/bios.gobra +++ b/verification/io/bios.gobra @@ -20,7 +20,7 @@ package io type IO_bio3sIN adt { IO_bio3s_enter{} - IO_bio3s_xover_up2down{} + IO_bio3s_xover{} IO_bio3s_exit{} } diff --git a/verification/io/io-spec.gobra b/verification/io/io-spec.gobra index 625690554..c5bdfd790 100644 --- a/verification/io/io-spec.gobra +++ b/verification/io/io-spec.gobra @@ -28,7 +28,7 @@ type BogusTrigger struct{} // This is the main IO Specification. pred (dp DataPlaneSpec) dp3s_iospec_ordered(s IO_dp3s_state_local, t Place) { dp.dp3s_iospec_bio3s_enter(s, t) && - dp.dp3s_iospec_bio3s_xover_up2down(s, t) && + dp.dp3s_iospec_bio3s_xover(s, t) && dp.dp3s_iospec_bio3s_exit(s, t) && dp.dp3s_iospec_bio3s_send(s, t) && dp.dp3s_iospec_bio3s_recv(s, t) && @@ -143,19 +143,19 @@ ghost decreases pure func TriggerBodyIoEnter(v IO_val) BogusTrigger { return BogusTrigger{} } -pred CBio_IN_bio3s_xover_up2down(t Place, v IO_val) +pred CBio_IN_bio3s_xover(t Place, v IO_val) ghost -requires CBio_IN_bio3s_xover_up2down(t, v) +requires CBio_IN_bio3s_xover(t, v) decreases -pure func dp3s_iospec_bio3s_xover_up2down_T(t Place, v IO_val) Place +pure func dp3s_iospec_bio3s_xover_T(t Place, v IO_val) Place -// This corresponds to the condition of the if statement in the io-spec case for xover_up2down +// This corresponds to the condition of the if statement in the io-spec case for xover ghost requires v.isIO_Internal_val1 requires dp.Valid() decreases -pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down_guard(s IO_dp3s_state_local, t Place, v IO_val) bool { +pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_guard(s IO_dp3s_state_local, t Place, v IO_val) bool { return let currseg := v.IO_Internal_val1_1.CurrSeg in match v.IO_Internal_val1_1.LeftSeg{ case none[IO_seg2]: @@ -167,37 +167,36 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down_guard(s IO_dp3s_sta let hf1, hf2 := currseg.Future[0], nextseg.Future[0] in let traversedseg := establishGuardTraversedsegInc(currseg, !currseg.ConsDir) in let nextfut := nextseg.Future[1:] in - (dp.xover_up2down2_link_type_dir(dp.Asid(), currseg.ConsDir, hf1, nextseg.ConsDir, hf2) && - dp.dp3s_xover_common( - s, - v.IO_Internal_val1_1, - currseg, - nextseg, - traversedseg, - IO_pkt2(IO_Packet2{nextseg, v.IO_Internal_val1_1.MidSeg, v.IO_Internal_val1_1.RightSeg, some(traversedseg)}), - hf1, - hf2, - nextfut, - v.IO_Internal_val1_2, - v.IO_Internal_val1_3, - v.IO_Internal_val1_4,))) + dp.dp3s_xover_guard( + s, + v.IO_Internal_val1_1, + currseg, + nextseg, + traversedseg, + IO_pkt2(IO_Packet2{nextseg, v.IO_Internal_val1_1.MidSeg, v.IO_Internal_val1_1.RightSeg, some(traversedseg)}), + hf1, + hf2, + nextfut, + v.IO_Internal_val1_2, + v.IO_Internal_val1_3, + v.IO_Internal_val1_4,)) } } -pred (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down(s IO_dp3s_state_local, t Place) { - forall v IO_val :: { TriggerBodyIoXoverUp2Down(v) } ( +pred (dp DataPlaneSpec) dp3s_iospec_bio3s_xover(s IO_dp3s_state_local, t Place) { + forall v IO_val :: { TriggerBodyIoXover(v) } ( match v { case IO_Internal_val1{_, _, ?newpkt, ?nextif}: // Gobra requires the triggering term to occur inside the qtfier body, - // otherwise we get an error in the call to dp3s_iospec_bio3s_xover_up2down_T. + // otherwise we get an error in the call to dp3s_iospec_bio3s_xover_T. 
// We named the variable `_ignored` because using `_` here leads to a strange
 // type error.
-				let _ignored := TriggerBodyIoXoverUp2Down(v) in
-				(dp.Valid() && dp.dp3s_iospec_bio3s_xover_up2down_guard(s, t, v) ==>
-				(CBio_IN_bio3s_xover_up2down(t, v) &&
+				let _ignored := TriggerBodyIoXover(v) in
+				(dp.Valid() && dp.dp3s_iospec_bio3s_xover_guard(s, t, v) ==>
+				(CBio_IN_bio3s_xover(t, v) &&
 					dp.dp3s_iospec_ordered(
 						dp3s_add_obuf(s, nextif, newpkt),
-						dp3s_iospec_bio3s_xover_up2down_T(t, v))))
+						dp3s_iospec_bio3s_xover_T(t, v))))
 			default:
 				true
 		})
@@ -205,7 +204,7 @@ pred (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down(s IO_dp3s_state_local, t
 
 ghost
 decreases
-pure func TriggerBodyIoXoverUp2Down(v IO_val) BogusTrigger { return BogusTrigger{} }
+pure func TriggerBodyIoXover(v IO_val) BogusTrigger { return BogusTrigger{} }
 
 pred CBio_IN_bio3s_exit(t Place, v IO_val)
 
@@ -347,9 +346,9 @@ func Enter(ghost t Place, ghost v IO_val)
 
 ghost
 decreases
-requires token(t) && CBio_IN_bio3s_xover_up2down(t, v)
-ensures token(old(dp3s_iospec_bio3s_xover_up2down_T(t, v)))
-func Xover_up2down(ghost t Place, ghost v IO_val)
+requires token(t) && CBio_IN_bio3s_xover(t, v)
+ensures token(old(dp3s_iospec_bio3s_xover_T(t, v)))
+func Xover(ghost t Place, ghost v IO_val)
 
 ghost
 decreases
diff --git a/verification/io/other_defs.gobra b/verification/io/other_defs.gobra
index b65e70abc..d4eb1df8f 100644
--- a/verification/io/other_defs.gobra
+++ b/verification/io/other_defs.gobra
@@ -108,7 +108,7 @@ func (h IO_HF) Toab() IO_ahi {
 type IO_Link adt {
 	IO_CustProv{}
 	IO_ProvCust{}
-	IO_PeerOrCore{}
+	IO_Core{}
 	IO_NoLink{}
 }
 
@@ -137,7 +137,7 @@ requires dp.Valid()
 requires asid == dp.Asid()
 decreases
 pure func (dp DataPlaneSpec) egif_core2(hf1 IO_HF, asid IO_as) bool{
-	return dp.egif2_type(hf1, asid, IO_Link(IO_PeerOrCore{}))
+	return dp.egif2_type(hf1, asid, IO_Link(IO_Core{}))
 }
 
 ghost
@@ -161,7 +161,7 @@ requires dp.Valid()
 requires asid == dp.Asid()
 decreases
 pure func (dp DataPlaneSpec) inif_core2(hf1 IO_HF, asid IO_as) bool{
-	return dp.inif2_type(hf1, asid, IO_Link(IO_PeerOrCore{}))
+	return dp.inif2_type(hf1, asid, IO_Link(IO_Core{}))
 }
 
 ghost
@@ -178,9 +178,9 @@ requires ifs != none[IO_ifs] ==> asid == dp.Asid()
 decreases
 pure func (dp DataPlaneSpec) if_type(asid IO_as, ifs option[IO_ifs], link IO_Link) bool{
 	return match ifs {
-		case none[IO_ifs]: 
+		case none[IO_ifs]:
 			false
-		default: 
+		default:
 			dp.link_type(asid, get(ifs)) == link
 	}
 }
diff --git a/verification/io/router.gobra b/verification/io/router.gobra
index 0bb6d82b8..db9cabaab 100644
--- a/verification/io/router.gobra
+++ b/verification/io/router.gobra
@@ -183,7 +183,7 @@ ghost
 requires len(intermediatepkt.CurrSeg.Future) > 0
 requires dp.Valid()
 decreases
-pure func (dp DataPlaneSpec) dp3s_xover_common(
+pure func (dp DataPlaneSpec) dp3s_xover_guard(
 	s IO_dp3s_state_local,
 	m IO_pkt3,
 	currseg IO_seg3,
@@ -201,6 +201,6 @@ pure func (dp DataPlaneSpec) dp3s_xover_common(
 	// this is because of the way mathematical maps are implemented: we can only obtain a key that is already in the map.
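 	// A minimal sketch of that restriction, with hypothetical names m and k:
 	//   requires k in domain(m)
 	//   decreases
 	//   pure func lookup(m dict[int]int, k int) int { return m[k] }
 	// i.e., membership in domain(m) must be established before m is indexed.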
return some(recvif) in domain(s.ibuf) && (let lookupRes := s.ibuf[some(recvif)] in (m in lookupRes)) && - dp.dp2_xover_common_guard(m, currseg, nextseg, traversedseg, intermediatepkt, hf1, hf2, nextfut, dp.Asid(), recvif) && + dp.dp2_xover_guard(m, currseg, nextseg, traversedseg, intermediatepkt, hf1, hf2, nextfut, dp.Asid(), recvif) && dp.dp3s_forward_xover(intermediatepkt, newpkt, nextif) } diff --git a/verification/io/router_events.gobra b/verification/io/router_events.gobra index f20449359..c03f05fad 100644 --- a/verification/io/router_events.gobra +++ b/verification/io/router_events.gobra @@ -42,7 +42,7 @@ pure func (dp DataPlaneSpec) valid_link_types_in2(hf1 IO_HF, a IO_as) bool { ghost decreases -pure func (dp DataPlaneSpec) dp2_check_interface(d bool, asid IO_as, hf1 IO_HF, recvif IO_ifs) bool { +pure func (dp DataPlaneSpec) dp2_enter_interface(d bool, asid IO_as, hf1 IO_HF, recvif IO_ifs) bool { return (d && hf1.InIF2 === some(recvif)) || (!d && hf1.EgIF2 === some(recvif)) } @@ -90,7 +90,7 @@ decreases pure func (dp DataPlaneSpec) dp2_enter_guard(m IO_pkt2, currseg IO_seg2, traversedseg IO_seg2, asid IO_as, hf1 IO_HF, recvif IO_ifs, fut seq[IO_HF]) bool { return m.CurrSeg == currseg && currseg.Future == seq[IO_HF]{hf1} ++ fut && - dp.dp2_check_interface(currseg.ConsDir, asid, hf1, recvif) && + dp.dp2_enter_interface(currseg.ConsDir, asid, hf1, recvif) && (dp.dp2_check_interface_top(currseg.ConsDir, asid, hf1) || fut == seq[IO_HF]{}) && update_uinfo(!currseg.ConsDir, currseg, traversedseg, hf1) && same_segment2(currseg, traversedseg) && diff --git a/verification/io/xover.gobra b/verification/io/xover.gobra index eff09c6bf..8fff431fb 100644 --- a/verification/io/xover.gobra +++ b/verification/io/xover.gobra @@ -34,7 +34,7 @@ ghost requires dp.Valid() requires asid == dp.Asid() decreases -pure func (dp DataPlaneSpec) dp2_xover_common_guard(m IO_pkt2, +pure func (dp DataPlaneSpec) dp2_xover_guard(m IO_pkt2, currseg IO_seg2, nextseg IO_seg2, traversedseg IO_seg2, @@ -50,7 +50,8 @@ pure func (dp DataPlaneSpec) dp2_xover_common_guard(m IO_pkt2, newpkt == IO_pkt2(IO_Packet2{nextseg, m.MidSeg, m.RightSeg, some(traversedseg)}) && currseg.Future == seq[IO_HF]{hf1} && nextseg.Future == seq[IO_HF]{hf2} ++ nextfut && - dp.dp2_check_interface(currseg.ConsDir, asid, hf1, recvif) && + dp.dp2_enter_interface(currseg.ConsDir, asid, hf1, recvif) && + dp.xover2_link_type_dir(dp.Asid(), currseg.ConsDir, hf1, nextseg.ConsDir, hf2) && update_uinfo(!currseg.ConsDir, currseg, traversedseg, hf1) && inc_seg2(currseg, traversedseg, hf1, seq[IO_HF]{}) && dp.hf_valid(currseg.ConsDir, currseg.AInfo, traversedseg.UInfo, hf1) && @@ -80,10 +81,10 @@ ghost requires dp.Valid() requires asid == dp.Asid() decreases -pure func (dp DataPlaneSpec) xover_up2down2_link_type(asid IO_as, hf1 IO_HF, hf2 IO_HF) bool { +pure func (dp DataPlaneSpec) xover2_link_type(asid IO_as, hf1 IO_HF, hf2 IO_HF) bool { return (dp.inif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) || - (dp.inif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_PeerOrCore{})) || - (dp.inif2_type(hf1, asid, IO_PeerOrCore{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) + (dp.inif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_Core{})) || + (dp.inif2_type(hf1, asid, IO_Core{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) } ghost @@ -100,8 +101,8 @@ ghost requires dp.Valid() requires asid == dp.Asid() decreases -pure func (dp DataPlaneSpec) xover_up2down2_link_type_dir(asid IO_as, d1 bool, hf1 IO_HF, d2 bool, 
hf2 IO_HF) bool { - return dp.xover_up2down2_link_type(asid, swap_if_dir2(hf1, d1), swap_if_dir2(hf2, d2)) +pure func (dp DataPlaneSpec) xover2_link_type_dir(asid IO_as, d1 bool, hf1 IO_HF, d2 bool, hf2 IO_HF) bool { + return dp.xover2_link_type(asid, swap_if_dir2(hf1, d1), swap_if_dir2(hf2, d2)) } From 53f9359f8b82836812a1f99f4a2c9aa411d78366 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Mon, 15 Apr 2024 11:17:53 +0200 Subject: [PATCH 18/57] Cleanup unnecessary code in the stdlib formalization (#315) * cleanup unnecessary code in the stdlib formalization * replace occurrences of names with 'VerifiedSCION' --- pkg/slayers/path/scion/raw.go | 2 +- .../dependencies/context/context.gobra | 24 -- .../dependencies/crypto/aes/cipher.gobra | 15 +- .../dependencies/crypto/cipher/cbc.gobra | 11 +- .../google/gopacket/layerclass.gobra | 85 +------ .../google/gopacket/layers/layertypes.gobra | 212 +--------------- .../client_golang/prometheus/desc.gobra | 2 +- .../x/net/internal/socket/socket.gobra.old | 186 -------------- .../golang.org/x/net/ipv4/endpoint.gobra | 106 +------- .../golang.org/x/net/ipv6/endpoint.gobra | 76 +----- .../strconv/itoa.gobra.verified_backup | 237 ------------------ verification/dependencies/time/time.gobra | 13 +- 12 files changed, 28 insertions(+), 941 deletions(-) delete mode 100644 verification/dependencies/golang.org/x/net/internal/socket/socket.gobra.old delete mode 100644 verification/dependencies/strconv/itoa.gobra.verified_backup diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index 8f3d24e5e..7414b11d9 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -443,7 +443,7 @@ func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) //@ unfold acc(s.Mem(ubuf), R20) //@ unfold acc(s.Base.Mem(), R20) if idx >= s.NumHops { - // (gavin) introduced `err` + // (VerifiedSCION) introduced `err` err := serrors.New("HopField index out of bounds", "max", s.NumHops-1, "actual", idx) //@ fold acc(s.Base.Mem(), R20) //@ fold acc(s.Mem(ubuf), R20) diff --git a/verification/dependencies/context/context.gobra b/verification/dependencies/context/context.gobra index 654f9d187..ab6060516 100644 --- a/verification/dependencies/context/context.gobra +++ b/verification/dependencies/context/context.gobra @@ -59,27 +59,3 @@ ensures child.Mem() ensures child.Mem() --* parent.Mem() decreases _ func WithValue(parent Context, key, val interface{ pred Mem() }) (child Context) - -/* Below functions are closure-dependent and currently unsupported. 
- -type CancelFunc func() - -requires parent.Mem() -ensures child.Mem() -ensures child.Mem() --* parent.Mem() -decreases _ -func WithCancel(parent Context) (child Context, cancel CancelFunc) - -requires parent.Mem() -ensures child.Mem() -ensures child.Mem() --* parent.Mem() -decreases _ -func WithDeadline(parent Context, d time.Time) (child Context, cancel CancelFunc) - -requires parent.Mem() -ensures child.Mem() -ensures child.Mem() --* parent.Mem() -decreases _ -func WithTimeout(parent Context, timeout time.Duration) (child Context, cancel CancelFunc) - -*/ diff --git a/verification/dependencies/crypto/aes/cipher.gobra b/verification/dependencies/crypto/aes/cipher.gobra index 3dd4949b5..98eef7516 100644 --- a/verification/dependencies/crypto/aes/cipher.gobra +++ b/verification/dependencies/crypto/aes/cipher.gobra @@ -19,7 +19,6 @@ const BlockSize = 16 // The key argument should be the AES key, // either 16, 24, or 32 bytes to select // AES-128, AES-192, or AES-256. -trusted preserves acc(slices.AbsSlice_Bytes(key, 0, len(key)), R50) ensures err == nil ==> len(key) == 16 || len(key) == 24 || len(key) == 32 @@ -29,16 +28,4 @@ ensures err == nil ==> result.BlockSize() == len(key)) ensures err != nil ==> err.ErrorMem() decreases -func NewCipher(key []byte) (result cipher.Block, err error) { - k := len(key) - switch k { - default: - return nil, KeySizeError(k) - case 16, 24, 32: - break - } - if boring.Enabled { - return boring.NewAESCipher(key) - } - return newCipher(key) -} +func NewCipher(key []byte) (result cipher.Block, err error) \ No newline at end of file diff --git a/verification/dependencies/crypto/cipher/cbc.gobra b/verification/dependencies/crypto/cipher/cbc.gobra index a195b0260..0dcf5511d 100644 --- a/verification/dependencies/crypto/cipher/cbc.gobra +++ b/verification/dependencies/crypto/cipher/cbc.gobra @@ -20,19 +20,10 @@ import "github.com/scionproto/scion/verification/utils/slices" // NewCBCEncrypter returns a BlockMode which encrypts in cipher block chaining // mode, using the given Block. The length of iv must be the same as the // Block's block size. -trusted requires b != nil && b.Mem() requires len(iv) == b.BlockSize() preserves acc(slices.AbsSlice_Bytes(iv, 0, len(iv)), _) ensures result != nil && result.Mem() ensures result.BlockSize() == old(b.BlockSize()) decreases _ -func NewCBCEncrypter(b Block, iv []byte) (result BlockMode) { - if len(iv) != b.BlockSize() { - panic("cipher.NewCBCEncrypter: IV length must equal block size") - } - if cbc, ok := b.(cbcEncAble); ok { - return cbc.NewCBCEncrypter(iv) - } - return (*cbcEncrypter)(newCBC(b, iv)) -} \ No newline at end of file +func NewCBCEncrypter(b Block, iv []byte) (result BlockMode) \ No newline at end of file diff --git a/verification/dependencies/github.com/google/gopacket/layerclass.gobra b/verification/dependencies/github.com/google/gopacket/layerclass.gobra index 23b02797d..9d0b48adf 100644 --- a/verification/dependencies/github.com/google/gopacket/layerclass.gobra +++ b/verification/dependencies/github.com/google/gopacket/layerclass.gobra @@ -45,80 +45,6 @@ func (l LayerType) LayerTypes() (res []LayerType) { return []LayerType{l} } -/* -// (verifiedSCION) The following commented methods could be easily verified -// after we introduce support for range: - -// LayerClassSlice implements a LayerClass with a slice. -type LayerClassSlice []bool - -// Contains returns true if the given layer type should be considered part -// of this layer class. 
-func (s LayerClassSlice) Contains(t LayerType) bool { - return int(t) < len(s) && s[t] -} - -// LayerTypes returns all layer types in this LayerClassSlice. -// Because of LayerClassSlice's implementation, this could be quite slow. -func (s LayerClassSlice) LayerTypes() (all []LayerType) { - for i := 0; i < len(s); i++ { - if s[i] { - all = append(all, LayerType(i)) - } - } - return -} - -// NewLayerClassSlice creates a new LayerClassSlice by creating a slice of -// size max(types) and setting slice[t] to true for each type t. Note, if -// you implement your own LayerType and give it a high value, this WILL create -// a very large slice. -func NewLayerClassSlice(types []LayerType) LayerClassSlice { - var max LayerType - for _, typ := range types { - if typ > max { - max = typ - } - } - t := make([]bool, int(max+1)) - for _, typ := range types { - t[typ] = true - } - return t -} - -// LayerClassMap implements a LayerClass with a map. -type LayerClassMap map[LayerType]bool - -// Contains returns true if the given layer type should be considered part -// of this layer class. -func (m LayerClassMap) Contains(t LayerType) bool { - return m[t] -} - -// LayerTypes returns all layer types in this LayerClassMap. -func (m LayerClassMap) LayerTypes() (all []LayerType) { - for t := range m { - all = append(all, t) - } - return -} - -// NewLayerClassMap creates a LayerClassMap and sets map[t] to true for each -// type in types. -func NewLayerClassMap(types []LayerType) LayerClassMap { - m := LayerClassMap{} - for _, typ := range types { - m[typ] = true - } - return m -} -*/ - -// TODO: add explicit implementation proofs - -// TODO: verify after verifying the remainder of the file -trusted preserves acc(types, R20) // NewLayerClass creates a LayerClass, attempting to be smart about which type // it creates based on which types are passed in. @@ -127,13 +53,4 @@ ensures res.Mem() // ensures forall i LayerType :: i in types ==> res.Contains(i) // ensures forall i LayerType :: !i in types ==> !res.Contains(i) decreases -func NewLayerClass(types []LayerType) (res LayerClass) { - for _, typ := range types { - if typ > maxLayerType { - // NewLayerClassSlice could create a very large object, so instead create - // a map. 
- return NewLayerClassMap(types) - } - } - return NewLayerClassSlice(types) -} \ No newline at end of file +func NewLayerClass(types []LayerType) (res LayerClass) \ No newline at end of file diff --git a/verification/dependencies/github.com/google/gopacket/layers/layertypes.gobra b/verification/dependencies/github.com/google/gopacket/layers/layertypes.gobra index 66a11d919..5ab8c0fb1 100644 --- a/verification/dependencies/github.com/google/gopacket/layers/layertypes.gobra +++ b/verification/dependencies/github.com/google/gopacket/layers/layertypes.gobra @@ -38,215 +38,5 @@ decreases func generateDecoders() (d gopacket.Decoder) var ( -// LayerTypeARP = gopacket.RegisterLayerType(10, gopacket.LayerTypeMetadata{Name: "ARP", Decoder: generateDecoders()}) // gopacket.DecodeFunc(decodeARP) -// LayerTypeCiscoDiscovery = gopacket.RegisterLayerType(11, gopacket.LayerTypeMetadata{Name: "CiscoDiscovery", Decoder: generateDecoders()}) // gopacket.DecodeFunc(decodeCiscoDiscovery) -// LayerTypeEthernetCTP = gopacket.RegisterLayerType(12, gopacket.LayerTypeMetadata{Name: "EthernetCTP", Decoder: /* gopacket.DecodeFunc(decodeEthernetCTP)}) */ generateDecoders()}) -// LayerTypeEthernetCTPForwardData = gopacket.RegisterLayerType(13, gopacket.LayerTypeMetadata{Name: "EthernetCTPForwardData", Decoder: nil}) -// LayerTypeEthernetCTPReply = gopacket.RegisterLayerType(14, gopacket.LayerTypeMetadata{Name: "EthernetCTPReply", Decoder: nil}) -// LayerTypeDot1Q = gopacket.RegisterLayerType(15, gopacket.LayerTypeMetadata{Name: "Dot1Q", Decoder: /* gopacket.DecodeFunc(decodeDot1Q)} */ generateDecoders()}) -// LayerTypeEtherIP = gopacket.RegisterLayerType(16, gopacket.LayerTypeMetadata{Name: "EtherIP", Decoder: /* gopacket.DecodeFunc(decodeEtherIP)} */ generateDecoders()}) -// LayerTypeEthernet = gopacket.RegisterLayerType(17, gopacket.LayerTypeMetadata{Name: "Ethernet", Decoder: /* gopacket.DecodeFunc(decodeEthernet)} */ generateDecoders()}) -// LayerTypeGRE = gopacket.RegisterLayerType(18, gopacket.LayerTypeMetadata{Name: "GRE", Decoder: /* gopacket.DecodeFunc(decodeGRE)} */ generateDecoders()}) -// LayerTypeICMPv4 = gopacket.RegisterLayerType(19, gopacket.LayerTypeMetadata{Name: "ICMPv4", Decoder: /* gopacket.DecodeFunc(decodeICMPv4)} */ generateDecoders()}) -// LayerTypeIPv4 = gopacket.RegisterLayerType(20, gopacket.LayerTypeMetadata{Name: "IPv4", Decoder: /* gopacket.DecodeFunc(decodeIPv4)} */ generateDecoders()}) -// LayerTypeIPv6 = gopacket.RegisterLayerType(21, gopacket.LayerTypeMetadata{Name: "IPv6", Decoder: /* gopacket.DecodeFunc(decodeIPv6)} */ generateDecoders()}) -// LayerTypeLLC = gopacket.RegisterLayerType(22, gopacket.LayerTypeMetadata{Name: "LLC", Decoder: /* gopacket.DecodeFunc(decodeLLC)} */ generateDecoders()}) -// LayerTypeSNAP = gopacket.RegisterLayerType(23, gopacket.LayerTypeMetadata{Name: "SNAP", Decoder: /* gopacket.DecodeFunc(decodeSNAP)} */ generateDecoders()}) -// LayerTypeMPLS = gopacket.RegisterLayerType(24, gopacket.LayerTypeMetadata{Name: "MPLS", Decoder: /* gopacket.DecodeFunc(decodeMPLS)} */ generateDecoders()}) -// LayerTypePPP = gopacket.RegisterLayerType(25, gopacket.LayerTypeMetadata{Name: "PPP", Decoder: /* gopacket.DecodeFunc(decodePPP)} */ generateDecoders()}) -// LayerTypePPPoE = gopacket.RegisterLayerType(26, gopacket.LayerTypeMetadata{Name: "PPPoE", Decoder: /* gopacket.DecodeFunc(decodePPPoE)} */ generateDecoders()}) -// LayerTypeRUDP = gopacket.RegisterLayerType(27, gopacket.LayerTypeMetadata{Name: "RUDP", Decoder: /* gopacket.DecodeFunc(decodeRUDP)} */ 
generateDecoders()}) -// LayerTypeSCTP = gopacket.RegisterLayerType(28, gopacket.LayerTypeMetadata{Name: "SCTP", Decoder: /* gopacket.DecodeFunc(decodeSCTP)} */ generateDecoders()}) -// LayerTypeSCTPUnknownChunkType = gopacket.RegisterLayerType(29, gopacket.LayerTypeMetadata{Name: "SCTPUnknownChunkType", Decoder: nil}) -// LayerTypeSCTPData = gopacket.RegisterLayerType(30, gopacket.LayerTypeMetadata{Name: "SCTPData", Decoder: nil}) -// LayerTypeSCTPInit = gopacket.RegisterLayerType(31, gopacket.LayerTypeMetadata{Name: "SCTPInit", Decoder: nil}) -// LayerTypeSCTPSack = gopacket.RegisterLayerType(32, gopacket.LayerTypeMetadata{Name: "SCTPSack", Decoder: nil}) -// LayerTypeSCTPHeartbeat = gopacket.RegisterLayerType(33, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeat", Decoder: nil}) -// LayerTypeSCTPError = gopacket.RegisterLayerType(34, gopacket.LayerTypeMetadata{Name: "SCTPError", Decoder: nil}) -// LayerTypeSCTPShutdown = gopacket.RegisterLayerType(35, gopacket.LayerTypeMetadata{Name: "SCTPShutdown", Decoder: nil}) -// LayerTypeSCTPShutdownAck = gopacket.RegisterLayerType(36, gopacket.LayerTypeMetadata{Name: "SCTPShutdownAck", Decoder: nil}) -// LayerTypeSCTPCookieEcho = gopacket.RegisterLayerType(37, gopacket.LayerTypeMetadata{Name: "SCTPCookieEcho", Decoder: nil}) -// LayerTypeSCTPEmptyLayer = gopacket.RegisterLayerType(38, gopacket.LayerTypeMetadata{Name: "SCTPEmptyLayer", Decoder: nil}) -// LayerTypeSCTPInitAck = gopacket.RegisterLayerType(39, gopacket.LayerTypeMetadata{Name: "SCTPInitAck", Decoder: nil}) -// LayerTypeSCTPHeartbeatAck = gopacket.RegisterLayerType(40, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeatAck", Decoder: nil}) -// LayerTypeSCTPAbort = gopacket.RegisterLayerType(41, gopacket.LayerTypeMetadata{Name: "SCTPAbort", Decoder: nil}) -// LayerTypeSCTPShutdownComplete = gopacket.RegisterLayerType(42, gopacket.LayerTypeMetadata{Name: "SCTPShutdownComplete", Decoder: nil}) -// LayerTypeSCTPCookieAck = gopacket.RegisterLayerType(43, gopacket.LayerTypeMetadata{Name: "SCTPCookieAck", Decoder: nil}) -// LayerTypeTCP = gopacket.RegisterLayerType(44, gopacket.LayerTypeMetadata{Name: "TCP", Decoder: /* gopacket.DecodeFunc(decodeTCP)} */ generateDecoders()}) -// LayerTypeUDP = gopacket.RegisterLayerType(45, gopacket.LayerTypeMetadata{Name: "UDP", Decoder: /* gopacket.DecodeFunc(decodeUDP)} */ generateDecoders()}) -// LayerTypeIPv6HopByHop = gopacket.RegisterLayerType(46, gopacket.LayerTypeMetadata{Name: "IPv6HopByHop", Decoder: /* gopacket.DecodeFunc(decodeIPv6HopByHop)} */ generateDecoders()}) -// LayerTypeIPv6Routing = gopacket.RegisterLayerType(47, gopacket.LayerTypeMetadata{Name: "IPv6Routing", Decoder: /* gopacket.DecodeFunc(decodeIPv6Routing)} */ generateDecoders()}) -// LayerTypeIPv6Fragment = gopacket.RegisterLayerType(48, gopacket.LayerTypeMetadata{Name: "IPv6Fragment", Decoder: /* gopacket.DecodeFunc(decodeIPv6Fragment)} */ generateDecoders()}) -// LayerTypeIPv6Destination = gopacket.RegisterLayerType(49, gopacket.LayerTypeMetadata{Name: "IPv6Destination", Decoder: /* gopacket.DecodeFunc(decodeIPv6Destination)} */ generateDecoders()}) -// LayerTypeIPSecAH = gopacket.RegisterLayerType(50, gopacket.LayerTypeMetadata{Name: "IPSecAH", Decoder: /* gopacket.DecodeFunc(decodeIPSecAH)} */ generateDecoders()}) -// LayerTypeIPSecESP = gopacket.RegisterLayerType(51, gopacket.LayerTypeMetadata{Name: "IPSecESP", Decoder: /* gopacket.DecodeFunc(decodeIPSecESP)} */ generateDecoders()}) -// LayerTypeUDPLite = gopacket.RegisterLayerType(52, gopacket.LayerTypeMetadata{Name: "UDPLite", 
Decoder: /* gopacket.DecodeFunc(decodeUDPLite)} */ generateDecoders()}) -// LayerTypeFDDI = gopacket.RegisterLayerType(53, gopacket.LayerTypeMetadata{Name: "FDDI", Decoder: /* gopacket.DecodeFunc(decodeFDDI)} */ generateDecoders()}) -// LayerTypeLoopback = gopacket.RegisterLayerType(54, gopacket.LayerTypeMetadata{Name: "Loopback", Decoder: /* gopacket.DecodeFunc(decodeLoopback)} */ generateDecoders()}) -// LayerTypeEAP = gopacket.RegisterLayerType(55, gopacket.LayerTypeMetadata{Name: "EAP", Decoder: /* gopacket.DecodeFunc(decodeEAP)} */ generateDecoders()}) -// LayerTypeEAPOL = gopacket.RegisterLayerType(56, gopacket.LayerTypeMetadata{Name: "EAPOL", Decoder: /* gopacket.DecodeFunc(decodeEAPOL)} */ generateDecoders()}) -// LayerTypeICMPv6 = gopacket.RegisterLayerType(57, gopacket.LayerTypeMetadata{Name: "ICMPv6", Decoder: /* gopacket.DecodeFunc(decodeICMPv6)} */ generateDecoders()}) -// LayerTypeLinkLayerDiscovery = gopacket.RegisterLayerType(58, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscovery", Decoder: /* gopacket.DecodeFunc(decodeLinkLayerDiscovery)} */ generateDecoders()}) -// LayerTypeCiscoDiscoveryInfo = gopacket.RegisterLayerType(59, gopacket.LayerTypeMetadata{Name: "CiscoDiscoveryInfo", Decoder: /* gopacket.DecodeFunc(decodeCiscoDiscoveryInfo)} */ generateDecoders()}) -// LayerTypeLinkLayerDiscoveryInfo = gopacket.RegisterLayerType(60, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscoveryInfo", Decoder: nil}) -// LayerTypeNortelDiscovery = gopacket.RegisterLayerType(61, gopacket.LayerTypeMetadata{Name: "NortelDiscovery", Decoder: /* gopacket.DecodeFunc(decodeNortelDiscovery)} */ generateDecoders()}) -// LayerTypeIGMP = gopacket.RegisterLayerType(62, gopacket.LayerTypeMetadata{Name: "IGMP", Decoder: /* gopacket.DecodeFunc(decodeIGMP)} */ generateDecoders()}) -// LayerTypePFLog = gopacket.RegisterLayerType(63, gopacket.LayerTypeMetadata{Name: "PFLog", Decoder: /* gopacket.DecodeFunc(decodePFLog)} */ generateDecoders()}) -// LayerTypeRadioTap = gopacket.RegisterLayerType(64, gopacket.LayerTypeMetadata{Name: "RadioTap", Decoder: /* gopacket.DecodeFunc(decodeRadioTap)} */ generateDecoders()}) -// LayerTypeDot11 = gopacket.RegisterLayerType(65, gopacket.LayerTypeMetadata{Name: "Dot11", Decoder: /* gopacket.DecodeFunc(decodeDot11)} */ generateDecoders()}) -// LayerTypeDot11Ctrl = gopacket.RegisterLayerType(66, gopacket.LayerTypeMetadata{Name: "Dot11Ctrl", Decoder: /* gopacket.DecodeFunc(decodeDot11Ctrl)} */ generateDecoders()}) -// LayerTypeDot11Data = gopacket.RegisterLayerType(67, gopacket.LayerTypeMetadata{Name: "Dot11Data", Decoder: /* gopacket.DecodeFunc(decodeDot11Data)} */ generateDecoders()}) -// LayerTypeDot11DataCFAck = gopacket.RegisterLayerType(68, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFAck)} */ generateDecoders()}) -// LayerTypeDot11DataCFPoll = gopacket.RegisterLayerType(69, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFPoll)} */ generateDecoders()}) -// LayerTypeDot11DataCFAckPoll = gopacket.RegisterLayerType(70, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFAckPoll)} */ generateDecoders()}) -// LayerTypeDot11DataNull = gopacket.RegisterLayerType(71, gopacket.LayerTypeMetadata{Name: "Dot11DataNull", Decoder: /* gopacket.DecodeFunc(decodeDot11DataNull)} */ generateDecoders()}) -// LayerTypeDot11DataCFAckNoData = gopacket.RegisterLayerType(72, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: 
/* gopacket.DecodeFunc(decodeDot11DataCFAck)} */ generateDecoders()}) -// LayerTypeDot11DataCFPollNoData = gopacket.RegisterLayerType(73, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFPoll)} */ generateDecoders()}) -// LayerTypeDot11DataCFAckPollNoData = gopacket.RegisterLayerType(74, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFAckPoll)} */ generateDecoders()}) -// LayerTypeDot11DataQOSData = gopacket.RegisterLayerType(75, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSData", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSData)} */ generateDecoders()}) -// LayerTypeDot11DataQOSDataCFAck = gopacket.RegisterLayerType(76, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAck", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck)} */ generateDecoders()}) -// LayerTypeDot11DataQOSDataCFPoll = gopacket.RegisterLayerType(77, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll)} */ generateDecoders()}) -// LayerTypeDot11DataQOSDataCFAckPoll = gopacket.RegisterLayerType(78, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAckPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll)} */ generateDecoders()}) -// LayerTypeDot11DataQOSNull = gopacket.RegisterLayerType(79, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSNull", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSNull)} */ generateDecoders()}) -// LayerTypeDot11DataQOSCFPollNoData = gopacket.RegisterLayerType(80, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData)} */ generateDecoders()}) -// LayerTypeDot11DataQOSCFAckPollNoData = gopacket.RegisterLayerType(81, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFAckPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData)} */ generateDecoders()}) -// LayerTypeDot11InformationElement = gopacket.RegisterLayerType(82, gopacket.LayerTypeMetadata{Name: "Dot11InformationElement", Decoder: /* gopacket.DecodeFunc(decodeDot11InformationElement)} */ generateDecoders()}) -// LayerTypeDot11CtrlCTS = gopacket.RegisterLayerType(83, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCTS", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlCTS)} */ generateDecoders()}) -// LayerTypeDot11CtrlRTS = gopacket.RegisterLayerType(84, gopacket.LayerTypeMetadata{Name: "Dot11CtrlRTS", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlRTS)} */ generateDecoders()}) -// LayerTypeDot11CtrlBlockAckReq = gopacket.RegisterLayerType(85, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAckReq", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq)} */ generateDecoders()}) -// LayerTypeDot11CtrlBlockAck = gopacket.RegisterLayerType(86, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAck", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlBlockAck)} */ generateDecoders()}) -// LayerTypeDot11CtrlPowersavePoll = gopacket.RegisterLayerType(87, gopacket.LayerTypeMetadata{Name: "Dot11CtrlPowersavePoll", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll)} */ generateDecoders()}) -// LayerTypeDot11CtrlAck = gopacket.RegisterLayerType(88, gopacket.LayerTypeMetadata{Name: "Dot11CtrlAck", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlAck)} */ generateDecoders()}) -// LayerTypeDot11CtrlCFEnd = gopacket.RegisterLayerType(89, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEnd", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlCFEnd)} */ generateDecoders()}) -// 
LayerTypeDot11CtrlCFEndAck = gopacket.RegisterLayerType(90, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEndAck", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlCFEndAck)} */ generateDecoders()}) -// LayerTypeDot11MgmtAssociationReq = gopacket.RegisterLayerType(91, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationReq", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtAssociationReq)} */ generateDecoders()}) -// LayerTypeDot11MgmtAssociationResp = gopacket.RegisterLayerType(92, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationResp", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtAssociationResp)} */ generateDecoders()}) -// LayerTypeDot11MgmtReassociationReq = gopacket.RegisterLayerType(93, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationReq", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtReassociationReq)} */ generateDecoders()}) -// LayerTypeDot11MgmtReassociationResp = gopacket.RegisterLayerType(94, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationResp", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtReassociationResp)} */ generateDecoders()}) -// LayerTypeDot11MgmtProbeReq = gopacket.RegisterLayerType(95, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeReq", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtProbeReq)} */ generateDecoders()}) -// LayerTypeDot11MgmtProbeResp = gopacket.RegisterLayerType(96, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeResp", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtProbeResp)} */ generateDecoders()}) -// LayerTypeDot11MgmtMeasurementPilot = gopacket.RegisterLayerType(97, gopacket.LayerTypeMetadata{Name: "Dot11MgmtMeasurementPilot", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot)} */ generateDecoders()}) -// LayerTypeDot11MgmtBeacon = gopacket.RegisterLayerType(98, gopacket.LayerTypeMetadata{Name: "Dot11MgmtBeacon", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtBeacon)} */ generateDecoders()}) -// LayerTypeDot11MgmtATIM = gopacket.RegisterLayerType(99, gopacket.LayerTypeMetadata{Name: "Dot11MgmtATIM", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtATIM)} */ generateDecoders()}) -// LayerTypeDot11MgmtDisassociation = gopacket.RegisterLayerType(100, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDisassociation", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtDisassociation)} */ generateDecoders()}) -// LayerTypeDot11MgmtAuthentication = gopacket.RegisterLayerType(101, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAuthentication", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtAuthentication)} */ generateDecoders()}) -// LayerTypeDot11MgmtDeauthentication = gopacket.RegisterLayerType(102, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDeauthentication", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtDeauthentication)} */ generateDecoders()}) -// LayerTypeDot11MgmtAction = gopacket.RegisterLayerType(103, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAction", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtAction)} */ generateDecoders()}) -// LayerTypeDot11MgmtActionNoAck = gopacket.RegisterLayerType(104, gopacket.LayerTypeMetadata{Name: "Dot11MgmtActionNoAck", Decoder: /* opacket.DecodeFunc(decodeDot11MgmtActionNoAck)} */ generateDecoders()}) -// LayerTypeDot11MgmtArubaWLAN = gopacket.RegisterLayerType(105, gopacket.LayerTypeMetadata{Name: "Dot11MgmtArubaWLAN", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtArubaWLAN)} */ generateDecoders()}) -// LayerTypeDot11WEP = gopacket.RegisterLayerType(106, gopacket.LayerTypeMetadata{Name: "Dot11WEP", Decoder: /* gopacket.DecodeFunc(decodeDot11WEP)} */ generateDecoders()}) -// 
LayerTypeDNS = gopacket.RegisterLayerType(107, gopacket.LayerTypeMetadata{Name: "DNS", Decoder: /* gopacket.DecodeFunc(decodeDNS)} */ generateDecoders()}) -// LayerTypeUSB = gopacket.RegisterLayerType(108, gopacket.LayerTypeMetadata{Name: "USB", Decoder: /* gopacket.DecodeFunc(decodeUSB)} */ generateDecoders()}) -// LayerTypeUSBRequestBlockSetup = gopacket.RegisterLayerType(109, gopacket.LayerTypeMetadata{Name: "USBRequestBlockSetup", Decoder: /* gopacket.DecodeFunc(decodeUSBRequestBlockSetup)} */ generateDecoders()}) -// LayerTypeUSBControl = gopacket.RegisterLayerType(110, gopacket.LayerTypeMetadata{Name: "USBControl", Decoder: /* gopacket.DecodeFunc(decodeUSBControl)} */ generateDecoders()}) -// LayerTypeUSBInterrupt = gopacket.RegisterLayerType(111, gopacket.LayerTypeMetadata{Name: "USBInterrupt", Decoder: /* gopacket.DecodeFunc(decodeUSBInterrupt)} */ generateDecoders()}) -// LayerTypeUSBBulk = gopacket.RegisterLayerType(112, gopacket.LayerTypeMetadata{Name: "USBBulk", Decoder: /* gopacket.DecodeFunc(decodeUSBBulk)} */ generateDecoders()}) -// LayerTypeLinuxSLL = gopacket.RegisterLayerType(113, gopacket.LayerTypeMetadata{Name: "Linux SLL", Decoder: /* gopacket.DecodeFunc(decodeLinuxSLL)} */ generateDecoders()}) -// LayerTypeSFlow = gopacket.RegisterLayerType(114, gopacket.LayerTypeMetadata{Name: "SFlow", Decoder: /* gopacket.DecodeFunc(decodeSFlow)} */ generateDecoders()}) -// LayerTypePrismHeader = gopacket.RegisterLayerType(115, gopacket.LayerTypeMetadata{Name: "Prism monitor mode header", Decoder: /* gopacket.DecodeFunc(decodePrismHeader)} */ generateDecoders()}) -// LayerTypeVXLAN = gopacket.RegisterLayerType(116, gopacket.LayerTypeMetadata{Name: "VXLAN", Decoder: /* gopacket.DecodeFunc(decodeVXLAN)} */ generateDecoders()}) -// LayerTypeNTP = gopacket.RegisterLayerType(117, gopacket.LayerTypeMetadata{Name: "NTP", Decoder: /* opacket.DecodeFunc(decodeNTP)} */ generateDecoders()}) -// LayerTypeDHCPv4 = gopacket.RegisterLayerType(118, gopacket.LayerTypeMetadata{Name: "DHCPv4", Decoder: /* gopacket.DecodeFunc(decodeDHCPv4)} */ generateDecoders()}) -// LayerTypeVRRP = gopacket.RegisterLayerType(119, gopacket.LayerTypeMetadata{Name: "VRRP", Decoder: /* gopacket.DecodeFunc(decodeVRRP)} */ generateDecoders()}) -// LayerTypeGeneve = gopacket.RegisterLayerType(120, gopacket.LayerTypeMetadata{Name: "Geneve", Decoder: /* gopacket.DecodeFunc(decodeGeneve)} */ generateDecoders()}) -// LayerTypeSTP = gopacket.RegisterLayerType(121, gopacket.LayerTypeMetadata{Name: "STP", Decoder: /* gopacket.DecodeFunc(decodeSTP)} */ generateDecoders()}) - LayerTypeBFD = gopacket.RegisterLayerType(122, gopacket.LayerTypeMetadata{Name: "BFD", Decoder: /* gopacket.DecodeFunc(decodeBFD)} */ generateDecoders()}) -// LayerTypeOSPF = gopacket.RegisterLayerType(123, gopacket.LayerTypeMetadata{Name: "OSPF", Decoder: /* gopacket.DecodeFunc(decodeOSPF)} */ generateDecoders()}) -// LayerTypeICMPv6RouterSolicitation = gopacket.RegisterLayerType(124, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterSolicitation", Decoder: /* gopacket.DecodeFunc(decodeICMPv6RouterSolicitation)} */ generateDecoders()}) -// LayerTypeICMPv6RouterAdvertisement = gopacket.RegisterLayerType(125, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterAdvertisement", Decoder: /* gopacket.DecodeFunc(decodeICMPv6RouterAdvertisement)} */ generateDecoders()}) -// LayerTypeICMPv6NeighborSolicitation = gopacket.RegisterLayerType(126, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborSolicitation", Decoder: /* gopacket.DecodeFunc(decodeICMPv6NeighborSolicitation)} */ 
generateDecoders()}) -// LayerTypeICMPv6NeighborAdvertisement = gopacket.RegisterLayerType(127, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborAdvertisement", Decoder: /* gopacket.DecodeFunc(decodeICMPv6NeighborAdvertisement)} */ generateDecoders()}) -// LayerTypeICMPv6Redirect = gopacket.RegisterLayerType(128, gopacket.LayerTypeMetadata{Name: "ICMPv6Redirect", Decoder: /* gopacket.DecodeFunc(decodeICMPv6Redirect)} */ generateDecoders()}) -// LayerTypeGTPv1U = gopacket.RegisterLayerType(129, gopacket.LayerTypeMetadata{Name: "GTPv1U", Decoder: /* gopacket.DecodeFunc(decodeGTPv1u)} */ generateDecoders()}) -// LayerTypeEAPOLKey = gopacket.RegisterLayerType(130, gopacket.LayerTypeMetadata{Name: "EAPOLKey", Decoder: /* gopacket.DecodeFunc(decodeEAPOLKey)} */ generateDecoders()}) -// LayerTypeLCM = gopacket.RegisterLayerType(131, gopacket.LayerTypeMetadata{Name: "LCM", Decoder: /* gopacket.DecodeFunc(decodeLCM)} */ generateDecoders()}) -// LayerTypeICMPv6Echo = gopacket.RegisterLayerType(132, gopacket.LayerTypeMetadata{Name: "ICMPv6Echo", Decoder: /* gopacket.DecodeFunc(decodeICMPv6Echo)} */ generateDecoders()}) -// LayerTypeSIP = gopacket.RegisterLayerType(133, gopacket.LayerTypeMetadata{Name: "SIP", Decoder: /* gopacket.DecodeFunc(decodeSIP)} */ generateDecoders()}) -// LayerTypeDHCPv6 = gopacket.RegisterLayerType(134, gopacket.LayerTypeMetadata{Name: "DHCPv6", Decoder: /* gopacket.DecodeFunc(decodeDHCPv6)} */ generateDecoders()}) -// LayerTypeMLDv1MulticastListenerReport = gopacket.RegisterLayerType(135, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerReport", Decoder: /* gopacket.DecodeFunc(decodeMLDv1MulticastListenerReport)} */ generateDecoders()}) -// LayerTypeMLDv1MulticastListenerDone = gopacket.RegisterLayerType(136, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerDone", Decoder: /* gopacket.DecodeFunc(decodeMLDv1MulticastListenerDone)} */ generateDecoders()}) -// LayerTypeMLDv1MulticastListenerQuery = gopacket.RegisterLayerType(137, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerQuery", Decoder: /* gopacket.DecodeFunc(decodeMLDv1MulticastListenerQuery)} */ generateDecoders()}) -// LayerTypeMLDv2MulticastListenerReport = gopacket.RegisterLayerType(138, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerReport", Decoder: /* gopacket.DecodeFunc(decodeMLDv2MulticastListenerReport)} */ generateDecoders()}) -// LayerTypeMLDv2MulticastListenerQuery = gopacket.RegisterLayerType(139, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerQuery", Decoder: /* gopacket.DecodeFunc(decodeMLDv2MulticastListenerQuery)} */ generateDecoders()}) -// LayerTypeTLS = gopacket.RegisterLayerType(140, gopacket.LayerTypeMetadata{Name: "TLS", Decoder: /* gopacket.DecodeFunc(decodeTLS)} */ generateDecoders()}) -// LayerTypeModbusTCP = gopacket.RegisterLayerType(141, gopacket.LayerTypeMetadata{Name: "ModbusTCP", Decoder: /* gopacket.DecodeFunc(decodeModbusTCP)} */ generateDecoders()}) -// LayerTypeRMCP = gopacket.RegisterLayerType(142, gopacket.LayerTypeMetadata{Name: "RMCP", Decoder: /* gopacket.DecodeFunc(decodeRMCP)} */ generateDecoders()}) -// LayerTypeASF = gopacket.RegisterLayerType(143, gopacket.LayerTypeMetadata{Name: "ASF", Decoder: /* gopacket.DecodeFunc(decodeASF)} */ generateDecoders()}) -// LayerTypeASFPresencePong = gopacket.RegisterLayerType(144, gopacket.LayerTypeMetadata{Name: "ASFPresencePong", Decoder: /* gopacket.DecodeFunc(decodeASFPresencePong)} */ generateDecoders()}) -// LayerTypeERSPANII = gopacket.RegisterLayerType(145, gopacket.LayerTypeMetadata{Name: 
"ERSPAN Type II", Decoder: /* gopacket.DecodeFunc(decodeERSPANII)} */ generateDecoders()}) -// LayerTypeRADIUS = gopacket.RegisterLayerType(146, gopacket.LayerTypeMetadata{Name: "RADIUS", Decoder: /* gopacket.DecodeFunc(decodeRADIUS)} */ generateDecoders()}) + LayerTypeBFD = gopacket.RegisterLayerType(122, gopacket.LayerTypeMetadata{Name: "BFD", Decoder: /* gopacket.DecodeFunc(decodeBFD)} */ generateDecoders()}) ) - -/* -var ( - // LayerClassIPNetwork contains TCP/IP network layer types. - LayerClassIPNetwork = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeIPv4, - LayerTypeIPv6, - }) - // LayerClassIPTransport contains TCP/IP transport layer types. - LayerClassIPTransport = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeTCP, - LayerTypeUDP, - LayerTypeSCTP, - }) - // LayerClassIPControl contains TCP/IP control protocols. - LayerClassIPControl = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeICMPv4, - LayerTypeICMPv6, - }) - // LayerClassSCTPChunk contains SCTP chunk types (not the top-level SCTP - // layer). - LayerClassSCTPChunk = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeSCTPUnknownChunkType, - LayerTypeSCTPData, - LayerTypeSCTPInit, - LayerTypeSCTPSack, - LayerTypeSCTPHeartbeat, - LayerTypeSCTPError, - LayerTypeSCTPShutdown, - LayerTypeSCTPShutdownAck, - LayerTypeSCTPCookieEcho, - LayerTypeSCTPEmptyLayer, - LayerTypeSCTPInitAck, - LayerTypeSCTPHeartbeatAck, - LayerTypeSCTPAbort, - LayerTypeSCTPShutdownComplete, - LayerTypeSCTPCookieAck, - }) - // LayerClassIPv6Extension contains IPv6 extension headers. - LayerClassIPv6Extension = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeIPv6HopByHop, - LayerTypeIPv6Routing, - LayerTypeIPv6Fragment, - LayerTypeIPv6Destination, - }) - LayerClassIPSec = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeIPSecAH, - LayerTypeIPSecESP, - }) - // LayerClassICMPv6NDP contains ICMPv6 neighbor discovery protocol - // messages. - LayerClassICMPv6NDP = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeICMPv6RouterSolicitation, - LayerTypeICMPv6RouterAdvertisement, - LayerTypeICMPv6NeighborSolicitation, - LayerTypeICMPv6NeighborAdvertisement, - LayerTypeICMPv6Redirect, - }) - // LayerClassMLDv1 contains multicast listener discovery protocol - LayerClassMLDv1 = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeMLDv1MulticastListenerQuery, - LayerTypeMLDv1MulticastListenerReport, - LayerTypeMLDv1MulticastListenerDone, - }) - // LayerClassMLDv2 contains multicast listener discovery protocol v2 - LayerClassMLDv2 = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeMLDv1MulticastListenerReport, - LayerTypeMLDv1MulticastListenerDone, - LayerTypeMLDv2MulticastListenerReport, - LayerTypeMLDv1MulticastListenerQuery, - LayerTypeMLDv2MulticastListenerQuery, - }) -) -*/ \ No newline at end of file diff --git a/verification/dependencies/github.com/prometheus/client_golang/prometheus/desc.gobra b/verification/dependencies/github.com/prometheus/client_golang/prometheus/desc.gobra index 97e378442..b45c4f15f 100644 --- a/verification/dependencies/github.com/prometheus/client_golang/prometheus/desc.gobra +++ b/verification/dependencies/github.com/prometheus/client_golang/prometheus/desc.gobra @@ -42,7 +42,7 @@ type Desc struct { help string // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. 
- // (joao) Not needed for our purposes + // (VerifiedSCION) Not needed for our purposes // constLabelPairs []*dto.LabelPair // variableLabels contains names of labels for which the metric // maintains variable values. diff --git a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra.old b/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra.old deleted file mode 100644 index e61f20e9f..000000000 --- a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra.old +++ /dev/null @@ -1,186 +0,0 @@ -// Specification for package "golang.org/x/net/internal/socket" -// Based on file https://github.com/golang/net/blob/master/internal/socket/socket.go - -package socket - -import ( - "net" - - sl "github.com/scionproto/scion/verification/utils/slices" -) - - -// A Message represents an IO message. -type Message struct { - // When writing, the Buffers field must contain at least one - // byte to write. - // When reading, the Buffers field will always contain a byte - // to read. - Buffers [][]byte - - // OOB contains protocol-specific control or miscellaneous - // ancillary data known as out-of-band data. - OOB []byte - - // Addr specifies a destination address when writing. - // It can be nil when the underlying protocol of the raw - // connection uses connection-oriented communication. - // After a successful read, it may contain the source address - // on the received packet. - Addr net.Addr - - N int // # of bytes read or written from/to Buffers - NN int // # of bytes read or written from/to OOB - Flags int // protocol-specific information on the received message - - // (VerifiedSCION) the following are, morally, ghost fields: - // is it still ok to read the buffers and Addr of the Message? - IsActive bool - // do we have a fixed amount of perms to the Addr a wildcard amount? 
- WildcardPerm bool -} - -pred (m *Message) Mem(lenBuffers int) { - acc(m) && - len(m.Buffers) == lenBuffers && - (forall i int :: { &m.Buffers[i] } 0 <= i && i < len(m.Buffers) ==> acc(&m.Buffers[i])) && - (m.IsActive ==> - (forall i int :: { &m.Buffers[i] } 0 <= i && i < len(m.Buffers) ==> - sl.AbsSlice_Bytes(m.Buffers[i], 0, len(m.Buffers[i])))) && - sl.AbsSlice_Bytes(m.OOB, 0, len(m.OOB)) && - // typeOf(m.Addr) == type[*net.UDPAddr] && - ((m.Addr != nil && m.IsActive && !m.WildcardPerm) ==> m.Addr.Mem()) && - ((m.Addr != nil && m.IsActive && m.WildcardPerm) ==> acc(m.Addr.Mem(), _)) && - 0 <= m.N -} - -pred (m *Message) MemWithoutHalf(lenBuffers int) { - acc(m, 1/2) && - len(m.Buffers) == lenBuffers && - (forall i int :: { &m.Buffers[i] } 0 <= i && i < len(m.Buffers) ==> acc(&m.Buffers[i])) && - (m.IsActive ==> - (forall i int :: { &m.Buffers[i] } 0 <= i && i < len(m.Buffers) ==> - sl.AbsSlice_Bytes(m.Buffers[i], 0, len(m.Buffers[i])))) && - sl.AbsSlice_Bytes(m.OOB, 0, len(m.OOB)) && - // typeOf(m.Addr) == type[*net.UDPAddr] && - ((m.Addr != nil && m.IsActive && !m.WildcardPerm) ==> m.Addr.Mem()) && - ((m.Addr != nil && m.IsActive && m.WildcardPerm) ==> acc(m.Addr.Mem(), _)) && - 0 <= m.N -} - -ghost -requires acc(m.Mem(lenBuffers), _) -decreases -pure func (m *Message) HasWildcardPermAddr(lenBuffers int) bool { - return unfolding acc(m.Mem(lenBuffers), _) in m.WildcardPerm -} - -ghost -requires acc(m.Mem(lenBuffers), _) -decreases -pure func (m *Message) HasActiveBuffers(lenBuffers int) bool { - return unfolding acc(m.Mem(lenBuffers), _) in m.IsActive -} - -ghost -requires acc(m.Mem(lenBuffers), _) -decreases -pure func (m *Message) GetAddr(lenBuffers int) net.Addr { - return unfolding acc(m.Mem(lenBuffers), _) in m.Addr -} - -ghost -requires acc(m.Mem(lenBuffers), _) -decreases -pure func (m *Message) GetMessage(lenBuffers int) Message { - return unfolding acc(m.Mem(lenBuffers), _) in *m -} - -ghost -requires acc(m.MemWithoutHalf(lenBuffers), _) -decreases -pure func (m *Message) GetAddrWithoutHalf(lenBuffers int) net.Addr { - return unfolding acc(m.MemWithoutHalf(lenBuffers), _) in m.Addr -} - -ghost -requires acc(m.Mem(lenBuffers), _) -requires 0 <= i && i < lenBuffers -decreases -pure func (m *Message) GetBuffer(lenBuffers int, i int) []byte { - return unfolding acc(m.Mem(lenBuffers), _) in m.Buffers[i] -} - -// Only defined for the case where lenBuffers == 1 -ghost -requires acc(m.Mem(1), _) -decreases -pure func (m *Message) GetFstBuffer() []byte { - return unfolding acc(m.Mem(1), _) in m.Buffers[0] -} - -// Only defined for the case where lenBuffers == 1 -ghost -requires acc(m.Mem(1), _) -decreases -pure func (m *Message) GetN() int { - return unfolding acc(m.Mem(1), _) in m.N -} - -ghost -requires m.Mem(1) -ensures acc(m, 1/2) && m.MemWithoutHalf(1) -ensures old(m.GetAddr(1)) === m.GetAddrWithoutHalf(1) -ensures m.N == old(unfolding m.Mem(1) in m.N) -ensures m.Buffers === old(unfolding m.Mem(1) in m.Buffers) -ensures old(m.GetFstBuffer()) === unfolding m.MemWithoutHalf(1) in m.Buffers[0] -ensures old(m.GetN()) == m.N -ensures old(m.HasWildcardPermAddr(1)) == m.WildcardPerm -ensures old(m.HasActiveBuffers(1)) == m.IsActive -ensures old(m.GetMessage(1)) === *m -decreases -func (m *Message) SplitPerm() { - unfold m.Mem(1) - fold m.MemWithoutHalf(1) -} - -ghost -requires acc(m, 1/2) && m.MemWithoutHalf(1) -ensures m.Mem(1) -ensures m.GetAddr(1) === old(m.GetAddrWithoutHalf(1)) -ensures old(m.N) == unfolding m.Mem(1) in m.N -ensures m.GetFstBuffer() === old(unfolding 
m.MemWithoutHalf(1) in m.Buffers[0]) -ensures unfolding m.Mem(1) in m.Buffers === old(m.Buffers) -ensures m.GetN() == old(m.N) -ensures m.HasWildcardPermAddr(1) == old(m.WildcardPerm) -ensures m.HasActiveBuffers(1) == old(m.IsActive) -ensures m.GetMessage(1) === old(*m) -decreases -func (m *Message) CombinePerm() { - unfold m.MemWithoutHalf(1) - fold m.Mem(1) -} - -ghost -requires forall j int :: { &msgs[j] } 0 <= j && j < len(msgs) ==> msgs[j].Mem(1) -ensures forall j int :: { &msgs[j] } 0 <= j && j < len(msgs) ==> acc(&msgs[j], 1/2) && msgs[j].MemWithoutHalf(1) -ensures forall j int :: { &msgs[j] } 0 <= j && j < len(msgs) ==> old(msgs[j].GetMessage(1)) === msgs[j] -ensures forall j int :: { &msgs[j] } 0 <= j && j < len(msgs) ==> msgs[j].WildcardPerm == old(msgs[j].HasWildcardPermAddr(1)) -ensures forall j int :: { &msgs[j] } 0 <= j && j < len(msgs) ==> msgs[j].IsActive == old(msgs[j].HasActiveBuffers(1)) -decreases -func SplitPermMsgs(msgs []Message) { - invariant 0 <= i && i <= len(msgs) - invariant forall j int :: { &msgs[j] } i <= j && j < len(msgs) ==> msgs[j].Mem(1) - invariant forall j int :: { &msgs[j] } 0 <= j && j < i ==> acc(&msgs[j], 1/2) && msgs[j].MemWithoutHalf(1) - invariant forall j int :: { &msgs[j] } 0 <= j && j < i ==> msgs[j].WildcardPerm == old(msgs[j].HasWildcardPermAddr(1)) - invariant forall j int :: { &msgs[j] } 0 <= j && j < i ==> msgs[j].IsActive == old(msgs[j].HasActiveBuffers(1)) - invariant forall j int :: { &msgs[j] } 0 <= j && j < i ==> msgs[j] === old(msgs[j].GetMessage(1)) - invariant forall j int :: { &msgs[j] } i <= j && j < len(msgs) ==> msgs[j].GetMessage(1) === old(msgs[j].GetMessage(1)) - decreases len(msgs) - i - for i := 0; i < len(msgs); i++ { - assert forall j int :: { &msgs[j] }{ &msgs[j].WildcardPerm } 0 <= j && j < i ==> msgs[j].WildcardPerm == old(msgs[j].HasWildcardPermAddr(1)) - msgs[i].SplitPerm() - assert forall j int :: { &msgs[j] } 0 <= j && j < i ==> msgs[j].WildcardPerm == old(msgs[j].HasWildcardPermAddr(1)) - assert msgs[i].WildcardPerm == old(msgs[i].HasWildcardPermAddr(1)) - } -} \ No newline at end of file diff --git a/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra b/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra index 38c0cf5e7..b32881ddc 100644 --- a/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra +++ b/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra @@ -46,74 +46,41 @@ type PacketConn struct { // SetDeadline sets the read and write deadlines associated with the // endpoint. -trusted preserves c.Mem() ensures e != nil ==> e.ErrorMem() ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn()) decreases _ -func (c *PacketConn) SetDeadline(t time.Time) (e error) { - if !c.payloadHandler.ok() { - return errInvalidConn - } - return c.payloadHandler.PacketConn.SetDeadline(t) -} +func (c *PacketConn) SetDeadline(t time.Time) (e error) // SetReadDeadline sets the read deadline associated with the // endpoint. -trusted preserves c.Mem() ensures e != nil ==> e.ErrorMem() ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn()) decreases _ -func (c *PacketConn) SetReadDeadline(t time.Time) (e error) { - if !c.payloadHandler.ok() { - return errInvalidConn - } - return c.payloadHandler.PacketConn.SetReadDeadline(t) -} +func (c *PacketConn) SetReadDeadline(t time.Time) (e error) // SetWriteDeadline sets the write deadline associated with the // endpoint. 
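 // As a rough sketch of the convention used throughout this patch: in Gobra,
 // a body-less declaration such as
 //   preserves c.Mem()
 //   ensures e != nil ==> e.ErrorMem()
 //   func (c *RawConn) SetWriteDeadline(t time.Time) (e error)
 // is an abstract stub whose contract is assumed to hold, which is why the
 // `trusted` marker and the copied Go bodies can be dropped.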
-trusted preserves c.Mem() ensures e != nil ==> e.ErrorMem() ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn()) decreases _ -func (c *PacketConn) SetWriteDeadline(t time.Time) (e error) { - if !c.payloadHandler.ok() { - return errInvalidConn - } - return c.payloadHandler.PacketConn.SetWriteDeadline(t) -} +func (c *PacketConn) SetWriteDeadline(t time.Time) (e error) // Close closes the endpoint. -trusted requires c.Mem() ensures e != nil ==> e.ErrorMem() decreases _ -func (c *PacketConn) Close() (e error) { - if !c.payloadHandler.ok() { - return errInvalidConn - } - return c.payloadHandler.PacketConn.Close() -} +func (c *PacketConn) Close() (e error) // NewPacketConn returns a new PacketConn using c as its underlying // transport. -trusted requires c != nil && c.Mem() ensures p != nil && p.Mem() ensures p.GetUnderlyingConn() === c decreases _ -func NewPacketConn(c net.PacketConn) (p *PacketConn) { - cc, _ := socket.NewConn(c.(net.Conn)) - p := &PacketConn{ - genericOpt: genericOpt{Conn: cc}, - dgramOpt: dgramOpt{Conn: cc}, - payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, - } - return p -} +func NewPacketConn(c net.PacketConn) (p *PacketConn) ghost requires acc(p.Mem(), _) @@ -146,107 +113,58 @@ type RawConn struct { // SetDeadline sets the read and write deadlines associated with the // endpoint. -trusted preserves c.Mem() ensures e != nil ==> e.ErrorMem() decreases _ -func (c *RawConn) SetDeadline(t time.Time) (e error) { - if !c.packetHandler.ok() { - return errInvalidConn - } - return c.packetHandler.IPConn.SetDeadline(t) -} +func (c *RawConn) SetDeadline(t time.Time) (e error) // SetReadDeadline sets the read deadline associated with the // endpoint. -trusted preserves c.Mem() ensures e != nil ==> e.ErrorMem() decreases _ -func (c *RawConn) SetReadDeadline(t time.Time) (e error) { - if !c.packetHandler.ok() { - return errInvalidConn - } - return c.packetHandler.IPConn.SetReadDeadline(t) -} +func (c *RawConn) SetReadDeadline(t time.Time) (e error) // SetWriteDeadline sets the write deadline associated with the // endpoint. -trusted preserves c.Mem() ensures e != nil ==> e.ErrorMem() decreases _ -func (c *RawConn) SetWriteDeadline(t time.Time) (e error) { - if !c.packetHandler.ok() { - return errInvalidConn - } - return c.packetHandler.IPConn.SetWriteDeadline(t) -} +func (c *RawConn) SetWriteDeadline(t time.Time) (e error) // Close closes the endpoint. -trusted requires c.Mem() ensures e != nil ==> e.ErrorMem() decreases _ -func (c *RawConn) Close() (e error) { - if !c.packetHandler.ok() { - return errInvalidConn - } - return c.packetHandler.IPConn.Close() -} +func (c *RawConn) Close() (e error) // NewRawConn returns a new RawConn using c as its underlying // transport. -trusted requires c.Mem() ensures err == nil ==> r.Mem() ensures err != nil ==> err.ErrorMem() decreases _ -func NewRawConn(c net.PacketConn) (r *RawConn, err error) { - cc, err := socket.NewConn(c.(net.Conn)) - if err != nil { - return nil, err - } - r := &RawConn{ - genericOpt: genericOpt{Conn: cc}, - dgramOpt: dgramOpt{Conn: cc}, - packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, - } - so, ok := sockOpts[ssoHeaderPrepend] - if !ok { - return nil, errNotImplemented - } - if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { - return nil, err - } - return r, nil -} +func NewRawConn(c net.PacketConn) (r *RawConn, err error) // (VerifiedSCION) Promote method from payloadHandler to PacketConn. // ReadBatch reads a batch of messages. 
// On a successful read it returns the number of messages received, up // to len(ms). -trusted preserves acc(c.Mem(), _) preserves forall i int :: { &ms[i] } 0 <= i && i < len(ms) ==> (&ms[i]).Mem() ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn()) ensures err == nil ==> (0 <= n && n <= len(ms)) ensures err != nil ==> err.ErrorMem() -func (c *PacketConn) ReadBatch(ms []socket.Message, flags int) (n int, err error) { - return c.payloadHandler.ReadBatch(ms, flags) -} +func (c *PacketConn) ReadBatch(ms []socket.Message, flags int) (n int, err error) // (VerifiedSCION) Promote method from payloadHandler to PacketConn. // WriteBatch writes a batch of messages. // It returns the number of messages written on a successful write. -trusted preserves forall i int :: { &ms[i] } 0 <= i && i < len(ms) ==> acc((&ms[i]).Mem(), R10) preserves acc(c.Mem(), _) ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn()) ensures err == nil ==> 0 <= n && n <= len(ms) ensures err != nil ==> err.ErrorMem() -func (c *PacketConn) WriteBatch(ms []socket.Message, flags int) (n int, err error) { - return c.payloadHandler.WriteBatch(ms, flags) -} \ No newline at end of file +func (c *PacketConn) WriteBatch(ms []socket.Message, flags int) (n int, err error) \ No newline at end of file diff --git a/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra b/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra index aee3a6c80..85061a9e9 100644 --- a/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra +++ b/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra @@ -27,36 +27,16 @@ type Conn struct { // PathMTU returns a path MTU value for the destination associated // with the endpoint. -trusted preserves c.Mem() ensures e != nil ==> e.ErrorMem() decreases _ -func (c *Conn) PathMTU() (n int, e error) { - if !c.ok() { - return 0, errInvalidConn - } - so, ok := sockOpts[ssoPathMTU] - if !ok { - return 0, errNotImplemented - } - _, mtu, err := so.getMTUInfo(c.Conn) - if err != nil { - return 0, err - } - return mtu, nil -} +func (c *Conn) PathMTU() (n int, e error) // NewConn returns a new Conn. -trusted requires c.Mem() ensures conn.Mem() decreases _ -func NewConn(c net.Conn) (conn *Conn) { - cc, _ := socket.NewConn(c) - return &Conn{ - genericOpt: genericOpt{Conn: cc}, - } -} +func NewConn(c net.Conn) (conn *Conn) // A PacketConn represents a packet network endpoint that uses IPv6 // transport. It is used to control several IP-level socket options @@ -69,73 +49,41 @@ type PacketConn struct { // SetDeadline sets the read and write deadlines associated with the // endpoint. -trusted preserves c.Mem() ensures e != nil ==> e.ErrorMem() ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn()) decreases _ -func (c *PacketConn) SetDeadline(t time.Time) (e error) { - if !c.payloadHandler.ok() { - return errInvalidConn - } - return c.payloadHandler.SetDeadline(t) -} +func (c *PacketConn) SetDeadline(t time.Time) (e error) // SetReadDeadline sets the read deadline associated with the // endpoint. -trusted preserves c.Mem() ensures e != nil ==> e.ErrorMem() ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn()) decreases _ -func (c *PacketConn) SetReadDeadline(t time.Time) (e error) { - if !c.payloadHandler.ok() { - return errInvalidConn - } - return c.payloadHandler.SetReadDeadline(t) -} +func (c *PacketConn) SetReadDeadline(t time.Time) (e error) // SetWriteDeadline sets the write deadline associated with the // endpoint. 
-trusted preserves c.Mem() ensures e != nil ==> e.ErrorMem() ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn()) decreases _ -func (c *PacketConn) SetWriteDeadline(t time.Time) (e error) { - if !c.payloadHandler.ok() { - return errInvalidConn - } - return c.payloadHandler.SetWriteDeadline(t) -} +func (c *PacketConn) SetWriteDeadline(t time.Time) (e error) // Close closes the endpoint. -trusted requires c.Mem() ensures e != nil ==> e.ErrorMem() decreases _ -func (c *PacketConn) Close() (e error) { - if !c.payloadHandler.ok() { - return errInvalidConn - } - return c.payloadHandler.Close() -} +func (c *PacketConn) Close() (e error) // NewPacketConn returns a new PacketConn using c as its underlying // transport. -trusted requires c != nil && c.Mem() ensures p != nil && p.Mem() ensures p.GetUnderlyingConn() === c decreases _ -func NewPacketConn(c net.PacketConn) (p *PacketConn) { - cc, _ := socket.NewConn(c.(net.Conn)) - return &PacketConn{ - genericOpt: genericOpt{Conn: cc}, - dgramOpt: dgramOpt{Conn: cc}, - payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, - } -} +func NewPacketConn(c net.PacketConn) (p *PacketConn) ghost requires acc(p.Mem(), _) @@ -161,26 +109,20 @@ func (p *PacketConn) ExchangeWildcardPerm() (c net.PacketConn) // ReadBatch reads a batch of messages. // On a successful read it returns the number of messages received, up // to len(ms). -trusted preserves forall i int :: { &ms[i] } 0 <= i && i < len(ms) ==> (&ms[i]).Mem() preserves acc(c.Mem(), _) ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn()) ensures err == nil ==> (0 <= n && n <= len(ms)) ensures err != nil ==> err.ErrorMem() -func (c *PacketConn) ReadBatch(ms []socket.Message, flags int) (n int, err error) { - return c.payloadHandler.ReadBatch(ms, flags) -} +func (c *PacketConn) ReadBatch(ms []socket.Message, flags int) (n int, err error) // (VerifiedSCION) Promote method from payloadHandler to PacketConn. // WriteBatch writes a batch of messages. // It returns the number of messages written on a successful write. -trusted preserves forall i int :: { &ms[i] } 0 <= i && i < len(ms) ==> acc((&ms[i]).Mem(), R10) preserves acc(c.Mem(), _) ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn()) ensures err == nil ==> 0 <= n && n <= len(ms) ensures err != nil ==> err.ErrorMem() -func (c *PacketConn) WriteBatch(ms []socket.Message, flags int) (n int, err error) { - return c.payloadHandler.WriteBatch(ms, flags) -} \ No newline at end of file +func (c *PacketConn) WriteBatch(ms []socket.Message, flags int) (n int, err error) \ No newline at end of file diff --git a/verification/dependencies/strconv/itoa.gobra.verified_backup b/verification/dependencies/strconv/itoa.gobra.verified_backup deleted file mode 100644 index b42f462ad..000000000 --- a/verification/dependencies/strconv/itoa.gobra.verified_backup +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in https://golang.org/LICENSE - -// Signatures for the public declarations in file -// https://github.com/golang/go/blob/master/src/strconv/itoa.gobra - -package strconv - -// import "math/bits" - -const fastSmalls = true // enable fast path for small integers - -// FormatUint returns the string representation of i in the given base, -// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z' -// for digit values >= 10. 
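-// For example, FormatUint(255, 16) returns "ff" and FormatUint(5, 2) returns "101".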
-requires i >= 0 -requires 2 <= base && base <= 36 -decreases -func FormatUint(i uint64, base int) string { - if fastSmalls && i < nSmalls && base == 10 { - return small(int(i)) - } - _, s := formatBits(nil, i, base, false, false) - return s -} - -// FormatInt returns the string representation of i in the given base, -// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z' -// for digit values >= 10. -requires 2 <= base && base <= 36 -decreases -func FormatInt(i int64, base int) string { - if fastSmalls && 0 <= i && i < nSmalls && base == 10 { - return small(int(i)) - } - _, s := formatBits(nil, uint64(i), base, i < 0, false) - return s -} - -// Itoa is equivalent to FormatInt(int64(i), 10). -decreases -func Itoa(i int) string { - return FormatInt(int64(i), 10) -} - -// AppendInt appends the string form of the integer i, -// as generated by FormatInt, to dst and returns the extended buffer. -requires 2 <= base && base <= len(digits) -requires forall i int :: 0 <= i && i < len(dst) ==> acc(&dst[i]) -ensures forall i int :: 0 <= i && i < len(res) ==> acc(&res[i]) -decreases -func AppendInt(dst []byte, i int64, base int) (res []byte) { - if fastSmalls && 0 <= i && i < nSmalls && base == 10 { - // (Gobra) unpacking strings is not yet supported. The conversion - // to []byte was introduced to overcome that - return append(perm(1/2), dst, []byte(small(int(i)))...) - } - dst, _ = formatBits(dst, uint64(i), base, i < 0, true) - return dst -} - -// AppendUint appends the string form of the unsigned integer i, -// as generated by FormatUint, to dst and returns the extended buffer. -requires 2 <= base && base <= len(digits) -requires forall i int :: 0 <= i && i < len(dst) ==> acc(&dst[i]) -ensures forall i int :: 0 <= i && i < len(res) ==> acc(&res[i]) -decreases -func AppendUint(dst []byte, i uint64, base int) (res []byte) { - // valid assumption, i is of type uint64. Currently, Gobra - // does not prove this. - assume i >= 0 - if fastSmalls && i < nSmalls && base == 10 { - // (Gobra) unpacking strings is not yet supported. The conversion - // to []byte was introduced to overcome that - return append(perm(1/2), dst, []byte(small(int(i)))...) - } - dst, _ = formatBits(dst, i, base, false, true) - return dst -} - -// small returns the string for an i with 0 <= i < nSmalls. -requires 0 <= i && i < nSmalls -decreases -func small(i int) string { - if i < 10 { - return digits[i : i+1] - } - return smallsString[i*2 : i*2+2] -} - -const nSmalls = 100 - -const smallsString = "00010203040506070809" + - "10111213141516171819" + - "20212223242526272829" + - "30313233343536373839" + - "40414243444546474849" + - "50515253545556575859" + - "60616263646566676869" + - "70717273747576777879" + - "80818283848586878889" + - "90919293949596979899" - -const host32bit = ^uint(0)>>32 == 0 - -const digits = "0123456789abcdefghijklmnopqrstuvwxyz" - -// formatBits computes the string representation of u in the given base. -// If neg is set, u is treated as negative int64 value. If append_ is -// set, the string is appended to dst and the resulting byte slice is -// returned as the first result value; otherwise the string is returned -// as the second result value. 
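-// For example, formatBits(nil, 255, 16, false, false) returns d == nil and s == "ff",
-// while formatBits(dst, 255, 16, false, true) appends "ff" to dst and returns it as d.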
-//
-requires append_ ==> forall i int :: 0 <= i && i < len(dst) ==> acc(&dst[i])
-requires 2 <= base && base <= len(digits)
-ensures append_ ==> forall i int :: 0 <= i && i < len(d) ==> acc(&d[i])
-decreases _
-func formatBits(dst []byte, u uint64, base int, neg, append_ bool) (d []byte, s string) // {
-// if base < 2 || base > len(digits) {
-// panic("strconv: illegal AppendInt/FormatInt base")
-// }
-// // 2 <= base && base <= len(digits)
-//
-// var a [64 + 1]byte // +1 for sign of 64bit value in base 2
-// i := len(a)
-//
-// if neg {
-// u = -u
-// }
-//
-// // convert bits
-// // We use uint values where we can because those will
-// // fit into a single register even on a 32bit machine.
-// if base == 10 {
-// // common case: use constants for / because
-// // the compiler can optimize it into a multiply+shift
-//
-// if host32bit {
-// // convert the lower digits using 32bit operations
-// for u >= 1e9 {
-// // Avoid using r = a%b in addition to q = a/b
-// // since 64bit division and modulo operations
-// // are calculated by runtime functions on 32bit machines.
-// q := u / 1e9
-// us := uint(u - q*1e9) // u % 1e9 fits into a uint
-// for j := 4; j > 0; j-- {
-// is := us % 100 * 2
-// us /= 100
-// i -= 2
-// a[i+1] = smallsString[is+1]
-// a[i+0] = smallsString[is+0]
-// }
-//
-// // us < 10, since it contains the last digit
-// // from the initial 9-digit us.
-// i--
-// a[i] = smallsString[us*2+1]
-//
-// u = q
-// }
-// // u < 1e9
-// }
-//
-// // u guaranteed to fit into a uint
-// us := uint(u)
-// for us >= 100 {
-// is := us % 100 * 2
-// us /= 100
-// i -= 2
-// a[i+1] = smallsString[is+1]
-// a[i+0] = smallsString[is+0]
-// }
-//
-// // us < 100
-// is := us * 2
-// i--
-// a[i] = smallsString[is+1]
-// if us >= 10 {
-// i--
-// a[i] = smallsString[is]
-// }
-//
-// } else if isPowerOfTwo(base) {
-// // Use shifts and masks instead of / and %.
-// // Base is a power of 2 and 2 <= base <= len(digits) where len(digits) is 36.
-// // The largest power of 2 below or equal to 36 is 32, which is 1 << 5;
-// // i.e., the largest possible shift count is 5. By &-ing that value with
-// // the constant 7 we tell the compiler that the shift count is always
-// // less than 8 which is smaller than any register width. This allows
-// // the compiler to generate better code for the shift operation.
-// shift := uint(bits.TrailingZeros(uint(base))) & 7
-// b := uint64(base)
-// m := uint(base) - 1 // == 1<<shift - 1
-// for u >= b {
-// i--
-// a[i] = digits[uint(u)&m]
-// u >>= shift
-// }
-// // u < base
-// i--
-// a[i] = digits[uint(u)]
-// } else {
-// // general case
-// b := uint64(base)
-// for u >= b {
-// i--
-// // Avoid using r = a%b in addition to q = a/b
-// // since 64bit division and modulo operations
-// // are calculated by runtime functions on 32bit machines.
-// q := u / b
-// a[i] = digits[uint(u-q*b)]
-// u = q
-// }
-// // u < base
-// i--
-// a[i] = digits[uint(u)]
-// }
-//
-// // add sign, if any
-// if neg {
-// i--
-// a[i] = '-'
-// }
-//
-// if append_ {
-// d = append(dst, a[i:]...)
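-// // d now extends dst with a[i:], which holds the digits plus any sign,
-// // e.g. "-123" for FormatInt(-123, 10)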
-// return -// } -// s = string(a[i:]) -// return -//} - -decreases -func isPowerOfTwo(x int) bool { - return x&(x-1) == 0 -} diff --git a/verification/dependencies/time/time.gobra b/verification/dependencies/time/time.gobra index 055354185..141537282 100644 --- a/verification/dependencies/time/time.gobra +++ b/verification/dependencies/time/time.gobra @@ -65,7 +65,7 @@ func (m Month) String() (res string) type Weekday int const ( - // (joao) this used to be defined in terms of iota + // (VerifiedSCION) this used to be defined in terms of iota Sunday Weekday = 0 Monday Weekday = 1 Tuesday Weekday = 2 @@ -176,17 +176,6 @@ func (d Duration) Microseconds() int64 { return int64(d) / 1000 } decreases func (d Duration) Milliseconds() int64 { return int64(d) / 1000000 } -// Seconds returns the duration as a floating point number of seconds. -/* (joao) no support for float -func (d Duration) Seconds() float64 - -// Minutes returns the duration as a floating point number of minutes. -func (d Duration) Minutes() float64 - -// Hours returns the duration as a floating point number of hours. -func (d Duration) Hours() float64 -*/ - // Truncate returns the result of rounding d toward zero to a multiple of m. // If m <= 0, Truncate returns d unchanged. ensures m <= 0 ==> res == d From df05f900ed68dfba25021a01e8f2dadd7640847d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Thu, 18 Apr 2024 12:08:46 +0200 Subject: [PATCH 19/57] backup (#324) --- router/dataplane.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/router/dataplane.go b/router/dataplane.go index 2e69a0c3a..fb5e0d976 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -141,7 +141,7 @@ type BatchConn interface { // @ ensures err == nil ==> // @ forall i int :: { &msgs[i] } 0 <= i && i < n ==> // @ MsgToAbsVal(dp, &msgs[i], ingressID) == old(MultiReadBioIO_val(place, n)[i]) - // TODO (Markus): uint16 or option[io.IO_ifs] for ingress + // TODO (VerifiedSCION): uint16 or option[io.IO_ifs] for ingress ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place, ghost dp io.DataPlaneSpec @*/) (n int, err error) // @ requires acc(addr.Mem(), _) // @ requires acc(Mem(), _) From 8d708b6e1aef0799d12d69db6dae1f64eabfa63b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Thu, 18 Apr 2024 12:12:37 +0200 Subject: [PATCH 20/57] Remove more names (#325) * backup * remove more --- router/dataplane.go | 2 +- .../github.com/prometheus/client_golang/prometheus/metric.gobra | 2 +- verification/dependencies/strings/strings.gobra | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/router/dataplane.go b/router/dataplane.go index fb5e0d976..01f71849e 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -2667,7 +2667,7 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost dp io.DataPla // @ unfold acc(p.scionLayer.Mem(ub), R55) if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { // TODO parameter problem invalid path - // TODO(joao): we currently expose a lot of internal information from slayers here. Can we avoid it? + // (VerifiedSCION) we currently expose a lot of internal information from slayers here. Can we avoid it? 
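+			// The ghost statements below re-combine the split byte ranges and
+			// exchange the current path back into the SCION layer's path pool,
+			// restoring the packet's memory predicates on this error path.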
// @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ unfold p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) diff --git a/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra b/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra index 04393ff96..e64ce246e 100644 --- a/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra +++ b/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra @@ -39,7 +39,7 @@ type Metric interface { // Write encodes the Metric into a "Metric" Protocol Buffer data // transmission object. - // (joao) Not supported; we do not need to go down this rabbit hole for our purposes + // (VerifiedSCION) Not supported; we do not need to go down this rabbit hole for our purposes // Write(*dto.Metric) error } diff --git a/verification/dependencies/strings/strings.gobra b/verification/dependencies/strings/strings.gobra index 4af6da2ff..13fb98747 100644 --- a/verification/dependencies/strings/strings.gobra +++ b/verification/dependencies/strings/strings.gobra @@ -89,7 +89,7 @@ func Fields(s string) (res []string) requires forall i int :: { &elems[i] } 0 <= i && i < len(elems) ==> acc(&elems[i], _) ensures len(elems) == 0 ==> res == "" ensures len(elems) == 1 ==> res == elems[0] -// (joao) Leads to precondition of call might not hold (permission to elems[i] might not suffice) +// (VerifiedSCION) Leads to precondition of call might not hold (permission to elems[i] might not suffice) // ensures len(elems) > 1 ==> res == elems[0] + sep + Join(elems[1:], sep) decreases _ pure func Join(elems []string, sep string) (res string) From 598749b6b356aabca7532f149c8b368cfa8d1b1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Thu, 18 Apr 2024 20:18:38 +0200 Subject: [PATCH 21/57] Disable conditionalizePermissions (#319) * Update gobra.yml * Update gobra.yml * fix verification error * fixed precondition of XoverEvent * enable moreJoins impure (#321) * invariant strengthening * undo change to the state consolidator --------- Co-authored-by: mlimbeck Co-authored-by: Dspil --- .github/workflows/gobra.yml | 5 ++--- router/dataplane.go | 11 +++++++---- router/io-spec-abstract-transitions.gobra | 4 ++-- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml index f16eae5a2..137fa6572 100644 --- a/.github/workflows/gobra.yml +++ b/.github/workflows/gobra.yml @@ -416,9 +416,8 @@ jobs: assumeInjectivityOnInhale: ${{ env.assumeInjectivityOnInhale }} checkConsistency: ${{ env.checkConsistency }} parallelizeBranches: '1' - # The following flag has a significant influence on the number of branches verified. - # Without it, verification would take a lot longer. 
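+        # moreJoins: 'impure' (added below) makes the verifier join branches at
+        # impure join points, which curbs the branch explosion previously
+        # mitigated by conditionalizePermissions (#319, #321).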
- conditionalizePermissions: '1' + conditionalizePermissions: '0' + moreJoins: 'impure' imageVersion: ${{ env.imageVersion }} mceMode: 'on' requireTriggers: ${{ env.requireTriggers }} diff --git a/router/dataplane.go b/router/dataplane.go index 01f71849e..4b22afc2c 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -2678,14 +2678,14 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost dp io.DataPla var err error if p.hopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ); err != nil { // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) - // @ fold p.scionLayer.Mem(ub) + // @ fold acc(p.scionLayer.Mem(ub), 1-R55) // @ p.scionLayer.DowngradePerm(ub) // TODO parameter problem invalid path return processResult{}, err } if p.infoField, err = p.path.GetCurrentInfoField( /*@ ubPath @*/ ); err != nil { // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) - // @ fold p.scionLayer.Mem(ub) + // @ fold acc(p.scionLayer.Mem(ub), 1-R55) // @ p.scionLayer.DowngradePerm(ub) // TODO parameter problem invalid path return processResult{}, err @@ -2696,6 +2696,8 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost dp io.DataPla // @ TemporaryAssumeForIO(len(get(old(absPkt(dp, ub)).LeftSeg).History) == 0) // @ TemporaryAssumeForIO(slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)) // @ TemporaryAssumeForIO(absPkt(dp, ub) == AbsDoXover(old(absPkt(dp, ub)))) + // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) + // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(dp, ub))) // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return processResult{}, nil } @@ -3266,6 +3268,7 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(dp, r) @*/ } // @ assert AbsVerifyCurrentMACConstraint(nextPkt, dp) + // @ unfold acc(p.scionLayer.Mem(ub), R3) } // @ fold acc(p.scionLayer.Mem(ub), R3) // @ assert p.segmentChange ==> nextPkt.RightSeg != none[io.IO_seg2] @@ -3293,6 +3296,7 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ p.d.getExternalMem() // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if c, ok := p.d.external[egressID]; ok { + // @ TemporaryAssumeForIO(egressID != 0) if err := p.processEgress( /*@ ub, dp @*/ ); err != nil { // @ fold p.d.validResult(processResult{}, false) return processResult{}, err /*@, false, absReturnErr(dp, processResult{}) @*/ @@ -4125,8 +4129,7 @@ func decodeLayers(data []byte, base *slayers.SCION, // ghost clean-up: // @ ghost - // @ invariant 0 <= i0 && i0 <= len(opts) - // @ invariant -1 <= c && c <= i0 + // @ invariant -1 <= c && c < i0 // @ invariant len(processed) == len(opts) // @ invariant len(offsets) == len(opts) // @ invariant forall i int :: {&opts[i]} 0 <= i && i < len(opts) ==> acc(&opts[i], R10) diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra index 2d36d4054..fb9d60482 100644 --- a/router/io-spec-abstract-transitions.gobra +++ b/router/io-spec-abstract-transitions.gobra @@ -212,7 +212,7 @@ requires len(get(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg).Fu requires len(get(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg).History) == 0 requires AbsVerifyCurrentMACConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp) requires AbsValidateEgressIDConstraintXover(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp) -requires 
AbsEgressInterfaceConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), egressID) +requires egressID != none[io.IO_ifs] ==> AbsEgressInterfaceConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), egressID) requires egressID == none[io.IO_ifs] ==> newPkt == AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) requires egressID != none[io.IO_ifs] ==> newPkt == AbsProcessEgress(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID))) preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; @@ -227,8 +227,8 @@ func XoverEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt reveal AbsVerifyCurrentMACConstraint(intermediatePkt1, dp) reveal AbsVerifyCurrentMACConstraint(intermediatePkt2, dp) reveal AbsValidateEgressIDConstraintXover(intermediatePkt2, dp) - reveal AbsEgressInterfaceConstraint(intermediatePkt2, egressID) if(egressID != none[io.IO_ifs]){ + reveal AbsEgressInterfaceConstraint(intermediatePkt2, egressID) reveal AbsProcessEgress(intermediatePkt2) } AtomicXover(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) From bc81806b3593d9c08a56a9e3d742a0a4ad52070a Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Thu, 18 Apr 2024 22:20:59 +0200 Subject: [PATCH 22/57] Refactored Widen-lemma (#327) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update gobra.yml * Update gobra.yml * fix verification error * fixed precondition of XoverEvent * enable moreJoins impure (#321) * invariant strengthening * progress widen-lemma proof * fix verification error * proven? * fix * bugfix * Update router/widen-lemma.gobra Co-authored-by: João Pereira * Update router/widen-lemma.gobra Co-authored-by: João Pereira * joao * indent --------- Co-authored-by: João Pereira Co-authored-by: Dspil Co-authored-by: Dionysios Spiliopoulos <32896454+Dspil@users.noreply.github.com> --- router/widen-lemma.gobra | 961 +++++++++------------------------------ 1 file changed, 225 insertions(+), 736 deletions(-) diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra index 61580235e..05d87f978 100644 --- a/router/widen-lemma.gobra +++ b/router/widen-lemma.gobra @@ -21,13 +21,13 @@ import ( "verification/io" . "verification/utils/definitions" "verification/dependencies/encoding/binary" + "github.com/scionproto/scion/pkg/slayers" "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" ) -// Some thins in this file can be simplified. Nonetheless, the important definition here +// Some things in this file can be simplified. Nonetheless, the important definition here // is absIO_valWidenLemma. Everything else can be seen as an implementation detail. 
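+// Intuitively, widening is sound because the abstraction functions only read
+// bytes below `length`. A hypothetical call site uses the lemma as follows:
+//
+//	absIO_valWidenLemma(dp, raw, ingressID, length)
+//	assert absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==>
+//		absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID)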
-// TODO: prove Lemma ghost requires 0 <= length && length <= len(raw) requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) @@ -38,19 +38,19 @@ ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) ensures absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==> absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID) decreases -func absIO_valWidenLemma(dp io.DataPlaneSpec, raw []byte, ingressID uint16, length int) -/* { +func absIO_valWidenLemma(dp io.DataPlaneSpec, raw []byte, ingressID uint16, length int) { var ret1 io.IO_val var ret2 io.IO_val - if (validPktMetaHdr(raw[:length]) && absPkt(dp, raw[:length]) != none[io.IO_pkt2]) { - validPktMetaHdrWidenLemma(raw, length) - assert validPktMetaHdr(raw) + if (slayers.ValidPktMetaHdr(raw[:length]) && slayers.IsSupportedPkt(raw[:length])) { + ValidPktMetaHdrWidenLemma(raw, length) + assert slayers.ValidPktMetaHdr(raw) + IsSupportedPktWidenLemma(raw, length) + assert slayers.IsSupportedPkt(raw) absPktWidenLemma(dp, raw, length) - assert absPkt(dp, raw) != none[io.IO_pkt2] - ret1 = io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw))}) - ret2 = io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw[:length]))}) + ret1 = io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(dp, raw)}) + ret2 = io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(dp, raw[:length])}) assert ret1 == reveal absIO_val(dp, raw, ingressID) assert ret2 == reveal absIO_val(dp, raw[:length], ingressID) assert ret1 == ret2 @@ -65,17 +65,37 @@ ghost requires 0 <= length && length <= len(raw) requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) -requires validPktMetaHdr(raw[:length]) +requires slayers.ValidPktMetaHdr(raw[:length]) ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) -ensures validPktMetaHdr(raw) +ensures slayers.ValidPktMetaHdr(raw) decreases -func validPktMetaHdrWidenLemma(raw []byte, length int) { +func ValidPktMetaHdrWidenLemma(raw []byte, length int) { unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) - reveal validPktMetaHdr(raw[:length]) - ret1 := reveal validPktMetaHdr(raw) - ret2 := reveal validPktMetaHdr(raw[:length]) + reveal slayers.ValidPktMetaHdr(raw[:length]) + ret1 := reveal slayers.ValidPktMetaHdr(raw) + ret2 := reveal slayers.ValidPktMetaHdr(raw[:length]) + assert ret1 == ret2 + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) +} + +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +requires slayers.IsSupportedPkt(raw[:length]) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures slayers.IsSupportedPkt(raw) +decreases +func IsSupportedPktWidenLemma(raw []byte, length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + reveal slayers.IsSupportedPkt(raw[:length]) + ret1 := reveal slayers.IsSupportedPkt(raw) + ret2 := reveal slayers.IsSupportedPkt(raw[:length]) assert ret1 == ret2 fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) fold acc(sl.AbsSlice_Bytes(raw[:length], 0, 
len(raw[:length])), R56) @@ -86,39 +106,28 @@ requires 0 <= length && length <= len(raw) requires dp.Valid() requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) -requires validPktMetaHdr(raw) -requires validPktMetaHdr(raw[:length]) +requires slayers.ValidPktMetaHdr(raw) +requires slayers.ValidPktMetaHdr(raw[:length]) ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) -ensures validPktMetaHdr(raw) -ensures validPktMetaHdr(raw[:length]) +ensures slayers.ValidPktMetaHdr(raw) +ensures slayers.ValidPktMetaHdr(raw[:length]) ensures absPkt(dp, raw) == absPkt(dp, raw[:length]) decreases func absPktWidenLemma(dp io.DataPlaneSpec, raw []byte, length int) { - // declarations - var last1 io.IO_as - var last2 io.IO_as - var first1 io.IO_as - var first2 io.IO_as - var leftAsidSeq1 option[seq[io.IO_as]] - var leftAsidSeq2 option[seq[io.IO_as]] - var rightAsidSeq1 option[seq[io.IO_as]] - var rightAsidSeq2 option[seq[io.IO_as]] - var midAsidSeq1 option[seq[io.IO_as]] - var midAsidSeq2 option[seq[io.IO_as]] - var midAsid1 option[io.IO_as] - var midAsid2 option[io.IO_as] - var ret1 option[io.IO_pkt2] - var ret2 option[io.IO_pkt2] - var lm bool - - // abspkt step by step - _ := reveal validPktMetaHdr(raw) - _ := reveal validPktMetaHdr(raw[:length]) - hdr1 := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in binary.BigEndian.Uint32(raw[0:4]) - hdr2 := unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) - assert unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) == unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) + reveal slayers.ValidPktMetaHdr(raw) + reveal slayers.ValidPktMetaHdr(raw[:length]) + headerOffset1 := slayers.GetAddressOffset(raw) + headerOffset2 := slayers.GetAddressOffset(raw[:length]) + + assert forall k int :: {&raw[k]} headerOffset1 <= k && k < headerOffset1 + scion.MetaLen ==> + &raw[headerOffset1:headerOffset1+scion.MetaLen][k-headerOffset1] == &raw[k] + assert forall k int :: {&raw[:length][k]} headerOffset2 <= k && k < headerOffset2 + scion.MetaLen ==> + &raw[headerOffset2:headerOffset2+scion.MetaLen][k-headerOffset2] == &raw[k] + hdr1 := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in binary.BigEndian.Uint32(raw[headerOffset1:headerOffset1+scion.MetaLen]) + hdr2 := unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][headerOffset2:headerOffset2+scion.MetaLen]) + assert unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][headerOffset2:headerOffset2+scion.MetaLen]) == unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in binary.BigEndian.Uint32(raw[headerOffset1:headerOffset1+scion.MetaLen]) assert hdr1 == hdr2 metaHdr1 := scion.DecodedFrom(hdr1) @@ -145,683 +154,166 @@ func absPktWidenLemma(dp io.DataPlaneSpec, raw []byte, length int) { seg3Len2 := int(metaHdr2.SegLen[2]) assert seg3Len1 == seg3Len2 - segLen1 := lengthOfCurrSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) - segLen2 := lengthOfCurrSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) + segLen1 := scion.LengthOfCurrSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) + segLen2 := scion.LengthOfCurrSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) assert segLen1 == 
segLen2 - prevSegLen1 := lengthOfPrevSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) - prevSegLen2 := lengthOfPrevSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) + prevSegLen1 := scion.LengthOfPrevSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) + prevSegLen2 := scion.LengthOfPrevSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) assert prevSegLen1 == prevSegLen2 - numINF1 := numInfoFields(seg1Len1, seg2Len1, seg3Len1) - numINF2 := numInfoFields(seg1Len2, seg2Len2, seg3Len2) + numINF1 := scion.NumInfoFields(seg1Len1, seg2Len1, seg3Len1) + numINF2 := scion.NumInfoFields(seg1Len2, seg2Len2, seg3Len2) assert numINF1 == numINF2 - offset1 := hopFieldOffset(numINF1, 0) - offset2 := hopFieldOffset(numINF2, 0) + offset1 := scion.HopFieldOffset(numINF1, 0, headerOffset1) + offset2 := scion.HopFieldOffset(numINF2, 0, headerOffset2) assert offset1 == offset2 - consDir1 := path.ConsDir(raw, currINFIdx1) - consDir2 := path.ConsDir(raw[:length], currINFIdx2) - consDirWidenLemma(raw, length, currINFIdx1) - assert consDir1 == consDir2 - - asidForCurrSegWidenLemma(dp, raw, numINF1, currHFIdx1, prevSegLen1+segLen1, prevSegLen1, consDir1, dp.Asid(), length) - currAsidSeq2 := asidForCurrSeg(dp, raw, numINF1, currHFIdx1, prevSegLen1+segLen1, prevSegLen1, consDir1, dp.Asid()) - currAsidSeq1 := asidForCurrSeg(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2+segLen2, prevSegLen2, consDir2, dp.Asid()) - assert currAsidSeq1 == currAsidSeq2 - - if (currAsidSeq1 == none[seq[io.IO_as]]) { - ret := none[io.IO_pkt2] - assert ret == reveal absPkt(dp, raw) - assert ret == reveal absPkt(dp, raw[:length]) - } else { - - last1 = get(currAsidSeq1)[segLen1-1] - last2 = get(currAsidSeq2)[segLen1-1] - assert last1 == last2 - - first1 = get(currAsidSeq1)[0] - first2 = get(currAsidSeq2)[0] - assert first1 == first2 - - asidsForLeftSegWidenLemma(dp, raw, numINF1, currINFIdx1+1, seg1Len1, seg2Len1, seg3Len1, last1, length) - leftAsidSeq1 = asidsForLeftSeg(dp, raw, numINF1, currINFIdx1 + 1, seg1Len1, seg2Len1, seg3Len1, last1) - leftAsidSeq2 = asidsForLeftSeg(dp, raw[:length], numINF2, currINFIdx2 + 1, seg1Len2, seg2Len2, seg3Len2, last2) - assert leftAsidSeq1 == leftAsidSeq2 - - asidsForRightSegWidenLemma(dp, raw, numINF1, currINFIdx1-1, seg1Len1, seg2Len1, seg3Len1, first1, length) - rightAsidSeq1 = asidsForRightSeg(dp, raw, numINF1, currINFIdx1 - 1, seg1Len1, seg2Len1, seg3Len1, first1) - rightAsidSeq2 = asidsForRightSeg(dp, raw[:length], numINF2, currINFIdx2 - 1, seg1Len2, seg2Len2, seg3Len2, first2) - assert rightAsidSeq1 == rightAsidSeq2 - - if (leftAsidSeq1 == none[seq[io.IO_as]] || rightAsidSeq1 == none[seq[io.IO_as]]) { - ret := none[io.IO_pkt2] - assert ret == reveal absPkt(dp, raw) - assert ret == reveal absPkt(dp, raw[:length]) - } else { - assert leftAsidSeq2 != none[seq[io.IO_as]] && rightAsidSeq2 != none[seq[io.IO_as]] - - midAsid1 = ((currINFIdx1 == 0 && seg2Len1 > 0 && seg3Len1 > 0) ? some(get(leftAsidSeq1)[len(get(leftAsidSeq1))-1]) : (currINFIdx1 == 2 && seg2Len1 > 0) ? some(get(rightAsidSeq1)[0]) : none[io.IO_as]) - midAsid2 = ((currINFIdx2 == 0 && seg2Len2 > 0 && seg3Len2 > 0) ? some(get(leftAsidSeq2)[len(get(leftAsidSeq2))-1]) : (currINFIdx2 == 2 && seg2Len2 > 0) ? 
some(get(rightAsidSeq2)[0]) : none[io.IO_as]) - assert midAsid1 == midAsid2 - - asidsForMidSegWidenLemma(dp, raw, numINF1, currINFIdx1+2, seg1Len1, seg2Len1, seg3Len1, midAsid1, length) - midAsidSeq1 = asidsForMidSeg(dp, raw, numINF1, currINFIdx1 + 2, seg1Len1, seg2Len1, seg3Len1, midAsid1) - midAsidSeq2 = asidsForMidSeg(dp, raw[:length], numINF2, currINFIdx2 + 2, seg1Len2, seg2Len2, seg3Len2, midAsid2) - assert midAsidSeq1 == midAsidSeq2 - if (midAsidSeq1 == none[seq[io.IO_as]]) { - ret := none[io.IO_pkt2] - assert ret == reveal absPkt(dp, raw) - assert ret == reveal absPkt(dp, raw[:length]) - } else { - currSegWidenLemma(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, get(currAsidSeq1), length) - leftSegWidenLemma(raw, currINFIdx1 + 1, seg1Len1, seg2Len1, seg3Len1, get(leftAsidSeq1), length) - midSegWidenLemma(raw, currINFIdx1 + 2, seg1Len1, seg2Len1, seg3Len1, get(midAsidSeq1), length) - rightSegWidenLemma(raw, currINFIdx1 - 1, seg1Len1, seg2Len1, seg3Len1, get(rightAsidSeq1), length) - ret1 = some(io.IO_pkt2(io.IO_Packet2{ - CurrSeg : currSeg(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, get(currAsidSeq1)), - LeftSeg : leftSeg(raw, currINFIdx1 + 1, seg1Len1, seg2Len1 , seg3Len1, get(leftAsidSeq1)), - MidSeg : midSeg(raw, currINFIdx1 + 2, seg1Len1, seg2Len1 , seg3Len1, get(midAsidSeq1)), - RightSeg : rightSeg(raw, currINFIdx1 - 1, seg1Len1, seg2Len1 , seg3Len1, get(rightAsidSeq1)), - })) - ret2 = some(io.IO_pkt2(io.IO_Packet2{ - CurrSeg : currSeg(raw[:length], offset2+prevSegLen2, currINFIdx2, currHFIdx2-prevSegLen2, get(currAsidSeq2)), - LeftSeg : leftSeg(raw[:length], currINFIdx2 + 1, seg1Len2, seg2Len2 , seg3Len2, get(leftAsidSeq2)), - MidSeg : midSeg(raw[:length], currINFIdx2 + 2, seg1Len2, seg2Len2 , seg3Len2, get(midAsidSeq2)), - RightSeg : rightSeg(raw[:length], currINFIdx2 - 1, seg1Len2, seg2Len2 , seg3Len2, get(rightAsidSeq2)), - })) - reveal absPkt(dp, raw) - reveal absPkt(dp, raw[:length]) - assert ret1 == absPkt(dp, raw) - assert ret2 == absPkt(dp, raw[:length]) - assert ret1 == ret2 - } - } - } -} - -ghost -requires 0 <= length && length <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) -requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) -requires 0 <= currINFIdx -requires path.InfoFieldOffset(currINFIdx) < length -ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) -ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) -ensures path.ConsDir(raw, currINFIdx) == path.ConsDir(raw[:length], currINFIdx) -decreases -func consDirWidenLemma(raw []byte, length int, currINFIdx int) { - unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) - unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) - assert &raw[path.InfoFieldOffset(currINFIdx)] == &raw[:length][path.InfoFieldOffset(currINFIdx)] - assert raw[path.InfoFieldOffset(currINFIdx)] == raw[:length][path.InfoFieldOffset(currINFIdx)] - assert (raw[path.InfoFieldOffset(currINFIdx)] & 0x1 == 0x1) == (raw[:length][path.InfoFieldOffset(currINFIdx)] & 0x1 == 0x1) - fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) - fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) -} - -ghost -requires 0 <= length && length <= len(raw) -requires 1 <= numINF1 -requires 0 <= prevSegLen1 && prevSegLen1 <= currHFIdx1 -requires currHFIdx1 < segLen1 -requires hopFieldOffset(numINF1, segLen1) <= length -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) -requires acc(sl.AbsSlice_Bytes(raw[:length], 0, 
len(raw[:length])), R52) -ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) -ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) -ensures asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) == - asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) -decreases -func asidForCurrSegWidenLemma( - dp io.DataPlaneSpec, - raw []byte, - numINF1 int, - currHFIdx1 int, - segLen1 int, - prevSegLen1 int, - consDir1 bool, - asid1 io.IO_as, - length int) { - - var ret1 option[seq[io.IO_as]] - var ret2 option[seq[io.IO_as]] - var left1 option[seq[io.IO_as]] - var left2 option[seq[io.IO_as]] - var right1 option[seq[io.IO_as]] - var right2 option[seq[io.IO_as]] - - - if (segLen1 == 0) { - assert segLen1 == 0 - ret1 = some(seq[io.IO_as]{}) - ret2 = some(seq[io.IO_as]{}) - assert ret1 == asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) - assert ret2 == asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) - assert ret1 == ret2 - } else { - asidsBeforeWidenLemma(dp, raw, numINF1, numINF1, currHFIdx1, currHFIdx1, prevSegLen1, prevSegLen1, consDir1, consDir1, asid1, asid1, length) - left1 = asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) - left2 = asidsBefore(dp, raw[:length], numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) - assert left1 == left2 - newP := (R52 + R53)/2 - asidsAfterWidenLemma(dp, raw, numINF1, currHFIdx1, segLen1, consDir1, asid1, length, newP) - right1 = asidsAfter(dp, raw, numINF1, currHFIdx1, segLen1, consDir1, asid1) - right2 = asidsAfter(dp, raw[:length], numINF1, currHFIdx1, segLen1, consDir1, asid1) - assert right1 == right2 - if (left1 == none[seq[io.IO_as]] || right1 == none[seq[io.IO_as]]) { - assert (left2 == none[seq[io.IO_as]] || right2 == none[seq[io.IO_as]]) - ret1 = none[seq[io.IO_as]] - ret2 = none[seq[io.IO_as]] - assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) - assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) - assert ret1 == ret2 - } else { - assert (left2 != none[seq[io.IO_as]] && right2 != none[seq[io.IO_as]]) - ret1 = some(get(left1) ++ get(right1)[1:]) - ret2 = some(get(left2) ++ get(right2)[1:]) - assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) - assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) - assert ret1 == ret2 - } - } - assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) - assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + currSegWidenLemma(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, segLen1, headerOffset1, length) + currSeg1 := scion.CurrSeg(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, segLen1, headerOffset1) + currSeg2 := scion.CurrSeg(raw[:length], offset2+prevSegLen2, currINFIdx2, currHFIdx2-prevSegLen2, segLen2, headerOffset2) + assert currSeg1 == currSeg2 + + leftSegWidenLemma(raw, currINFIdx1 + 1, seg1Len1, seg2Len1 , seg3Len1, headerOffset1, length) + leftSeg1 := scion.LeftSeg(raw, currINFIdx1 + 1, seg1Len1, seg2Len1 , seg3Len1, headerOffset1) + leftSeg2 := scion.LeftSeg(raw[:length], currINFIdx2 + 1, seg1Len2, seg2Len2 , seg3Len2, headerOffset2) + assert leftSeg1 
== leftSeg2 + + midSegWidenLemma(raw, currINFIdx1 + 2, seg1Len1, seg2Len1 , seg3Len1, headerOffset1, length) + midSeg1 := scion.MidSeg(raw, currINFIdx1 + 2, seg1Len1, seg2Len1 , seg3Len1, headerOffset1) + midSeg2 := scion.MidSeg(raw[:length], currINFIdx2 + 2, seg1Len2, seg2Len2 , seg3Len2, headerOffset2) + assert midSeg1 == midSeg2 + + rightSegWidenLemma(raw, currINFIdx1 - 1, seg1Len1, seg2Len1 , seg3Len1, headerOffset1, length) + rightSeg1 := scion.RightSeg(raw, currINFIdx1 - 1, seg1Len1, seg2Len1 , seg3Len1, headerOffset1) + rightSeg2 := scion.RightSeg(raw[:length], currINFIdx2 - 1, seg1Len2, seg2Len2 , seg3Len2, headerOffset2) + assert rightSeg1 == rightSeg2 + + ret1 := io.IO_pkt2(io.IO_Packet2{ + CurrSeg : currSeg1, + LeftSeg : leftSeg1, + MidSeg : midSeg1, + RightSeg : rightSeg1, + }) + ret2 := io.IO_pkt2(io.IO_Packet2{ + CurrSeg : currSeg2, + LeftSeg : leftSeg2, + MidSeg : midSeg2, + RightSeg : rightSeg2, + }) + + reveal absPkt(dp, raw) + reveal absPkt(dp, raw[:length]) + assert ret1 == absPkt(dp, raw) + assert ret2 == absPkt(dp, raw[:length]) assert ret1 == ret2 } ghost -requires 1 <= numINF1 -requires 0 <= prevSegLen1 && prevSegLen1 <= currHFIdx1 +requires 0 <= headerOffset +requires path.InfoFieldOffset(currINFIdx, headerOffset) + path.InfoLen <= offset +requires 0 < segLen +requires offset + path.HopLen * segLen <= length requires length <= len(raw) -requires hopFieldOffset(numINF1, currHFIdx1) + path.HopLen <= length -requires dp.Valid() -requires consDir1 == consDir2 -requires prevSegLen1 == prevSegLen2 -requires currHFIdx1 == currHFIdx2 -requires numINF1 == numINF2 -requires asid1 == asid2 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) -requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) -ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) -ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) -ensures forall i int :: { &raw[i] } 0 <= i && i < len(raw) ==> old(unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) in raw[i]) == (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) in raw[i]) -ensures forall i int :: { &raw[:length][i] } 0 <= i && i < len(raw[:length]) ==> old(unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) in raw[:length][i]) == (unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) in raw[:length][i]) -ensures asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) == - asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) -decreases currHFIdx1 - prevSegLen1 -func asidsBeforeWidenLemma( - dp io.DataPlaneSpec, - raw []byte, - numINF1 int, - numINF2 int, - currHFIdx1 int, - currHFIdx2 int, - prevSegLen1 int, - prevSegLen2 int, - consDir1 bool, - consDir2 bool, - asid1 io.IO_as, - asid2 io.IO_as, - length int) { - - var ret1 option[seq[io.IO_as]] - var ret2 option[seq[io.IO_as]] - var nextAsid1 option[io.IO_as] - var nextAsid2 option[io.IO_as] - var nextAsidSeq1 option[seq[io.IO_as]] - var nextAsidSeq2 option[seq[io.IO_as]] - - if (currHFIdx1 == prevSegLen1) { - assert currHFIdx2 == prevSegLen2 - ret1 = some(seq[io.IO_as]{asid1}) - ret2 = some(seq[io.IO_as]{asid2}) - assert ret1 == ret2 - } else { - assert currHFIdx2 != prevSegLen2 - nextAsid1 = asidFromIfs(dp, raw, numINF1, currHFIdx1, !consDir1, asid1) - nextAsid2 = asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, !consDir2, asid2) - asidFromIfsWidenLemma(dp, raw, numINF1, numINF2, currHFIdx1, currHFIdx2, !consDir1, !consDir2, asid1, asid2, length) - assert nextAsid1 
== nextAsid2 - if (nextAsid1 == none[io.IO_as]) { - assert nextAsid2 == none[io.IO_as] - ret1 = none[seq[io.IO_as]] - ret2 = none[seq[io.IO_as]] - assert ret1 == ret2 - assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) - assert ret2 == asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) - } else { - assert nextAsid2 != none[io.IO_as] - asidsBeforeWidenLemma(dp, raw, numINF1, numINF2, currHFIdx1-1, currHFIdx2-1, prevSegLen1, prevSegLen2, consDir1, consDir2, get(nextAsid1), get(nextAsid2), length) - nextAsidSeq1 = asidsBefore(dp, raw, numINF1, currHFIdx1-1, prevSegLen1, consDir1, get(nextAsid1)) - nextAsidSeq2 = asidsBefore(dp, raw[:length], numINF2, currHFIdx2-1, prevSegLen2, consDir2, get(nextAsid2)) - assert nextAsidSeq1 == nextAsidSeq2 - if (nextAsidSeq1 == none[seq[io.IO_as]]) { - assert nextAsidSeq2 == none[seq[io.IO_as]] - ret1 = none[seq[io.IO_as]] - ret2 = none[seq[io.IO_as]] - assert ret1 == ret2 - assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) - assert ret2 == asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) - } else { - ret1 = some(get(nextAsidSeq1) ++ seq[io.IO_as]{asid1}) - ret2 = some(get(nextAsidSeq2) ++ seq[io.IO_as]{asid2}) - assert ret1 == ret2 - assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) - assert ret2 == asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) - } - } - } -} - -ghost -requires 1 <= numINF1 -requires 0 <= currHFIdx1 -requires numINF1 == numINF2 -requires currHFIdx1 == currHFIdx2 -requires consDir1 == consDir2 -requires asid1 == asid2 -requires 0 <= length && length <= len(raw) -requires hopFieldOffset(numINF1, currHFIdx1) + path.HopLen <= length -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) -requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) -ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) -ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) -ensures asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) == - asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) -decreases -func asidFromIfsWidenLemma( - dp io.DataPlaneSpec, - raw []byte, - numINF1 int, - numINF2 int, - currHFIdx1 int, - currHFIdx2 int, - consDir1 bool, - consDir2 bool, - asid1 io.IO_as, - asid2 io.IO_as, - length int) { - var ret1 option[io.IO_as] - var ret2 option[io.IO_as] - - idx1 := hopFieldOffset(numINF1, currHFIdx1) - idx2 := hopFieldOffset(numINF2, currHFIdx1) - assert idx1 == idx2 - unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) - unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) - assert forall i int :: { &raw[idx1+2+i] } { &raw[idx1+2:idx1+4][i] } 0 <= i && i < 2 ==> - &raw[idx1+2+i] == &raw[idx1+2:idx1+4][i] - assert forall i int :: { &raw[:length][idx2+2+i] } { &raw[:length][idx2+2:idx2+4][i] } 0 <= i && i < 2 ==> - &raw[:length][idx2+2+i] == &raw[:length][idx2+2:idx2+4][i] - assert forall i int :: { &raw[idx1+4+i] } { &raw[idx1+4:idx1+6][i] } 0 <= i && i < 2 ==> - &raw[idx1+4+i] == &raw[idx1+4:idx1+6][i] - assert forall i int :: { &raw[:length][idx2+4+i] } { &raw[idx2+4:idx2+6][i] } 0 <= i && i < 2 ==> - &raw[:length][idx2+4+i] == &raw[:length][idx2+4:idx2+6][i] - ifs1 := consDir1 ? binary.BigEndian.Uint16(raw[idx1+4:idx1+6]) : binary.BigEndian.Uint16(raw[idx1+2:idx1+4]) - ifs2 := consDir2 ? 
binary.BigEndian.Uint16(raw[:length][idx2+4:idx2+6]) : binary.BigEndian.Uint16(raw[:length][idx2+2:idx2+4]) - assert ifs1 == ifs2 - asIfPair1 := io.AsIfsPair{asid1, io.IO_ifs(ifs1)} - asIfPair2 := io.AsIfsPair{asid2, io.IO_ifs(ifs2)} - assert asIfPair1 == asIfPair2 - if (asIfPair1 in domain(dp.GetLinks())) { - assert asIfPair2 in domain(dp.GetLinks()) - ret1 = some(dp.Lookup(asIfPair1).asid) - ret2 = some(dp.Lookup(asIfPair2).asid) - assert ret1 == ret2 - assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) - assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) - } else { - assert !(asIfPair2 in domain(dp.GetLinks())) - ret1 = none[io.IO_as] - ret2 = none[io.IO_as] - assert ret1 == ret2 - assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) - assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) - } - fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) - fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) - assert ret1 == ret2 - assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) - assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) -} - -// --- The file has been simplified past this point - -ghost -requires R53 < p -requires 1 <= numINF -requires 0 <= currHFIdx && currHFIdx < segLen -requires length <= len(raw) -requires hopFieldOffset(numINF, segLen) <= length -requires dp.Valid() -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) -preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p) -ensures asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) == - asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) -decreases segLen - currHFIdx + 1 -func asidsAfterWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currHFIdx int, segLen int, consDir bool, asid io.IO_as, length int, p perm) { - if currHFIdx != segLen - 1 { - nextAsid1 := asidFromIfs(dp, raw, numINF, currHFIdx, consDir, asid) - nextAsid2 := asidFromIfs(dp, raw[:length], numINF, currHFIdx, consDir, asid) - asidFromIfsWidenLemma(dp, raw, numINF, numINF, currHFIdx, currHFIdx, consDir, consDir, asid, asid, length) - assert nextAsid1 == nextAsid2 - if nextAsid1 == none[io.IO_as] { - ret := none[seq[io.IO_as]] - assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) - assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) - } else { - newP := (p + R53)/2 - asidsAfterWidenLemma(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(nextAsid1), length, newP) - nextAsidSeq1 := asidsAfter(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(nextAsid1)) - nextAsidSeq2 := asidsAfter(dp, raw[:length], numINF, currHFIdx+1, segLen, consDir, get(nextAsid2)) - assert nextAsidSeq1 == nextAsidSeq2 - if nextAsidSeq1 == none[seq[io.IO_as]] { - ret := none[seq[io.IO_as]] - assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) - assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) - } else { - ret := some(seq[io.IO_as]{asid} ++ get(nextAsidSeq1)) - assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) - assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) - } - } - } -} - -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires 0 <= length && length <= len(raw) -requires hopFieldOffset(numINF, seg1Len + seg2Len + 
seg3Len) <= length -requires currINFIdx <= numINF + 1 -requires 1 <= currINFIdx && currINFIdx < 4 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) -ensures asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == - asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) -decreases -func asidsForLeftSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as, length int) { - consDir1 := path.ConsDir(raw, currINFIdx) - consDir2 := path.ConsDir(raw[:length], currINFIdx) - consDirWidenLemma(raw, length, currINFIdx) - assert consDir1 == consDir2 - - if currINFIdx == 1 && seg2Len > 0 { - asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir1, asid, length) - ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir1, asid) - ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir2, asid) - assert ret1 == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) - assert ret2 == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) - assert ret1 == ret2 - } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { - asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, asid, length) - ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, asid) - ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir2, asid) - assert ret1 == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) - assert ret2 == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) - assert ret1 == ret2 - } else { - ret := some(seq[io.IO_as]{}) - assert ret == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) - assert ret == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) - } -} - -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires 0 <= length && length <= len(raw) -requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= length -requires currINFIdx <= numINF + 1 -requires -1 <= currINFIdx && currINFIdx < 2 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) -ensures asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == - asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) -decreases -func asidsForRightSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int,seg3Len int, asid io.IO_as, length int) { - if currINFIdx == 1 && seg2Len > 0 { - consDir1 := path.ConsDir(raw, currINFIdx) - consDir2 := path.ConsDir(raw[:length], currINFIdx) - consDirWidenLemma(raw, length, currINFIdx) - assert consDir1 == consDir2 - asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir1, asid, length) - ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir1, asid) - ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len+seg2Len-1, 
seg1Len+seg2Len, seg1Len, consDir2, asid)
-        assert ret1 == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret2 == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret1 == ret2
-    } else if currINFIdx == 0 {
-        consDir1 := path.ConsDir(raw, currINFIdx)
-        consDir2 := path.ConsDir(raw[:length], currINFIdx)
-        consDirWidenLemma(raw, length, currINFIdx)
-        assert consDir1 == consDir2
-        asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, asid, length)
-        ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, asid)
-        ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len-1, seg1Len, 0, consDir2, asid)
-        assert ret1 == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret2 == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret1 == ret2
-    } else {
-        ret := some(seq[io.IO_as]{})
-        assert ret == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-    }
-}
-
-ghost
-requires dp.Valid()
-requires 1 <= numINF
-requires 0 < seg1Len
-requires 0 <= seg2Len
-requires 0 <= seg3Len
-requires 0 <= length && length <= len(raw)
-requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= length
-requires currINFIdx <= numINF + 1
-requires 2 <= currINFIdx && currINFIdx < 5
-requires (currINFIdx == 4 && seg2Len > 0) ==> asid != none[io.IO_as]
-requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> asid != none[io.IO_as]
-preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
-preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
-ensures asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) ==
-    asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-decreases
-func asidsForMidSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid option[io.IO_as], length int) {
-    if currINFIdx == 4 && seg2Len > 0 {
-        consDir1 := path.ConsDir(raw, 1)
-        consDir2 := path.ConsDir(raw[:length], 1)
-        consDirWidenLemma(raw, length, 1)
-        assert consDir1 == consDir2
-        asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, get(asid), length)
-        ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, get(asid))
-        ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len-1, seg1Len, 0, consDir2, get(asid))
-        assert ret1 == reveal asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret2 == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret1 == ret2
-    } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 {
-        consDir1 := path.ConsDir(raw, 2)
-        consDir2 := path.ConsDir(raw[:length], 2)
-        consDirWidenLemma(raw, length, 2)
-        assert consDir1 == consDir2
-        asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, get(asid), length)
-        ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir1, get(asid))
-        ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir2, get(asid))
-        assert ret1 == reveal asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret2 == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret1 == ret2
-    } else {
-        ret := some(seq[io.IO_as]{})
-        assert ret == reveal asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-    }
-}
-
-ghost
-requires path.InfoFieldOffset(currINFIdx) + path.InfoLen <= offset
-requires 0 < len(asid)
-requires 0 <= length && length <= len(raw)
-requires offset + path.HopLen * len(asid) <= length
-requires 0 <= currHFIdx && currHFIdx <= len(asid)
+requires 0 <= currHFIdx && currHFIdx <= segLen
 requires 0 <= currINFIdx && currINFIdx < 3
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
 preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
-ensures currSeg(raw, offset, currINFIdx, currHFIdx, asid) ==
-    currSeg(raw[:length], offset, currINFIdx, currHFIdx, asid)
+ensures scion.CurrSeg(raw, offset, currINFIdx, currHFIdx, segLen, headerOffset) ==
+    scion.CurrSeg(raw[:length], offset, currINFIdx, currHFIdx, segLen, headerOffset)
 decreases
-func currSegWidenLemma(raw []byte, offset int, currINFIdx int, currHFIdx int, asid seq[io.IO_as], length int) {
+func currSegWidenLemma(raw []byte, offset int, currINFIdx int, currHFIdx int, segLen int, headerOffset int, length int) {
     unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53)
     unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53)
-    ainfo1 := path.Timestamp(raw, currINFIdx)
-    ainfo2 := path.Timestamp(raw[:length], currINFIdx)
+    ainfo1 := path.Timestamp(raw, currINFIdx, headerOffset)
+    ainfo2 := path.Timestamp(raw[:length], currINFIdx, headerOffset)
     assert ainfo1 == ainfo2
-    consDir1 := path.ConsDir(raw, currINFIdx)
-    consDir2 := path.ConsDir(raw[:length], currINFIdx)
+    uinfo1 := path.AbsUinfo(raw, currINFIdx, headerOffset)
+    uinfo2 := path.AbsUinfo(raw[:length], currINFIdx, headerOffset)
+    assert uinfo1 == uinfo2
+
+    consDir1 := path.ConsDir(raw, currINFIdx, headerOffset)
+    consDir2 := path.ConsDir(raw[:length], currINFIdx, headerOffset)
     assert consDir1 == consDir2
-    peer1 := path.Peer(raw, currINFIdx)
-    peer2 := path.Peer(raw[:length], currINFIdx)
+    peer1 := path.Peer(raw, currINFIdx, headerOffset)
+    peer2 := path.Peer(raw[:length], currINFIdx, headerOffset)
     assert peer1 == peer2
-    segmentWidenLemma(raw, offset, currHFIdx, asid, ainfo1, consDir1, peer1, length)
-    ret1 := segment(raw, offset, currHFIdx, asid, ainfo1, consDir1, peer1)
-    ret2 := segment(raw[:length], offset, currHFIdx, asid, ainfo2, consDir2, peer2)
-    assert ret1 == reveal currSeg(raw, offset, currINFIdx, currHFIdx, asid)
-    assert ret2 == reveal currSeg(raw[:length], offset, currINFIdx, currHFIdx, asid)
+    segmentWidenLemma(raw, offset, currHFIdx, ainfo1, uinfo1, consDir1, peer1, segLen, length)
+    ret1 := scion.segment(raw, offset, currHFIdx, ainfo1, uinfo1, consDir1, peer1, segLen)
+    ret2 := scion.segment(raw[:length], offset, currHFIdx, ainfo2, uinfo2, consDir2, peer2, segLen)
+    assert ret1 == reveal scion.CurrSeg(raw, offset, currINFIdx, currHFIdx, segLen, headerOffset)
+    assert ret2 == reveal scion.CurrSeg(raw[:length], offset, currINFIdx, currHFIdx, segLen, headerOffset)
     assert ret1 == ret2
-
     fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53)
     fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53)
 }
 
 ghost
 requires 0 <= offset
-requires 0 < len(asid)
-requires 0 <= length && length <= len(raw)
-requires offset + path.HopLen * len(asid) <= length
-requires 0 <= currHFIdx && currHFIdx <= len(asid)
+requires 0 < segLen
+requires 0 <= currHFIdx && currHFIdx <= segLen
+requires length <= len(raw)
+requires offset + path.HopLen * segLen <= length
 requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52)
 requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52)
 ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52)
 ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52)
-ensures segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer) == segment(raw[:length], offset, currHFIdx, asid, ainfo, consDir, peer)
+ensures scion.segment(raw, offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen) ==
+    scion.segment(raw[:length], offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen)
 decreases
-func segmentWidenLemma(raw []byte, offset int, currHFIdx int, asid seq[io.IO_as], ainfo io.IO_ainfo, consDir bool, peer bool, length int) {
+func segmentWidenLemma(raw []byte, offset int, currHFIdx int, ainfo io.IO_ainfo, uinfo set[io.IO_msgterm], consDir bool, peer bool, segLen int, length int) {
     newP := (R52 + R53)/2
     assert R53 < newP && newP < R52
-    hopFieldsConsDirWidenLemma(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo, length, newP)
-    hopFieldsNotConsDirWidenLemma(raw, offset, len(asid)-1, set[io.IO_msgterm]{}, asid, ainfo, length, newP)
-    hopfields1 := consDir ? hopFieldsConsDir(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : hopFieldsNotConsDir(raw, offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo)
-    hopfields2 := consDir ? hopFieldsConsDir(raw[:length], offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : hopFieldsNotConsDir(raw[:length], offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo)
+    hopFieldsWidenLemma(raw, offset, 0, segLen, length, newP)
+    hopfields1 := scion.hopFields(raw, offset, 0, segLen)
+    hopfields2 := scion.hopFields(raw[:length], offset, 0, segLen)
     assert hopfields1 == hopfields2
-    uinfo := uInfo(hopfields1, currHFIdx, consDir)
-
     ret1 := io.IO_seg2(io.IO_seg3_{ AInfo :ainfo, UInfo : uinfo, ConsDir : consDir, Peer : peer,
-        Past : segPast(hopfields1, currHFIdx - 1),
-        Future : segFuture(hopfields1, currHFIdx),
-        History : segHistory(hopfields1, currHFIdx - 1),
+        Past : scion.segPast(hopfields1, currHFIdx - 1),
+        Future : scion.segFuture(hopfields1, currHFIdx),
+        History : scion.segHistory(hopfields1, currHFIdx - 1),
     })
     ret2 := io.IO_seg2(io.IO_seg3_{ AInfo :ainfo, UInfo : uinfo, ConsDir : consDir, Peer : peer,
-        Past : segPast(hopfields2, currHFIdx - 1),
-        Future : segFuture(hopfields2, currHFIdx),
-        History : segHistory(hopfields2, currHFIdx - 1),
+        Past : scion.segPast(hopfields2, currHFIdx - 1),
+        Future : scion.segFuture(hopfields2, currHFIdx),
+        History : scion.segHistory(hopfields2, currHFIdx - 1),
     })
-    assert ret1 == segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer)
-    assert ret2 == segment(raw[:length], offset, currHFIdx, asid, ainfo, consDir, peer)
+    assert ret1 == scion.segment(raw, offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen)
+    assert ret2 == scion.segment(raw[:length], offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen)
     assert ret1 == ret2
 }
 
 ghost
-requires R53 < p
-requires 0 <= offset
-requires 0 <= currHFIdx && currHFIdx <= len(asid)
-requires 0 <= length && length <= len(raw)
-requires offset + path.HopLen * len(asid) <= length
-preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p)
-preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p)
-ensures hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo) ==
-    hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo)
-decreases len(asid) - currHFIdx
-func hopFieldsConsDirWidenLemma(raw []byte, offset int, currHFIdx int, beta set[io.IO_msgterm], asid seq[io.IO_as], ainfo io.IO_ainfo, length int, p perm) {
-    if currHFIdx == len(asid) {
-        ret := seq[io.IO_HF]{}
-        assert ret == hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo)
-        assert ret == hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo)
-    } else {
-        hopFieldWidenLemma(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo, length)
-        hf1 := hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo)
-        hf2 := hopField(raw[:length], offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo)
-        assert hf1 == hf2
-
-        newP := (p + R53)/2
-        assert R53 < newP && newP < p
-        hopFieldsConsDirWidenLemma(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo, length, newP)
-        ret1 := seq[io.IO_HF]{hf1} ++ hopFieldsConsDir(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo)
-        ret2 := seq[io.IO_HF]{hf2} ++ hopFieldsConsDir(raw[:length], offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf2.HVF}), asid, ainfo)
-        assert ret1 == hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo)
-        assert ret2 == hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo)
-        assert ret1 == ret2
-    }
-}
-
-ghost
-requires 0 <= length && length <= len(raw)
-requires idx + path.HopLen <= length
-requires 0 <= idx
+requires 0 <= middle
+requires middle + path.HopLen <= length
+requires length <= len(raw)
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54)
 preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54)
-ensures hopField(raw, idx, beta, asid, ainfo) == hopField(raw[:length], idx, beta, asid, ainfo)
+ensures path.BytesToIO_HF(raw, 0, middle, len(raw)) ==
+    path.BytesToIO_HF(raw[:length], 0, middle, length)
 decreases
-func hopFieldWidenLemma(raw []byte, idx int, beta set[io.IO_msgterm], asid io.IO_as, ainfo io.IO_ainfo, length int) {
+func BytesToIO_HFWidenLemma(raw []byte, middle int, length int) {
     unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55)
     unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55)
-
-    assert forall i int :: { &raw[idx+2+i] } { &raw[idx+2:idx+4][i] } 0 <= i && i < 2 ==> &raw[idx+2+i] == &raw[idx+2:idx+4][i]
-    assert forall i int :: { &raw[idx+4+i] } { &raw[idx+4:idx+6][i] } 0 <= i && i < 2 ==> &raw[idx+4+i] == &raw[idx+4:idx+6][i]
-    inif21 := binary.BigEndian.Uint16(raw[idx+2:idx+4])
-    inif22 := binary.BigEndian.Uint16(raw[:length][idx+2:idx+4])
-    assert inif21 == inif22
-
-    egif2 := binary.BigEndian.Uint16(raw[idx+4:idx+6])
-    op_inif2 := inif21 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(inif21))
-    op_egif2 := egif2 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(egif2))
-    ts := io.IO_msgterm(io.MsgTerm_Num{ainfo})
-    l := io.IO_msgterm(io.MsgTerm_L{seq[io.IO_msgterm]{ts, io.if2term(op_inif2), io.if2term(op_egif2), io.IO_msgterm(io.MsgTerm_FS{beta})}})
-    hvf := io.mac(io.macKey(io.asidToKey(asid)), l)
-
-    ret1 := io.IO_HF(io.IO_HF_{
-        InIF2 : op_inif2,
-        EgIF2 : op_egif2,
-        HVF : hvf,
-    })
-    ret2 := io.IO_HF(io.IO_HF_{
-        InIF2 : op_inif2,
-        EgIF2 : op_egif2,
-        HVF : hvf,
-    })
-    assert ret1 == hopField(raw, idx, beta, asid, ainfo)
-    assert ret2 == hopField(raw[:length], idx, beta, asid, ainfo)
-    assert ret1 == ret2
+    hfBytes1 := path.BytesToIO_HF(raw, 0, middle, len(raw))
+    hfBytes2 := path.BytesToIO_HF(raw[:length], 0, middle, length)
+    assert hfBytes1.EgIF2 == hfBytes2.EgIF2
+    assert hfBytes1.InIF2 == hfBytes2.InIF2
+    assert hfBytes1.HVF == hfBytes2.HVF
     fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55)
     fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55)
 }
@@ -829,144 +321,141 @@ func hopFieldWidenLemma(raw []byte, idx int, beta set[io.IO_msgterm], asid io.IO
 ghost
 requires R53 < p
 requires 0 <= offset
-requires -1 <= currHFIdx && currHFIdx < len(asid)
-requires 0 <= length && length <= len(raw)
-requires offset + path.HopLen * currHFIdx + path.HopLen <= length
+requires 0 <= currHFIdx && currHFIdx <= segLen
+requires offset + path.HopLen * segLen <= length
+requires length <= len(raw)
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p)
 preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p)
-ensures hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo) ==
-    hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo)
-decreases currHFIdx + 1
-func hopFieldsNotConsDirWidenLemma(raw []byte, offset int, currHFIdx int, beta set[io.IO_msgterm], asid seq[io.IO_as], ainfo io.IO_ainfo, length int, p perm) {
-    if currHFIdx == -1 {
+ensures scion.hopFields(raw, offset, currHFIdx, segLen) ==
+    scion.hopFields(raw[:length], offset, currHFIdx, segLen)
+decreases segLen - currHFIdx
+func hopFieldsWidenLemma(raw []byte, offset int, currHFIdx int, segLen int, length int, p perm) {
+    if currHFIdx == segLen {
         ret := seq[io.IO_HF]{}
-        assert ret == hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo)
-        assert ret == hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo)
+        assert ret == scion.hopFields(raw, offset, currHFIdx, segLen)
+        assert ret == scion.hopFields(raw[:length], offset, currHFIdx, segLen)
     } else {
-        hopFieldWidenLemma(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo, length)
-        hf1 := hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo)
-        hf2 := hopField(raw[:length], offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo)
+        BytesToIO_HFWidenLemma(raw, offset + path.HopLen * currHFIdx, length)
+        hf1 := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHFIdx, len(raw))
+        hf2 := path.BytesToIO_HF(raw[:length], 0, offset + path.HopLen * currHFIdx, length)
         assert hf1 == hf2
 
         newP := (p + R53)/2
         assert R53 < newP && newP < p
-        hopFieldsNotConsDirWidenLemma(raw, offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo, length, newP)
-        ret1 := hopFieldsNotConsDir(raw, offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf1}
-        ret2 := hopFieldsNotConsDir(raw[:length], offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf2.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf2}
-        assert ret1 == hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo)
-        assert ret2 == hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo)
+        hopFieldsWidenLemma(raw, offset, currHFIdx + 1, segLen, length, newP)
+        ret1 := seq[io.IO_HF]{hf1} ++ scion.hopFields(raw, offset, currHFIdx + 1, segLen)
+        ret2 := seq[io.IO_HF]{hf2} ++ scion.hopFields(raw[:length], offset, currHFIdx + 1, segLen)
+        assert ret1 == scion.hopFields(raw, offset, currHFIdx, segLen)
+        assert ret2 == scion.hopFields(raw[:length], offset, currHFIdx, segLen)
        assert ret1 == ret2
     }
 }
 
 ghost
+requires 0 <= headerOffset
 requires 0 < seg1Len
 requires 0 <= seg2Len
 requires 0 <= seg3Len
 requires 0 <= length && length <= len(raw)
-requires pktLen(seg1Len, seg2Len, seg3Len) <= length
-requires 1 <= currINFIdx && currINFIdx < 4
-requires (currINFIdx == 1 && seg2Len > 0) ==> len(asid) == seg2Len
-requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len
+requires scion.pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length
+requires 1 <= currINFIdx && currINFIdx < 4
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
 preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
-ensures leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) ==
-    leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+ensures scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) ==
+    scion.LeftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
 decreases
-func leftSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) {
-    offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0)
+func leftSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, length int) {
+    offset := scion.HopFieldOffset(scion.NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset)
     if currINFIdx == 1 && seg2Len > 0 {
-        currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid, length)
-        ret1 := some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid))
-        ret2 := some(currSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, 0, asid))
-        assert ret1 == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret2 == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+        currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset, length)
+        ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset))
+        ret2 := some(scion.CurrSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset))
+        assert ret1 == reveal scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+        assert ret2 == reveal scion.LeftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
        assert ret1 == ret2
     } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 {
-        currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid, length)
-        ret1 := some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid))
-        ret2 := some(currSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid))
-        assert ret1 == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret2 == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+        currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset, length)
+        ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset))
+        ret2 := some(scion.CurrSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset))
+        assert ret1 == reveal scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+        assert ret2 == reveal scion.LeftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
        assert ret1 == ret2
     } else {
         ret := none[io.IO_seg3]
-        assert ret == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+        assert ret == reveal scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+        assert ret == reveal scion.LeftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
     }
 }
 
 ghost
+requires 0 <= headerOffset
 requires 0 < seg1Len
 requires 0 <= seg2Len
 requires 0 <= seg3Len
 requires 0 <= length && length <= len(raw)
-requires pktLen(seg1Len, seg2Len, seg3Len) <= length
+requires scion.pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length
 requires -1 <= currINFIdx && currINFIdx < 2
-requires (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg2Len
-requires (currINFIdx == 0 && seg2Len > 0) ==> len(asid) == seg1Len
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
 preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
-ensures rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) ==
-    rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+ensures scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) ==
+    scion.RightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
 decreases
-func rightSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) {
-    offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0)
+func rightSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, length int) {
+    offset := scion.HopFieldOffset(scion.NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset)
     if currINFIdx == 1 && seg2Len > 0 && seg3Len > 0 {
-        currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid, length)
-        ret1 := some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid))
-        ret2 := some(currSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid))
-        assert ret1 == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret2 == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+        currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset, length)
+        ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset))
+        ret2 := some(scion.CurrSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset))
+        assert ret1 == reveal scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+        assert ret2 == reveal scion.RightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
        assert ret1 == ret2
     } else if currINFIdx == 0 && seg2Len > 0 {
-        currSegWidenLemma(raw, offset, currINFIdx, seg1Len, asid, length)
-        ret1 := some(currSeg(raw, offset, currINFIdx, seg1Len, asid))
-        ret2 := some(currSeg(raw[:length], offset, currINFIdx, seg1Len, asid))
-        assert ret1 == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret2 == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+        currSegWidenLemma(raw, offset, currINFIdx, seg1Len, seg1Len, headerOffset, length)
+        ret1 := some(scion.CurrSeg(raw, offset, currINFIdx, seg1Len, seg1Len, headerOffset))
+        ret2 := some(scion.CurrSeg(raw[:length], offset, currINFIdx, seg1Len, seg1Len, headerOffset))
+        assert ret1 == reveal scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+        assert ret2 == reveal scion.RightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
        assert ret1 == ret2
     } else {
         ret := none[io.IO_seg3]
-        assert ret == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+        assert ret == reveal scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+        assert ret == reveal scion.RightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
     }
 }
+
 ghost
-requires 0 <= seg2Len
+requires 0 <= headerOffset
 requires 0 < seg1Len
-requires 0 <= length && length <= len(raw)
+requires 0 <= seg2Len
 requires 0 <= seg3Len
 requires 2 <= currINFIdx && currINFIdx < 5
-requires pktLen(seg1Len, seg2Len, seg3Len) <= length
-requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len
-requires (currINFIdx == 4 && seg2Len > 0) ==> len(asid) == seg1Len
+requires 0 <= length && length <= len(raw)
+requires scion.pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
 preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
-ensures midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) ==
-    midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+ensures scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) ==
+    scion.MidSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
 decreases
-func midSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) {
-    offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0)
+func midSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, length int) {
+    offset := scion.HopFieldOffset(scion.NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset)
     if currINFIdx == 4 && seg2Len > 0 {
-        currSegWidenLemma(raw, offset, 0, seg1Len, asid, length)
-        ret1 := some(currSeg(raw, offset, 0, seg1Len, asid))
-        ret2 := some(currSeg(raw[:length], offset, 0, seg1Len, asid))
-        assert ret1 == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret2 == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+        currSegWidenLemma(raw, offset, 0, seg1Len, seg1Len, headerOffset, length)
+        ret1 := some(scion.CurrSeg(raw, offset, 0, seg1Len, seg1Len, headerOffset))
+        ret2 := some(scion.CurrSeg(raw[:length], offset, 0, seg1Len, seg1Len, headerOffset))
+        assert ret1 == reveal scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+        assert ret2 == reveal scion.MidSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
        assert ret1 == ret2
     } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 {
-        currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid, length)
-        ret1 := some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid))
-        ret2 := some(currSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid))
-        assert ret1 == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret2 == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+        currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset, length)
+        ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset))
+        ret2 := some(scion.CurrSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset))
+        assert ret1 == reveal scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+        assert ret2 == reveal scion.MidSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
        assert ret1 == ret2
     } else {
         ret := none[io.IO_seg3]
-        assert ret == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
-        assert ret == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+        assert ret == reveal scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+        assert ret == reveal scion.MidSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
     }
 }
-*/
\ No newline at end of file
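All of the lemmas rewritten above instantiate one and the same "widening" scheme: a pure abstraction function evaluated on the prefix raw[:length] agrees with its value on raw, provided every byte the function inspects lies below length. A minimal sketch of the shape these proofs share, in the same Gobra style (absField and absFieldWidenLemma are illustrative names, not part of the codebase):

    ghost
    requires 0 <= offset && offset + 4 <= length && length <= len(raw)
    preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
    preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
    ensures absField(raw, offset) == absField(raw[:length], offset)
    decreases
    func absFieldWidenLemma(raw []byte, offset int, length int) {
        unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53)
        unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53)
        // &raw[i] and &raw[:length][i] alias for every i < length, so the
        // two reads of the field see exactly the same bytes.
        fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53)
        fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53)
    }

The rewrite keeps this pattern intact but switches the lemmas from the old asid-indexed abstraction to the segLen/headerOffset-based definitions of the scion package.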
From e5d6d22129c523024446e46d1d619b08b7c9902e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Pereira?=
Date: Thu, 18 Apr 2024 22:30:29 +0200
Subject: [PATCH 23/57] enable chop 10 in the CI (#328)

---
 .github/workflows/gobra.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml
index 137fa6572..d65b8277e 100644
--- a/.github/workflows/gobra.yml
+++ b/.github/workflows/gobra.yml
@@ -415,6 +415,7 @@ jobs:
           includePaths: ${{ env.includePaths }}
           assumeInjectivityOnInhale: ${{ env.assumeInjectivityOnInhale }}
           checkConsistency: ${{ env.checkConsistency }}
+          chop: 10
           parallelizeBranches: '1'
           conditionalizePermissions: '0'
           moreJoins: 'impure'

From 410465b88b6d75070b97d6645f64427bebc31207 Mon Sep 17 00:00:00 2001
From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com>
Date: Thu, 18 Apr 2024 22:33:29 +0200
Subject: [PATCH 24/57] drop assumption in processPkt (#318)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: João Pereira
---
 router/dataplane.go  | 32 ++++++++++++++------------------
 router/io-spec.gobra | 11 +----------
 2 files changed, 15 insertions(+), 28 deletions(-)

diff --git a/router/dataplane.go b/router/dataplane.go
index 4b22afc2c..9db45aa39 100644
--- a/router/dataplane.go
+++ b/router/dataplane.go
@@ -1499,7 +1499,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte,
             ub = p.rawPkt[o.start:o.end]
             llStart = o.start
             llEnd = o.end
-            sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, writePerm)
+            sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm)
         }
     }
     hasHbhLayer := processed[0]
@@ -1511,14 +1511,15 @@
         assert processed[0] ==> p.hbhLayer.Mem(ubHbhLayer)
         assert processed[1] ==> p.e2eLayer.Mem(ubE2eLayer)
     @*/
-    // @ assert sl.AbsSlice_Bytes(ub, 0, len(ub))
+    // @ assert acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), HalfPerm)
     pld /*@ , start, end @*/ := p.lastLayer.LayerPayload( /*@ ub @*/ )
-    // @ sl.SplitRange_Bytes(ub, start, end, writePerm)
+    // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm)
     // @ sl.NilAcc_Bytes()
-
     pathType := /*@ unfolding p.scionLayer.Mem(rawPkt) in @*/ p.scionLayer.PathType
     switch pathType {
     case empty.PathType:
+        // @ ghost sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm)
+        // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm)
         // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) }
         if p.lastLayer.NextLayerType( /*@ ub @*/ ) == layers.LayerTypeBFD {
             // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer)
@@ -1536,6 +1537,8 @@
             "type", pathType, "header", nextHdr(p.lastLayer /*@, ub @*/)) /*@, false, io.IO_val_Unit{} @*/
     case onehop.PathType:
         if p.lastLayer.NextLayerType( /*@ ub @*/ ) == layers.LayerTypeBFD {
+            // @ ghost sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm)
+            // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm)
             // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) }
             // @ ghost defer sl.CombineRange_Bytes(ub, start, end, writePerm)
             // @ unfold acc(p.scionLayer.Mem(p.rawPkt), R10)
@@ -1553,33 +1556,26 @@
             // @ ghost defer ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer)
             return processResult{}, p.processInterBFD(ohp, pld) /*@, false, io.IO_val_Unit{} @*/
         }
-        // @ sl.CombineRange_Bytes(ub, start, end, writePerm)
+        // @ sl.CombineRange_Bytes(ub, start, end, HalfPerm)
         // @ ghost if lastLayerIdx >= 0 && !offsets[lastLayerIdx].isNil {
         // @     o := offsets[lastLayerIdx]
-        // @     sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm)
+        // @     sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm)
         // @ }
         // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt))
         // @ unfold acc(p.d.Mem(), _)
-        // @ TemporaryAssumeForIO(reveal p.scionLayer.EqPathType(p.rawPkt))
+        // @ assert reveal p.scionLayer.EqPathType(p.rawPkt)
        // @ assert !(reveal slayers.IsSupportedPkt(p.rawPkt))
        v1, v2 /*@, aliasesPkt, newAbsPkt @*/ := p.processOHP( /* @ dp @ */ )
        // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer)
        // @ fold p.sInit()
        return v1, v2 /*@, aliasesPkt, newAbsPkt @*/
     case scion.PathType:
-        // @ sl.CombineRange_Bytes(ub, start, end, writePerm)
+        // @ sl.CombineRange_Bytes(ub, start, end, HalfPerm)
         // @ ghost if lastLayerIdx >= 0 && !offsets[lastLayerIdx].isNil {
         // @     o := offsets[lastLayerIdx]
-        // @     sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm)
+        // @     sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm)
         // @ }
         // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt))
-        // (VerifiedSCION) the following statements assume properties that follow directly
-        // from `decodeLayers`, but we cannot currently establish them because we cannot
-        // properly frame this yet around calls to the ghost slice operations.
-        // @ TemporaryAssumeForIO((typeOf(p.scionLayer.GetPath(p.rawPkt)) == *scion.Raw) ==> slayers.ValidPktMetaHdr(p.rawPkt))
-        // @ TemporaryAssumeForIO((typeOf(p.scionLayer.GetPath(p.rawPkt)) == *scion.Raw) ==> p.scionLayer.EqAbsHeader(p.rawPkt))
-        // @ TemporaryAssumeForIO(p.scionLayer.EqPathType(p.rawPkt))
-        // @ TemporaryAssumeForIOWitness(absIO_val(dp, p.rawPkt, p.ingressID), p.ingressID, ioSharedArg)
        v1, v2 /*@ , addrAliasesPkt, newAbsPkt @*/ := p.processSCION( /*@ p.rawPkt, ub == nil, llStart, llEnd, ioLock, ioSharedArg, dp @*/ )
        // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, v2 == nil, hasHbhLayer, hasE2eLayer)
        // @ fold p.sInit()
@@ -1590,9 +1586,9 @@
        // @ fold p.sInit()
        return v1, v2 /*@, false, io.IO_val_Unit{} @*/
     default:
-        // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) }
+        // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) }
        // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer)
-        // @ sl.CombineRange_Bytes(ub, start, end, writePerm)
+        // @ sl.CombineRange_Bytes(ub, start, end, HalfPerm)
        // @ fold p.d.validResult(processResult{}, false)
        // @ fold p.sInit()
        // @ establishMemUnsupportedPathType()
diff --git a/router/io-spec.gobra b/router/io-spec.gobra
index dde5bc198..0562e4792 100644
--- a/router/io-spec.gobra
+++ b/router/io-spec.gobra
@@ -182,13 +182,4 @@ decreases
 pure func MsgToAbsVal(dp io.DataPlaneSpec, msg *ipv4.Message, ingressID uint16) (res io.IO_val) {
     return unfolding acc(msg.Mem(), R50) in
         absIO_val(dp, msg.Buffers[0], ingressID)
-}
-
-// This assumption will be dropped as soon as we can establish that the contents
-// of the underlying buffer did not change between the call to `decodeLayers` and
-// `p.processSCION` in the function `processPkt` in the router.
-ghost
-ensures absPkt.isIO_val_Pkt2 ==>
-    ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(ingressID), absPkt.IO_val_Pkt2_2)
-decreases
-func TemporaryAssumeForIOWitness(absPkt io.IO_val, ingressID uint16, ioSharedArg SharedArg)
\ No newline at end of file
+}
\ No newline at end of file
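The substance of this patch is the permission discipline: previously the ghost slice operations consumed the full write permission to p.rawPkt, so facts established by decodeLayers could not be framed across them and had to be re-introduced with TemporaryAssumeForIO. Splitting off only half of the permission keeps a read fraction of the buffer predicate in scope throughout. Schematically, with f standing for any pure fact about the buffer contents (a sketch of the idiom, not code from the patch):

    // @ assert f(p.rawPkt)  // established by decodeLayers
    // @ sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm)
    // ... work on the sub-range p.rawPkt[o.start:o.end] ...
    // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm)
    // @ assert f(p.rawPkt)  // still known: a read fraction was retained

Because a fraction of the predicate is held the whole time, the bytes cannot have changed, so the assertion after the combine goes through without any assumption.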
From 764d862c985b940287010347e9845672024c60d7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Pereira?=
Date: Fri, 19 Apr 2024 02:24:03 +0200
Subject: [PATCH 25/57] fix termination measure (#329)

---
 pkg/slayers/scion.go                         | 4 +++-
 verification/utils/bitwise/bitwise-eqs.gobra | 6 ++++++
 verification/utils/bitwise/proofs.dfy        | 6 ++++++
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go
index 2f0a44735..bb713a41d 100644
--- a/pkg/slayers/scion.go
+++ b/pkg/slayers/scion.go
@@ -1078,9 +1078,11 @@ func (s *SCION) upperLayerChecksum(upperLayer []byte, csum uint32) uint32 {
 // (VerifiedSCION) The following function terminates but Gobra can't
 // deduce that because of limited support of bitwise operations.
-// @ decreases _
+// @ decreases
 func (s *SCION) foldChecksum(csum uint32) (res uint16) {
+    // @ decreases csum
     for csum > 0xffff {
+        // @ b.FoldChecksumLemma(csum)
         csum = (csum >> 16) + (csum & 0xffff)
     }
     return ^uint16(csum)
diff --git a/verification/utils/bitwise/bitwise-eqs.gobra b/verification/utils/bitwise/bitwise-eqs.gobra
index 2b8b5b18b..205841a92 100644
--- a/verification/utils/bitwise/bitwise-eqs.gobra
+++ b/verification/utils/bitwise/bitwise-eqs.gobra
@@ -68,3 +68,9 @@ ensures 3 & 1 == 1
 ensures 3 & 2 == 2
 decreases
 pure func InfoFieldFirstByteSerializationLemmas() bool
+
+ensures csum > 0xffff ==>
+    let newCsum := (csum >> 16) + (csum & 0xffff) in
+    newCsum < csum
+decreases
+pure func FoldChecksumLemma(csum uint32) struct{}
\ No newline at end of file
diff --git a/verification/utils/bitwise/proofs.dfy b/verification/utils/bitwise/proofs.dfy
index 9d38da8ce..dc0286d4f 100644
--- a/verification/utils/bitwise/proofs.dfy
+++ b/verification/utils/bitwise/proofs.dfy
@@ -162,4 +162,10 @@ lemma FUint32AfterFPutUint32(v: bv32)
 lemma FPutUint32AfterFUint32(b0: bv8, b1: bv8, b2: bv8, b3: bv8)
     ensures var v := FUint32Spec(b0, b1, b2, b3);
         FPutUint32Spec(v) == (b0, b1, b2, b3)
+{}
+
+lemma FoldChecksumLemma(csum: bv32)
+    ensures csum > 0xffff ==>
+        var newCsum := (csum >> 16) + (csum & 0xffff);
+        newCsum < csum
 {}
\ No newline at end of file
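Why the rewritten loop terminates: writing csum = 65536*h + l with h = csum >> 16 and l = csum & 0xffff, one folding step yields h + l = csum - 65535*h, and h >= 1 whenever csum > 0xffff, so csum strictly decreases — exactly the fact stated by FoldChecksumLemma and discharged in proofs.dfy. A standalone sketch of the same computation in plain Go (for illustration only):

    package main

    import "fmt"

    func foldChecksum(csum uint32) uint16 {
        for csum > 0xffff {
            // each step shrinks csum by 65535*(csum>>16)
            csum = (csum >> 16) + (csum & 0xffff)
        }
        return ^uint16(csum) // one's complement of the folded sum
    }

    func main() {
        fmt.Println(foldChecksum(0x1ffff)) // 0x1ffff -> 0x10000 -> 0x1, prints 65534
    }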
From 7ddba09e4a0df8da8c328c45b84cb99185b83d98 Mon Sep 17 00:00:00 2001
From: Dionysios Spiliopoulos <32896454+Dspil@users.noreply.github.com>
Date: Fri, 19 Apr 2024 14:53:45 +0200
Subject: [PATCH 26/57] change triggers in absPktWidenLemma (#330)

* change triggers in abspktwidenlemma

* triggers

* remove quantifiers
---
 router/io-spec.gobra     | 4 +---
 router/widen-lemma.gobra | 4 ----
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/router/io-spec.gobra b/router/io-spec.gobra
index 0562e4792..bcc00b254 100644
--- a/router/io-spec.gobra
+++ b/router/io-spec.gobra
@@ -39,8 +39,6 @@ decreases
 pure func absPkt(dp io.DataPlaneSpec, raw []byte) (res io.IO_pkt2) {
     return let _ := reveal slayers.ValidPktMetaHdr(raw) in
         let headerOffset := slayers.GetAddressOffset(raw) in
-        let _ := Asserting(forall k int :: {&raw[headerOffset:headerOffset+scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==>
-            &raw[headerOffset:headerOffset+scion.MetaLen][k] == &raw[headerOffset + k]) in
         let hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in
             binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) in
         let metaHdr := scion.DecodedFrom(hdr) in
@@ -182,4 +180,4 @@ decreases
 pure func MsgToAbsVal(dp io.DataPlaneSpec, msg *ipv4.Message, ingressID uint16) (res io.IO_val) {
     return unfolding acc(msg.Mem(), R50) in
         absIO_val(dp, msg.Buffers[0], ingressID)
-}
\ No newline at end of file
+}
diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra
index 05d87f978..199e24241 100644
--- a/router/widen-lemma.gobra
+++ b/router/widen-lemma.gobra
@@ -121,10 +121,6 @@ func absPktWidenLemma(dp io.DataPlaneSpec, raw []byte, length int) {
     headerOffset1 := slayers.GetAddressOffset(raw)
     headerOffset2 := slayers.GetAddressOffset(raw[:length])
-    assert forall k int :: {&raw[k]} headerOffset1 <= k && k < headerOffset1 + scion.MetaLen ==>
-        &raw[headerOffset1:headerOffset1+scion.MetaLen][k-headerOffset1] == &raw[k]
-    assert forall k int :: {&raw[:length][k]} headerOffset2 <= k && k < headerOffset2 + scion.MetaLen ==>
-        &raw[headerOffset2:headerOffset2+scion.MetaLen][k-headerOffset2] == &raw[k]
     hdr1 := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in
         binary.BigEndian.Uint32(raw[headerOffset1:headerOffset1+scion.MetaLen])
     hdr2 := unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in
         binary.BigEndian.Uint32(raw[:length][headerOffset2:headerOffset2+scion.MetaLen])
     assert unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in
         binary.BigEndian.Uint32(raw[:length][headerOffset2:headerOffset2+scion.MetaLen]) ==
         unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in
         binary.BigEndian.Uint32(raw[headerOffset1:headerOffset1+scion.MetaLen])
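Some context for the change: in Gobra, the braced terms in a quantifier are SMT triggers — the solver instantiates the quantified fact only when it syntactically encounters a matching term. The assertions deleted here existed solely to relate the two views of the same memory, following this generic idiom (reconstructed from the removed code for illustration):

    assert forall k int :: { &raw[off:off+n][k] } 0 <= k && k < n ==>
        &raw[off:off+n][k] == &raw[off+k]

Dropping the quantifiers (and the matching Asserting let-binding in absPkt) removes instantiation patterns that interacted badly with verification; the aliasing information is obtained directly from unfolding the slice predicate instead.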
From 429d00791e77246ac2b3dae7650cb0d778af7892 Mon Sep 17 00:00:00 2001
From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com>
Date: Fri, 19 Apr 2024 15:57:22 +0200
Subject: [PATCH 27/57] Drop Assumption in validateEgressID (#326)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Update gobra.yml

* Update gobra.yml

* fix verification error

* fixed precondition of XoverEvent

* enable moreJoins impure (#321)

* drop assumption in validateEgressID and process

* clean up

* invariant strengthening

* undo change to the state consolidator

* refactored wellConfigured assumptions

* finish proof of EgressIDNotZeroLemma

* Apply suggestions from code review

* removed TopologySpec

* minor fmt

* fix verification error

* removed comments

Co-authored-by: João Pereira

---------

Co-authored-by: João Pereira
Co-authored-by: Dspil
Co-authored-by: Dionysios Spiliopoulos <32896454+Dspil@users.noreply.github.com>
---
 router/dataplane.go                       |  5 +-
 router/dataplane_spec.gobra               | 28 ++++++++-
 router/dataplane_spec_test.gobra          | 58 ++++++++++--------
 router/io-spec-abstract-transitions.gobra | 13 ++---
 router/io-spec.gobra                      | 71 ++++++++++++++---------
 verification/io/dataplane_abstract.gobra  | 41 +++++--------
 6 files changed, 126 insertions(+), 90 deletions(-)

diff --git a/router/dataplane.go b/router/dataplane.go
index 9db45aa39..f286eb5a6 100644
--- a/router/dataplane.go
+++ b/router/dataplane.go
@@ -2240,6 +2240,7 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
 // @ requires dp.Valid()
+// @ requires p.d.WellConfigured()
 // @ requires p.d.DpAgreesWithSpec(dp)
 // @ requires len(oldPkt.CurrSeg.Future) > 0
 // @ requires p.EqAbsHopField(oldPkt)
@@ -2277,8 +2278,6 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh
         )
     }
 
-    // @ TemporaryAssumeForIO(pktEgressID != 0 &&
-    // @     (io.IO_ifs(pktEgressID) in domain(dp.GetNeighborIAs())))
     // @ p.d.getLinkTypesMem()
     ingress, egress := p.d.linkTypes[p.ingressID], p.d.linkTypes[pktEgressID]
     // @ p.d.LinkTypesLemma(dp)
@@ -3292,7 +3291,7 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool,
     // @ p.d.getExternalMem()
     // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) }
     if c, ok := p.d.external[egressID]; ok {
-        // @ TemporaryAssumeForIO(egressID != 0)
+        // @ p.d.EgressIDNotZeroLemma(egressID, dp)
         if err := p.processEgress( /*@ ub, dp @*/ ); err != nil {
             // @ fold p.d.validResult(processResult{}, false)
             return processResult{}, err /*@, false, absReturnErr(dp, processResult{}) @*/
diff --git a/router/dataplane_spec.gobra b/router/dataplane_spec.gobra
index 77303745e..4fdf29163 100644
--- a/router/dataplane_spec.gobra
+++ b/router/dataplane_spec.gobra
@@ -252,12 +252,33 @@ pure func (d *DataPlane) getDomExternal() set[uint16] {
         domain(d.external))
 }
 
+ghost
+requires acc(d.Mem(), _)
+decreases
+pure func (d *DataPlane) getDomNeighborIAs() set[uint16] {
+    return unfolding acc(d.Mem(), _) in
+        d.neighborIAs == nil ?
+            set[uint16]{} : domain(d.neighborIAs)
+}
+
+ghost
+requires acc(d.Mem(), _)
+decreases
+pure func (d *DataPlane) getDomLinkTypes() set[uint16] {
+    return unfolding acc(d.Mem(), _) in
+        d.linkTypes == nil ?
+            set[uint16]{} : domain(d.linkTypes)
+}
+
 ghost
 opaque
 requires acc(d.Mem(), _)
 decreases
 pure func (d *DataPlane) WellConfigured() bool {
-    return d.getDomExternal() subset d.getDomForwardingMetrics()
+    return d.getDomNeighborIAs() == d.getDomExternal() &&
+        d.getDomExternal() == d.getDomLinkTypes()      &&
+        !(0 in d.getDomNeighborIAs())                  &&
+        d.getDomExternal() subset d.getDomForwardingMetrics()
 }
 
 ghost
@@ -265,7 +286,10 @@ opaque
 requires acc(d.Mem(), _)
 decreases
 pure func (d *DataPlane) PreWellConfigured() bool {
-    return d.getDomExternal() intersection d.GetDomInternalNextHops() == set[uint16]{}
+    return d.getDomNeighborIAs() == d.getDomExternal() &&
+        d.getDomExternal() == d.getDomLinkTypes()      &&
+        !(0 in d.getDomNeighborIAs())                  &&
+        d.getDomExternal() intersection d.GetDomInternalNextHops() == set[uint16]{}
 }
 
 ghost
diff --git a/router/dataplane_spec_test.gobra b/router/dataplane_spec_test.gobra
index f1d0dce79..26c4fe81d 100644
--- a/router/dataplane_spec_test.gobra
+++ b/router/dataplane_spec_test.gobra
@@ -99,18 +99,28 @@ func testRun(
 
     b1 := allocateBatchConn()
     b2 := allocateBatchConn()
+    b3 := allocateBatchConn()
     d.external = map[uint16]BatchConn{
         uint16(1): b1,
         uint16(2): b2,
+        uint16(3): b3,
     }
     fold accBatchConn(d.external)
 
-    d.linkTypes = make(map[uint16]topology.LinkType)
-    d.neighborIAs = make(map[uint16]addr.IA)
+    d.linkTypes = map[uint16]topology.LinkType{
+        uint16(1): topology.Child,
+        uint16(2): topology.Child,
+        uint16(3): topology.Child,
+    }
+    d.neighborIAs = map[uint16]addr.IA{
+        uint16(1): 1001,
+        uint16(2): 1002,
+        uint16(3): 1000,
+    }
 
     a1 := allocateUDPAddr()
     d.internalNextHops = map[uint16]*net.UDPAddr{
-        uint16(3): a1,
+        uint16(4): a1,
     }
 
     d.internal = allocateBatchConn()
@@ -138,14 +148,12 @@ func testRun(
             3: 1000,
         },
         localIA: 1000,
-        topology: io.TopologySpec_{
-            coreAS: set[io.IO_as]{1000},
-            links: dict[io.AsIfsPair]io.AsIfsPair {
-                io.AsIfsPair{1000, 1}: io.AsIfsPair{1001, 7},
-                io.AsIfsPair{1000, 2}: io.AsIfsPair{1002, 8},
-                io.AsIfsPair{1000, 3}: io.AsIfsPair{1000, 3},
-                io.AsIfsPair{1001, 7}: io.AsIfsPair{1000, 1},
-                io.AsIfsPair{1002, 8}: io.AsIfsPair{1000, 2}}}}
+        links: dict[io.AsIfsPair]io.AsIfsPair {
+            io.AsIfsPair{1000, 1}: io.AsIfsPair{1001, 7},
+            io.AsIfsPair{1000, 2}: io.AsIfsPair{1002, 8},
+            io.AsIfsPair{1000, 3}: io.AsIfsPair{1000, 3},
+            io.AsIfsPair{1001, 7}: io.AsIfsPair{1000, 1},
+            io.AsIfsPair{1002, 8}: io.AsIfsPair{1000, 2}}}
     outline(
     pair1 := io.AsIfsPair{1000, 1}
     pair2 := io.AsIfsPair{1000, 2}
@@ -165,14 +173,12 @@ func testRun(
             3: 1000,
         },
         localIA: 1000,
-        topology: io.TopologySpec_{
-            coreAS: set[io.IO_as]{1000},
-            links: dict[io.AsIfsPair]io.AsIfsPair {
-                pair1: pair4,
-                pair2: pair5,
-                pair3: pair3,
-                pair4: pair1,
-                pair5: pair2}}}
+        links: dict[io.AsIfsPair]io.AsIfsPair {
+            pair1: pair4,
+            pair2: pair5,
+            pair3: pair3,
+            pair4: pair1,
+            pair5: pair2}}
 
     assert dp.Lookup(dp.Lookup(pair1)) == pair1
     assert dp.Lookup(dp.Lookup(pair2)) == pair2
@@ -181,15 +187,16 @@ func testRun(
     assert dp.Lookup(dp.Lookup(pair5)) == pair5
 
     assert forall ifs io.IO_ifs :: {ifs in domain(dp.neighborIAs)} ifs in domain(dp.neighborIAs) ==>
-        io.AsIfsPair{dp.localIA, ifs} in domain(dp.topology.links)
+        io.AsIfsPair{dp.localIA, ifs} in domain(dp.links)
     assert forall ifs io.IO_ifs :: {ifs in domain(dp.neighborIAs)} ifs in domain(dp.neighborIAs) ==>
         dp.Lookup(io.AsIfsPair{dp.localIA, ifs}).asid == dp.neighborIAs[ifs]
-    assert forall ifs io.IO_ifs :: {ifs in domain(dp.neighborIAs)} io.AsIfsPair{dp.localIA, ifs} in domain(dp.topology.links) ==>
+    assert forall ifs io.IO_ifs :: {ifs in domain(dp.neighborIAs)} io.AsIfsPair{dp.localIA, ifs} in domain(dp.links) ==>
         ifs in domain(dp.neighborIAs)
-    assert forall pair io.AsIfsPair :: {dp.Lookup(pair)} pair in domain(dp.topology.links) ==>
+    assert forall pair io.AsIfsPair :: {dp.Lookup(pair)} pair in domain(dp.links) ==>
         let next_pair := dp.Lookup(pair) in
-        (next_pair in domain(dp.topology.links)) &&
+        (next_pair in domain(dp.links)) &&
         dp.Lookup(next_pair) == pair
+    assert domain(dp.linkTypes) == domain(dp.neighborIAs)
     assert reveal dp.Valid()
     )
 
@@ -200,7 +207,12 @@ func testRun(
     assert d.dpSpecWellConfiguredLinkTypes(dp)
 
     fold d.Mem()
+    assert d.getDomNeighborIAs() == d.getDomExternal()
+    assert d.getDomExternal() == d.getDomLinkTypes()
+    assert !(0 in d.getDomNeighborIAs())
+    assert d.getDomExternal() intersection d.GetDomInternalNextHops() == set[uint16]{}
     assert reveal d.DpAgreesWithSpec(dp)
+    assert reveal d.PreWellConfigured()
     fold MutexInvariant!< d !>()
     // end of foldDataPlaneMem
 
diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra
index fb9d60482..ebb4aeb92 100644
--- a/router/io-spec-abstract-transitions.gobra
+++ b/router/io-spec-abstract-transitions.gobra
@@ -77,10 +77,7 @@ requires len(pkt.CurrSeg.Future) > 0
 decreases
 pure func AbsValidateEgressIDConstraint(pkt io.IO_pkt2, enter bool, dp io.DataPlaneSpec) bool {
     return let currseg := pkt.CurrSeg in
-        let nextIf := (currseg.ConsDir ? currseg.Future[0].EgIF2 : currseg.Future[0].InIF2) in
-        (enter ==> dp.dp2_check_interface_top(currseg.ConsDir, dp.Asid(), currseg.Future[0])) &&
-        nextIf != none[io.IO_ifs] &&
-        (get(nextIf) in domain(dp.GetNeighborIAs()))
+        (enter ==> dp.dp2_check_interface_top(currseg.ConsDir, dp.Asid(), currseg.Future[0]))
 }
 
 ghost
@@ -126,11 +123,8 @@ decreases
 pure func AbsValidateEgressIDConstraintXover(pkt io.IO_pkt2, dp io.DataPlaneSpec) bool {
     return let currseg := pkt.CurrSeg in
         let rightseg := get(pkt.RightSeg) in
-        let nextIf := (currseg.ConsDir ? currseg.Future[0].EgIF2 : currseg.Future[0].InIF2) in
         dp.xover2_link_type_dir(dp.Asid(), rightseg.ConsDir, rightseg.Past[0],
-            currseg.ConsDir, currseg.Future[0]) &&
-        nextIf != none[io.IO_ifs] &&
-        (get(nextIf) in domain(dp.GetNeighborIAs()))
+            currseg.ConsDir, currseg.Future[0])
 }
 
 ghost
@@ -173,6 +167,8 @@ func InternalEnterEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt i
 
 ghost
 requires dp.Valid()
+requires egressID != none[io.IO_ifs]
+requires get(egressID) in domain(dp.GetNeighborIAs())
 requires len(oldPkt.CurrSeg.Future) > 0
 requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt)
 requires AbsValidateIngressIDConstraint(oldPkt, ingressID)
@@ -212,6 +208,7 @@ requires len(get(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg).Fu
 requires len(get(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg).History) == 0
 requires AbsVerifyCurrentMACConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp)
 requires AbsValidateEgressIDConstraintXover(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp)
+requires egressID != none[io.IO_ifs] ==> get(egressID) in domain(dp.GetNeighborIAs())
 requires egressID != none[io.IO_ifs] ==> AbsEgressInterfaceConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), egressID)
 requires egressID == none[io.IO_ifs] ==> newPkt == AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID))
 requires egressID != none[io.IO_ifs] ==> newPkt == AbsProcessEgress(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)))
diff --git a/router/io-spec.gobra b/router/io-spec.gobra
index bcc00b254..456e9f73b 100644
--- a/router/io-spec.gobra
+++ b/router/io-spec.gobra
@@ -59,28 +59,6 @@ pure func absPkt(dp io.DataPlaneSpec, raw []byte) (res io.IO_pkt2) {
     })
 }
 
-ghost
-requires len(hopfield.HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0
-decreases
-pure func hvfSet(hopfield io.IO_HF) set[io.IO_msgterm] {
-    return let l := hopfield.HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_ in
-        l[len(l) - 1].MsgTerm_FS_
-}
-
-ghost
-requires 0 < len(hopfields)
-requires 0 <= currHFIdx && currHFIdx <= len(hopfields)
-requires forall idx int :: {hopfields[idx]} 0 <= idx && idx < len(hopfields) ==>
-    len(hopfields[idx].HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0
-decreases
-pure func uInfo(hopfields seq[io.IO_HF], currHFIdx int, consDir bool) set[io.IO_msgterm] {
-    return currHFIdx + 1 >= len(hopfields) ?
-        hvfSet(hopfields[len(hopfields)-1]) :
-        (consDir ?
-            hvfSet(hopfields[currHFIdx]) :
-            hvfSet(hopfields[currHFIdx+1]))
-}
-
 ghost
 requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
 ensures val.isIO_val_Unsupported
@@ -145,8 +123,7 @@ ghost
 requires acc(&d.linkTypes, _) && (d.linkTypes != nil ==> acc(d.linkTypes, _))
 decreases
 pure func (d *DataPlane) dpSpecWellConfiguredLinkTypes(dp io.DataPlaneSpec) bool {
-    return !(0 in domain(d.linkTypes)) &&
-        forall ifs uint16 :: {ifs in domain(d.linkTypes)} ifs in domain(d.linkTypes) ==>
+    return forall ifs uint16 :: {ifs in domain(d.linkTypes)} ifs in domain(d.linkTypes) ==>
             io.IO_ifs(ifs) in domain(dp.GetLinkTypes()) &&
             absLinktype(d.linkTypes[ifs]) == dp.GetLinkType(io.IO_ifs(ifs))
 }
@@ -165,12 +142,54 @@ pure func (d *DataPlane) DpAgreesWithSpec(dp io.DataPlaneSpec) bool {
 ghost
 requires acc(d.Mem(), _)
 requires d.DpAgreesWithSpec(dp)
-ensures acc(&d.linkTypes, _) && (d.linkTypes != nil ==> acc(d.linkTypes, _))
-ensures d.dpSpecWellConfiguredLinkTypes(dp)
+requires d.WellConfigured()
+ensures acc(&d.linkTypes, _)
+ensures d.linkTypes != nil ==> acc(d.linkTypes, _) && !(0 in domain(d.linkTypes))
+ensures d.dpSpecWellConfiguredLinkTypes(dp)
 decreases
 func (d *DataPlane) LinkTypesLemma(dp io.DataPlaneSpec) {
+    reveal d.WellConfigured()
     reveal d.DpAgreesWithSpec(dp)
+    assert !(0 in d.getDomLinkTypes())
     unfold acc(d.Mem(), _)
+    assert !(0 in domain(d.linkTypes))
+}
+
+ghost
+requires acc(d.Mem(), _)
+requires d.DpAgreesWithSpec(dp)
+requires d.WellConfigured()
+requires acc(&d.external, _)
+requires acc(d.external, _)
+requires egressID in domain(d.external)
+ensures egressID != 0
+ensures io.IO_ifs(egressID) in domain(dp.GetNeighborIAs())
+decreases
+func (d *DataPlane) EgressIDNotZeroLemma(egressID uint16, dp io.DataPlaneSpec) {
+    reveal d.WellConfigured()
+    reveal d.DpAgreesWithSpec(dp)
+    d.getDomExternalLemma()
+    assert d.getDomExternal() == domain(d.external)
+    assert egressID in d.getDomExternal()
+    assert d.getDomNeighborIAs() == d.getDomExternal()
+    assert io.IO_ifs(egressID) in domain(dp.GetNeighborIAs())
+    assert egressID != 0
+}
+
+ghost
+requires acc(d.Mem(), _)
+requires acc(&d.external, _)
+requires acc(d.external, _)
+ensures acc(d.Mem(), _)
+ensures acc(&d.external, _)
+ensures acc(d.external, _)
+ensures d.getDomExternal() == domain(d.external)
+decreases
+func (d *DataPlane) getDomExternalLemma() {
+    assert d.external != nil
+    assert d.getDomExternal() == unfolding acc(d.Mem(), _) in
+        (unfolding acc(accBatchConn(d.external), _) in
+        domain(d.external))
 }
 
 ghost
diff --git a/verification/io/dataplane_abstract.gobra b/verification/io/dataplane_abstract.gobra
index 346f9532d..ba717326a 100644
--- a/verification/io/dataplane_abstract.gobra
+++ b/verification/io/dataplane_abstract.gobra
@@ -16,24 +16,15 @@
 
 package io
 
+// links: representation of the network topology as a graph.
+// `links[(a1,x)] == (a2,y)` means that the interface x of AS a1 is connected
+// to the interface y of AS a2.
 type DataPlaneSpec adt {
     DataPlaneSpec_{
         linkTypes dict[IO_ifs]IO_Link
         neighborIAs dict[IO_ifs]IO_as
         localIA IO_as
-        topology TopologySpec
-    }
-}
-
-// TopologySpec describes the entire network topology.
-// coreAS: IDs of the core Autonomous Systems
-// links: representation of the network topology as a graph.
-// `links[(a1,x)] == (a2,y)` means that the interface x of AS a1 is connected
-// to the interface y of AS a2.
-type TopologySpec adt {
-    TopologySpec_{
-        coreAS set[IO_as]
-        links dict[AsIfsPair]AsIfsPair
+        links dict[AsIfsPair]AsIfsPair
     }
 }
 
@@ -47,15 +38,15 @@ opaque
 decreases
 pure func (dp DataPlaneSpec) Valid() bool {
     return (forall ifs IO_ifs :: {ifs in domain(dp.neighborIAs)} ifs in domain(dp.neighborIAs) ==>
-        (AsIfsPair{dp.localIA, ifs} in domain(dp.topology.links) &&
+        (AsIfsPair{dp.localIA, ifs} in domain(dp.links) &&
         dp.Lookup(AsIfsPair{dp.localIA, ifs}).asid == dp.neighborIAs[ifs])) &&
-        (forall ifs IO_ifs :: {ifs in domain(dp.neighborIAs)} AsIfsPair{dp.localIA, ifs} in domain(dp.topology.links) ==>
+        (forall ifs IO_ifs :: {ifs in domain(dp.neighborIAs)} AsIfsPair{dp.localIA, ifs} in domain(dp.links) ==>
         ifs in domain(dp.neighborIAs)) &&
-        (forall pairs AsIfsPair :: {dp.Lookup(pairs)} pairs in domain(dp.topology.links) ==>
+        (forall pairs AsIfsPair :: {dp.Lookup(pairs)} pairs in domain(dp.links) ==>
         let next_pair := dp.Lookup(pairs) in
-        (next_pair in domain(dp.topology.links)) &&
-        dp.Lookup(next_pair) == pairs)
-        // && domain(dp.linkTypes) == domain(dp.neighborIAs)
+        (next_pair in domain(dp.links)) &&
+        dp.Lookup(next_pair) == pairs) &&
+        domain(dp.linkTypes) == domain(dp.neighborIAs)
 }
 
 ghost
@@ -90,21 +81,15 @@ pure func (dp DataPlaneSpec) Asid() IO_as {
     return dp.localIA
 }
 
-ghost
-decreases
-pure func (dp DataPlaneSpec) Core() set[IO_as] {
-    return dp.topology.coreAS
-}
-
 ghost
 decreases
 pure func (dp DataPlaneSpec) GetLinks() dict[AsIfsPair]AsIfsPair {
-    return dp.topology.links
+    return dp.links
 }
 
 ghost
-requires pair in domain(dp.topology.links)
+requires pair in domain(dp.links)
 decreases
 pure func(dp DataPlaneSpec) Lookup(pair AsIfsPair) AsIfsPair {
-    return dp.topology.links[pair]
+    return dp.links[pair]
 }
\ No newline at end of file
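The net effect of this patch: both TemporaryAssumeForIO calls are replaced by a proof. The argument packaged in EgressIDNotZeroLemma is a short chain of set facts; paraphrased in the assertion style used above (a summary, not additional code):

    // egressID in domain(d.external)              // from the router's successful map lookup
    // d.getDomExternal() == d.getDomNeighborIAs() // d.WellConfigured()
    // !(0 in d.getDomNeighborIAs())               // d.WellConfigured()
    // hence egressID != 0, and
    // io.IO_ifs(egressID) in domain(dp.GetNeighborIAs())  // via d.DpAgreesWithSpec(dp)

Strengthening WellConfigured so that the external, neighborIAs, and linkTypes maps all range over the same key set (excluding 0) is what makes the first two facts available at the call site.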
From b8edbb1852c54ad5e2bcc9051118ee9e7e812231 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Pereira?=
Date: Thu, 2 May 2024 12:22:18 +0200
Subject: [PATCH 28/57] router: forbid bouncing packets internally (#4502)
 (#332)

Brings changes from scion pr 4502

Co-authored-by: Matthias Frei
---
 router/dataplane.go                      |   5 +-
 tools/braccept/cases/scmp_unknown_hop.go | 152 +++++++++++++++++++++++
 tools/braccept/main.go                   |   1 +
 3 files changed, 157 insertions(+), 1 deletion(-)

diff --git a/router/dataplane.go b/router/dataplane.go
index f286eb5a6..e452a52f5 100644
--- a/router/dataplane.go
+++ b/router/dataplane.go
@@ -2263,7 +2263,10 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh
     // @ p.d.getExternalMem()
     // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) }
     _, eh := p.d.external[pktEgressID]
-    if !ih && !eh {
+    // egress interface must be a known interface
+    // packet coming from internal interface, must go to an external interface
+    // packet coming from external interface can go to either internal or external interface
+    if !ih && !eh || (p.ingressID == 0) && !eh {
         errCode := slayers.SCMPCodeUnknownHopFieldEgress
         if !p.infoField.ConsDir {
             errCode = slayers.SCMPCodeUnknownHopFieldIngress
diff --git a/tools/braccept/cases/scmp_unknown_hop.go b/tools/braccept/cases/scmp_unknown_hop.go
index aeb114c5e..d101bdc69 100644
--- a/tools/braccept/cases/scmp_unknown_hop.go
+++ b/tools/braccept/cases/scmp_unknown_hop.go
@@ -340,3 +340,155 @@ func SCMPUnknownHopEgress(artifactsDir string, mac hash.Hash) runner.Case {
         StoreDir: filepath.Join(artifactsDir, "SCMPUnknownHopEgress"),
     }
 }
+
+// SCMPUnknownHopWrongRouter tests a packet from an AS local host sent to the wrong egress
+// router. This packet must not be forwarded by the router.
+func SCMPUnknownHopWrongRouter(artifactsDir string, mac hash.Hash) runner.Case {
+    options := gopacket.SerializeOptions{
+        FixLengths:       true,
+        ComputeChecksums: true,
+    }
+
+    ethernet := &layers.Ethernet{
+        SrcMAC:       net.HardwareAddr{0xf0, 0x0d, 0xca, 0xfe, 0xbe, 0xef},
+        DstMAC:       net.HardwareAddr{0xf0, 0x0d, 0xca, 0xfe, 0x00, 0x1},
+        EthernetType: layers.EthernetTypeIPv4,
+    }
+    ip := &layers.IPv4{
+        Version:  4,
+        IHL:      5,
+        TTL:      64,
+        SrcIP:    net.IP{192, 168, 0, 51},
+        DstIP:    net.IP{192, 168, 0, 11},
+        Protocol: layers.IPProtocolUDP,
+        Flags:    layers.IPv4DontFragment,
+    }
+    udp := &layers.UDP{
+        SrcPort: layers.UDPPort(30041),
+        DstPort: layers.UDPPort(30001),
+    }
+    _ = udp.SetNetworkLayerForChecksum(ip)
+
+    // (valid) path to ff00:0:8 via interface 181; this interface is configured on brC
+    // but we're sending it to brA.
+    sp := &scion.Decoded{
+        Base: scion.Base{
+            PathMeta: scion.MetaHdr{
+                CurrHF: 0,
+                SegLen: [3]uint8{2, 0, 0},
+            },
+            NumINF:  1,
+            NumHops: 2,
+        },
+        InfoFields: []path.InfoField{
+            {
+                SegID:     0x111,
+                ConsDir:   true,
+                Timestamp: util.TimeToSecs(time.Now()),
+            },
+        },
+        HopFields: []path.HopField{
+            {ConsIngress: 0, ConsEgress: 181},
+            {ConsIngress: 811, ConsEgress: 0},
+        },
+    }
+    sp.HopFields[0].Mac = path.MAC(mac, sp.InfoFields[0], sp.HopFields[0], nil)
+
+    scionL := &slayers.SCION{
+        Version:      0,
+        TrafficClass: 0xb8,
+        FlowID:       0xdead,
+        NextHdr:      slayers.L4UDP,
+        PathType:     scion.PathType,
+        SrcIA:        xtest.MustParseIA("1-ff00:0:1"),
+        DstIA:        xtest.MustParseIA("1-ff00:0:8"),
+        Path:         sp,
+    }
+    srcA := addr.MustParseHost("192.168.0.51")
+    if err := scionL.SetSrcAddr(srcA); err != nil {
+        panic(err)
+    }
+    if err := scionL.SetDstAddr(addr.MustParseHost("174.16.8.1")); err != nil {
+        panic(err)
+    }
+
+    scionudp := &slayers.UDP{}
+    scionudp.SrcPort = 40111
+    scionudp.DstPort = 40222
+    scionudp.SetNetworkLayerForChecksum(scionL)
+
+    payload := []byte("actualpayloadbytes")
+
+    // Prepare input packet
+    input := gopacket.NewSerializeBuffer()
+    if err := gopacket.SerializeLayers(input, options,
+        ethernet, ip, udp, scionL, scionudp, gopacket.Payload(payload),
+    ); err != nil {
+        panic(err)
+    }
+
+    // Pointer to current hop field
+    pointer := slayers.CmnHdrLen + scionL.AddrHdrLen() +
+        scion.MetaLen + path.InfoLen*sp.NumINF + path.HopLen*int(sp.PathMeta.CurrHF)
+
+    // Prepare quoted packet that is part of the SCMP error message.
+    quoted := gopacket.NewSerializeBuffer()
+    if err := gopacket.SerializeLayers(quoted, options,
+        scionL, scionudp, gopacket.Payload(payload),
+    ); err != nil {
+        panic(err)
+    }
+    quote := quoted.Bytes()
+
+    // Prepare want packet
+    want := gopacket.NewSerializeBuffer()
+    ethernet.SrcMAC, ethernet.DstMAC = ethernet.DstMAC, ethernet.SrcMAC
+    ip.SrcIP, ip.DstIP = ip.DstIP, ip.SrcIP
+    udp.SrcPort, udp.DstPort = udp.DstPort, udp.SrcPort
+
+    scionL.DstIA = scionL.SrcIA
+    scionL.SrcIA = xtest.MustParseIA("1-ff00:0:1")
+    if err := scionL.SetDstAddr(srcA); err != nil {
+        panic(err)
+    }
+    intlA := addr.MustParseHost("192.168.0.11")
+    if err := scionL.SetSrcAddr(intlA); err != nil {
+        panic(err)
+    }
+
+    _, err := sp.Reverse()
+    if err != nil {
+        panic(err)
+    }
+
+    scionL.NextHdr = slayers.End2EndClass
+    e2e := normalizedSCMPPacketAuthEndToEndExtn()
+    e2e.NextHdr = slayers.L4SCMP
+    scmpH := &slayers.SCMP{
+        TypeCode: slayers.CreateSCMPTypeCode(
+            slayers.SCMPTypeParameterProblem,
+            slayers.SCMPCodeUnknownHopFieldEgress,
+        ),
+    }
+
+    scmpH.SetNetworkLayerForChecksum(scionL)
+    scmpP := &slayers.SCMPParameterProblem{
+        Pointer: uint16(pointer),
+    }
+
+    if err := gopacket.SerializeLayers(want, options,
+        ethernet, ip, udp, scionL, e2e, scmpH, scmpP, gopacket.Payload(quote),
+    ); err != nil {
+        panic(err)
+    }
+
+    return runner.Case{
+        Name:            "SCMPUnknownHopWrongRouter",
+        WriteTo:         "veth_int_host",
+        ReadFrom:        "veth_int_host",
+        Input:           input.Bytes(),
+        Want:            want.Bytes(),
+        StoreDir:        filepath.Join(artifactsDir, "SCMPUnknownHopWrongRouter"),
+        NormalizePacket: scmpNormalizePacket,
+    }
+}
diff --git a/tools/braccept/main.go b/tools/braccept/main.go
index 6954ea474..4b1891207 100644
--- a/tools/braccept/main.go
+++ b/tools/braccept/main.go
@@ -107,6 +107,7 @@ func realMain() int {
         cases.SCMPInternalXover(artifactsDir, hfMAC),
         cases.SCMPUnknownHop(artifactsDir, hfMAC),
         cases.SCMPUnknownHopEgress(artifactsDir, hfMAC),
+        cases.SCMPUnknownHopWrongRouter(artifactsDir, hfMAC),
         cases.SCMPInvalidHopParentToParent(artifactsDir, hfMAC),
         cases.SCMPInvalidHopChildToChild(artifactsDir, hfMAC),
         cases.SCMPTracerouteIngress(artifactsDir, hfMAC),
0 : uint16(get(ifs)) +} + ghost requires 0 <= start && start <= middle requires middle + HopLen <= end && end <= len(raw) diff --git a/router/dataplane.go b/router/dataplane.go index e452a52f5..520d61858 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -781,11 +781,13 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ decreases // @ outline ( // @ reveal d.PreWellConfigured() + // @ reveal d.getDomExternal() // @ reveal d.DpAgreesWithSpec(dp) // @ unfold d.Mem() d.running = true // @ fold MutexInvariant!() // @ fold d.Mem() + // @ reveal d.getDomExternal() // @ reveal d.PreWellConfigured() // @ reveal d.DpAgreesWithSpec(dp) // @ ) @@ -1279,6 +1281,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ decreases func (d *DataPlane) initMetrics( /*@ ghost dp io.DataPlaneSpec @*/ ) { // @ assert reveal d.PreWellConfigured() + // @ reveal d.getDomExternal() // @ assert reveal d.DpAgreesWithSpec(dp) // @ assert unfolding acc(d.Mem(), _) in // @ d.dpSpecWellConfiguredLocalIA(dp) && @@ -1351,6 +1354,7 @@ func (d *DataPlane) initMetrics( /*@ ghost dp io.DataPlaneSpec @*/ ) { // @ assert d.dpSpecWellConfiguredNeighborIAs(dp) // @ assert d.dpSpecWellConfiguredLinkTypes(dp) // @ fold d.Mem() + // @ reveal d.getDomExternal() // @ reveal d.WellConfigured() // @ assert reveal d.DpAgreesWithSpec(dp) } @@ -2233,7 +2237,7 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ // @ ensures acc(&p.hopField, R20) // @ ensures acc(&p.ingressID, R21) // @ ensures acc(&p.segmentChange, R20) -// @ ensures acc(&p.d, R20) +// @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures reserr == nil ==> respr === processResult{} // @ ensures reserr != nil ==> sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) @@ -2248,6 +2252,7 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ // @ requires p.segmentChange ==> oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 // @ requires !p.segmentChange ==> AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) // @ requires p.segmentChange ==> AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ ensures reserr == nil ==> p.NoBouncingPkt(oldPkt) // @ ensures reserr == nil && !p.segmentChange ==> AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ ensures reserr == nil && p.segmentChange ==> oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 // @ ensures reserr == nil && p.segmentChange ==> p.ingressID != 0 && AbsValidateEgressIDConstraintXover(oldPkt, dp) @@ -2280,7 +2285,8 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh /*@ dp, @*/ ) } - + // @ p.d.getDomExternalLemma() + // @ p.EstablishNoBouncingPkt(oldPkt, pktEgressID) // @ p.d.getLinkTypesMem() ingress, egress := p.d.linkTypes[p.ingressID], p.d.linkTypes[pktEgressID] // @ p.d.LinkTypesLemma(dp) @@ -3294,19 +3300,20 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ p.d.getExternalMem() // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if c, ok := p.d.external[egressID]; ok { + // @ p.d.getDomExternalLemma() // @ p.d.EgressIDNotZeroLemma(egressID, dp) if err := p.processEgress( /*@ ub, dp @*/ ); err != nil { // @ fold p.d.validResult(processResult{}, false) return processResult{}, err /*@, false, absReturnErr(dp, processResult{}) @*/ } 
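+		// Egress processing succeeded: the packet leaves the local AS through a
+		// known external interface, so the matching IO event (enter/exit, or
+		// xover after a segment change) can be emitted below.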
- // @ p.d.InDomainExternalInForwardingMetrics2(egressID) + // @ p.d.InDomainExternalInForwardingMetrics(egressID) // @ assert absPkt(dp, ub) == AbsProcessEgress(nextPkt) // @ nextPkt = absPkt(dp, ub) // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) // @ ghost if(slayers.IsSupportedPkt(ub)) { // @ ghost if(!p.segmentChange) { // enter/exit event - // @ ExternalEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) + // @ ExternalEnterOrExitEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) // @ } else { // xover event // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) @@ -3316,26 +3323,22 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ fold p.d.validResult(processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, false) return processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, nil /*@, false, newAbsPkt @*/ } + // @ p.d.getDomExternalLemma() + // @ p.IngressIDNotZeroLemma(nextPkt, egressID) // ASTransit: pkts leaving from another AS BR. // @ p.d.getInternalNextHops() // @ ghost if p.d.internalNextHops != nil { unfold acc(accAddr(p.d.internalNextHops), _) } if a, ok := p.d.internalNextHops[egressID]; ok { // @ p.d.getInternal() - // @ ghost if(path.ifsToIO_ifs(p.ingressID) != none[io.IO_ifs]) { - // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) - // @ ghost if(slayers.IsSupportedPkt(ub)) { - // @ if(!p.segmentChange) { - // enter event - // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) - // @ } else { - // xover event - // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) - // @ } + // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ if(!p.segmentChange) { + // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) + // @ } else { + // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) // @ } - // @ newAbsPkt = reveal absIO_val(dp, p.rawPkt, 0) - // @ } else { - // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4497") // @ } + // @ newAbsPkt = reveal absIO_val(dp, p.rawPkt, 0) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, false) return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, false, newAbsPkt @*/ } @@ -3493,13 +3496,8 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ p.d.getExternalMem() // @ ghost if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if c, ok := p.d.external[ohp.FirstHop.ConsEgress]; ok { - // (VerifiedSCION) the following must hold, obviously. - // Unfortunately, Gobra struggles with instantiating the body - // of the function. 
- // @ assume ohp.FirstHop.ConsEgress in p.d.getDomExternal() - // buffer should already be correct - // (VerifiedSCION) TODO: we need to add a pre to run that says that the - // domain of forwardingMetrics is the same as the one for external + // @ p.d.getDomExternalLemma() + // @ assert ohp.FirstHop.ConsEgress in p.d.getDomExternal() // @ p.d.InDomainExternalInForwardingMetrics(ohp.FirstHop.ConsEgress) // @ fold p.d.validResult(processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, false) // @ TemporaryAssumeForIO(!slayers.IsSupportedPkt(p.rawPkt)) diff --git a/router/dataplane_spec.gobra b/router/dataplane_spec.gobra index 4fdf29163..5a5e28fc5 100644 --- a/router/dataplane_spec.gobra +++ b/router/dataplane_spec.gobra @@ -242,6 +242,7 @@ pure func (d *DataPlane) GetDomInternalNextHops() set[uint16] { } ghost +opaque requires acc(d.Mem(), _) decreases pure func (d *DataPlane) getDomExternal() set[uint16] { @@ -276,7 +277,7 @@ requires acc(d.Mem(), _) decreases pure func (d *DataPlane) WellConfigured() bool { return d.getDomNeighborIAs() == d.getDomExternal() && - d.getDomExternal() == d.getDomLinkTypes() && + d.getDomNeighborIAs() == d.getDomLinkTypes() && !(0 in d.getDomNeighborIAs()) && d.getDomExternal() subset d.getDomForwardingMetrics() } @@ -683,19 +684,6 @@ func (d *DataPlane) InDomainExternalInForwardingMetrics(id uint16) { reveal d.WellConfigured() } -ghost -requires acc(d.Mem(), _) && d.WellConfigured() -requires acc(&d.external, _) && acc(d.external, _) -requires id in domain(d.external) -ensures acc(d.Mem(), _) -ensures id in d.getDomForwardingMetrics() -decreases -func (d *DataPlane) InDomainExternalInForwardingMetrics2(id uint16) { - unfold acc(d.Mem(), _) - reveal d.WellConfigured() - unfold acc(accBatchConn(d.external), _) -} - ghost requires acc(d.Mem(), _) && d.WellConfigured() requires acc(&d.external, _) && acc(d.external, R55) @@ -706,6 +694,7 @@ ensures id in d.getDomForwardingMetrics() decreases func (d *DataPlane) InDomainExternalInForwardingMetrics3(id uint16) { reveal d.WellConfigured() + reveal d.getDomExternal() assert unfolding acc(d.Mem(), _) in (unfolding acc(accBatchConn(d.external), _) in true) } diff --git a/router/dataplane_spec_test.gobra b/router/dataplane_spec_test.gobra index 26c4fe81d..49b421d64 100644 --- a/router/dataplane_spec_test.gobra +++ b/router/dataplane_spec_test.gobra @@ -207,10 +207,10 @@ func testRun( assert d.dpSpecWellConfiguredLinkTypes(dp) fold d.Mem() - assert d.getDomNeighborIAs() == d.getDomExternal() - assert d.getDomExternal() == d.getDomLinkTypes() + assert d.getDomNeighborIAs() == reveal d.getDomExternal() + assert d.getDomNeighborIAs() == d.getDomLinkTypes() assert !(0 in d.getDomNeighborIAs()) - assert d.getDomExternal() intersection d.GetDomInternalNextHops() == set[uint16]{} + assert reveal d.getDomExternal() intersection d.GetDomInternalNextHops() == set[uint16]{} assert reveal d.DpAgreesWithSpec(dp) assert reveal d.PreWellConfigured() diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra index ebb4aeb92..4faa43b52 100644 --- a/router/io-spec-abstract-transitions.gobra +++ b/router/io-spec-abstract-transitions.gobra @@ -25,6 +25,14 @@ import ( . "verification/utils/definitions" ) +ghost +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func CurrSegIO_ifs(pkt io.IO_pkt2, dir bool) option[io.IO_ifs] { + return let currseg := pkt.CurrSeg in + (currseg.ConsDir == dir ? 
currseg.Future[0].InIF2 : currseg.Future[0].EgIF2) +} + ghost opaque requires len(oldPkt.CurrSeg.Future) > 0 @@ -47,7 +55,7 @@ decreases pure func AbsValidateIngressIDConstraint(pkt io.IO_pkt2, ingressID option[io.IO_ifs]) bool { return let currseg := pkt.CurrSeg in ingressID != none[io.IO_ifs] ==> - ingressID == (currseg.ConsDir ? currseg.Future[0].InIF2 : currseg.Future[0].EgIF2) + ingressID == CurrSegIO_ifs(pkt, true) } ghost @@ -67,7 +75,7 @@ requires len(pkt.CurrSeg.Future) > 0 decreases pure func AbsEgressInterfaceConstraint(pkt io.IO_pkt2, egressID option[io.IO_ifs]) bool { return let currseg := pkt.CurrSeg in - egressID == (currseg.ConsDir ? currseg.Future[0].EgIF2 : currseg.Future[0].InIF2) + egressID == CurrSegIO_ifs(pkt, false) } ghost @@ -140,7 +148,9 @@ pure func AbsVerifyCurrentMACConstraint(pkt io.IO_pkt2, dp io.DataPlaneSpec) boo let uinfo := currseg.UInfo in dp.hf_valid(d, ts, uinfo, hf) } - +// This executes the IO enter event whenever a pkt was received +// from a different AS (ingressID != none[io.IO_ifs]) +// and will be forwarded to another border router within the AS (egressID == none[io.IO_ifs]) ghost requires dp.Valid() requires ingressID != none[io.IO_ifs] @@ -165,6 +175,12 @@ func InternalEnterEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt i AtomicEnter(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) } +// Either this executes the IO enter event whenever a pkt was received +// from a different AS (ingressID != none[io.IO_ifs]) +// and will leave the AS (egressID != none[io.IO_ifs]) or +// it executes the IO exit event whenever a pkt was received from +// within the AS (ingressID == none[io.IO_ifs]) +// and will leave the AS (egressID != none[io.IO_ifs]) ghost requires dp.Valid() requires egressID != none[io.IO_ifs] @@ -180,7 +196,7 @@ preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioShared ensures dp.Valid() ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases -func ExternalEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { +func ExternalEnterOrExitEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { reveal dp.Valid() nextPkt := reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) @@ -195,6 +211,9 @@ func ExternalEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_ } } +// This executes the IO xover event whenever a pkt was received +// from a different AS (ingressID != none[io.IO_ifs]) +// and a segment switch was performed. 
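+// Taken together, the three event lemmas in this file cover the cases that
+// process() distinguishes (a summary of the preconditions stated above):
+//   InternalEnterEvent        ingressID != none, egressID == none, no segment change
+//   ExternalEnterOrExitEvent  egressID != none (any ingressID),    no segment change
+//   XoverEvent                ingressID != none, after a segment switch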
ghost requires dp.Valid() requires ingressID != none[io.IO_ifs] diff --git a/router/io-spec-non-proven-lemmas.gobra b/router/io-spec-non-proven-lemmas.gobra index 6edcde280..b3b5f0adb 100644 --- a/router/io-spec-non-proven-lemmas.gobra +++ b/router/io-spec-non-proven-lemmas.gobra @@ -166,6 +166,50 @@ pure func (p* scionPacketProcessor) GetIsXoverSpec(ub []byte) bool { p.path.GetIsXoverSpec(ubPath) } +ghost +opaque +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func (p *scionPacketProcessor) NoBouncingPkt(pkt io.IO_pkt2) bool { + return let currseg := pkt.CurrSeg in + let OptEgressID := CurrSegIO_ifs(pkt, false) in + let egressID := path.IO_ifsToIfs(OptEgressID) in + ((egressID in p.d.getDomExternal()) || p.ingressID != 0) +} + +ghost +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +requires len(pkt.CurrSeg.Future) > 0 +requires AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) +requires (egressID in p.d.getDomExternal()) || p.ingressID != 0 +ensures acc(&p.d, R55) && acc(p.d.Mem(), _) +ensures acc(&p.ingressID, R55) +ensures p.NoBouncingPkt(pkt) +decreases +func (p *scionPacketProcessor) EstablishNoBouncingPkt(pkt io.IO_pkt2, egressID uint16) { + reveal AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) + reveal p.NoBouncingPkt(pkt) +} + +ghost +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +requires len(pkt.CurrSeg.Future) > 0 +requires AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) +requires p.NoBouncingPkt(pkt) +requires !(egressID in p.d.getDomExternal()) +ensures acc(&p.d, R55) && acc(p.d.Mem(), _) +ensures acc(&p.ingressID, R55) +ensures p.ingressID != 0 +decreases +func (p *scionPacketProcessor) IngressIDNotZeroLemma(pkt io.IO_pkt2, egressID uint16) { + reveal AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) + reveal p.NoBouncingPkt(pkt) +} + // TODO prove ghost requires 0 <= start && start <= end && end <= len(ub) diff --git a/router/io-spec.gobra b/router/io-spec.gobra index 456e9f73b..9b77fe93a 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -159,37 +159,33 @@ ghost requires acc(d.Mem(), _) requires d.DpAgreesWithSpec(dp) requires d.WellConfigured() -requires acc(&d.external, _) -requires acc(d.external, _) -requires egressID in domain(d.external) +requires egressID in d.getDomExternal() ensures egressID != 0 ensures io.IO_ifs(egressID) in domain(dp.GetNeighborIAs()) decreases func (d *DataPlane) EgressIDNotZeroLemma(egressID uint16, dp io.DataPlaneSpec) { reveal d.WellConfigured() reveal d.DpAgreesWithSpec(dp) - d.getDomExternalLemma() - assert d.getDomExternal() == domain(d.external) - assert egressID in d.getDomExternal() - assert d.getDomNeighborIAs() == d.getDomExternal() - assert io.IO_ifs(egressID) in domain(dp.GetNeighborIAs()) - assert egressID != 0 } ghost requires acc(d.Mem(), _) requires acc(&d.external, _) -requires acc(d.external, _) +requires d.external != nil ==> acc(d.external, _) ensures acc(d.Mem(), _) ensures acc(&d.external, _) -ensures acc(d.external, _) +ensures d.external != nil ==> acc(d.external, _) ensures d.getDomExternal() == domain(d.external) decreases func (d *DataPlane) getDomExternalLemma() { - assert d.external != nil - assert d.getDomExternal() == unfolding acc(d.Mem(), _) in - (unfolding acc(accBatchConn(d.external), _) in - domain(d.external)) + if (d.external != nil) { + assert reveal d.getDomExternal() == unfolding acc(d.Mem(), 
_) in + (unfolding acc(accBatchConn(d.external), _) in + domain(d.external)) + } else { + assert reveal d.getDomExternal() == + unfolding acc(d.Mem(), _) in set[uint16]{} + } } ghost From 3defe570e82d3755fac408bdd96d853297d68a73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Mon, 6 May 2024 16:10:23 +0200 Subject: [PATCH 30/57] bring changes from #243 (#335) --- pkg/slayers/path/scion/base.go | 17 +++---------- pkg/slayers/path/scion/base_spec.gobra | 35 ++++++++++++++++++++------ pkg/slayers/scion.go | 10 ++++---- pkg/slayers/scion_spec.gobra | 4 +-- 4 files changed, 38 insertions(+), 28 deletions(-) diff --git a/pkg/slayers/path/scion/base.go b/pkg/slayers/path/scion/base.go index d74fe0c09..003a16d38 100644 --- a/pkg/slayers/path/scion/base.go +++ b/pkg/slayers/path/scion/base.go @@ -80,19 +80,10 @@ type Base struct { // @ requires s.NonInitMem() // @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R50) -// @ ensures r != nil ==> (s.NonInitMem() && r.ErrorMem()) -// @ ensures r == nil ==> ( -// @ s.Mem() && -// @ let lenD := len(data) in -// @ MetaLen <= lenD && -// @ let b0 := sl.GetByte(data, 0, lenD, 0) in -// @ let b1 := sl.GetByte(data, 0, lenD, 1) in -// @ let b2 := sl.GetByte(data, 0, lenD, 2) in -// @ let b3 := sl.GetByte(data, 0, lenD, 3) in -// @ let line := binary.BigEndian.Uint32Spec(b0, b1, b2, b3) in -// @ let metaHdr := DecodedFrom(line) in -// @ metaHdr == s.GetMetaHdr() && -// @ s.InfsMatchHfs()) +// @ ensures r != nil ==> +// @ s.NonInitMem() && r.ErrorMem() +// @ ensures r == nil ==> +// @ s.Mem() && s.DecodeFromBytesSpec(data) && s.InfsMatchHfs() // @ ensures len(data) < MetaLen ==> r != nil // @ decreases func (s *Base) DecodeFromBytes(data []byte) (r error) { diff --git a/pkg/slayers/path/scion/base_spec.gobra b/pkg/slayers/path/scion/base_spec.gobra index 48faeebcf..75620a731 100644 --- a/pkg/slayers/path/scion/base_spec.gobra +++ b/pkg/slayers/path/scion/base_spec.gobra @@ -40,10 +40,8 @@ pred (b *Base) Mem() { acc(&b.PathMeta.SegLen[0]) && acc(&b.PathMeta.SegLen[1]) && acc(&b.PathMeta.SegLen[2]) && - 0 <= b.NumINF && b.NumINF <= MaxINFs && - // the program defines 64 as the maximum number of hops, - // but this does not seem to be enforced anywhere. 
- 0 <= b.NumHops && // b.NumHops <= MaxHops && + 0 <= b.NumINF && b.NumINF <= MaxINFs && + 0 <= b.NumHops && b.NumHops <= MaxHops && (0 < b.NumINF ==> 0 < b.NumHops) } @@ -75,8 +73,13 @@ pure func (b Base) ValidCurrIdxsSpec() bool { (forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==> b.PathMeta.SegLen[i] != 0) && (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==> - b.PathMeta.SegLen[i] == 0) && - b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) + b.PathMeta.SegLen[i] == 0) +} + +ghost +decreases +pure func (b Base) CurrInfMatchesCurrHF() bool { + return b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) } ghost @@ -284,8 +287,15 @@ pure func (b AbsBase) ValidCurrIdxsSpec() bool { (forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==> b.PathMeta.SegLen[i] != 0) && (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==> - b.PathMeta.SegLen[i] == 0) && - b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) + b.PathMeta.SegLen[i] == 0) +} + +ghost +opaque +requires b.ValidCurrIdxsSpec() +decreases +pure func (b AbsBase) CurrInfMatchesCurrHF() bool { + return b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) } ghost @@ -363,6 +373,15 @@ pure func (m MetaHdr) DecodeFromBytesSpec(b []byte) bool { DecodedFrom(line) == m } +ghost +requires acc(s.Mem(), _) +requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), _) +decreases +pure func (s *Base) DecodeFromBytesSpec(b []byte) bool { + return unfolding acc(s.Mem(), _) in + s.PathMeta.DecodeFromBytesSpec(b) +} + ghost decreases pure func (m MetaHdr) SegsInBounds() bool { diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index bb713a41d..cd8cd906c 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -418,11 +418,11 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ fold s.NonInitMem() return err } - /*@ ghost if typeOf(s.Path) == type[*onehop.Path] { - s.Path.(*onehop.Path).InferSizeUb(data[offset : offset+pathLen]) - assert s.Path.Len(data[offset : offset+pathLen]) <= len(data[offset : offset+pathLen]) - assert CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.Len(data[offset : offset+pathLen]) <= len(data) - } @*/ + // @ ghost if typeOf(s.Path) == type[*onehop.Path] { + // @ s.Path.(*onehop.Path).InferSizeUb(data[offset : offset+pathLen]) + // @ assert s.Path.Len(data[offset : offset+pathLen]) <= len(data[offset : offset+pathLen]) + // @ assert CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.Len(data[offset : offset+pathLen]) <= len(data) + // @ } s.Contents = data[:hdrBytes] s.Payload = data[hdrBytes:] diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index 01bd159e6..e8f695a0f 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -408,8 +408,8 @@ requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) decreases pure func IsSupportedPkt(raw []byte) bool { return CmnHdrLen <= len(raw) && - let pathType := path.Type(GetPathType(raw)) in - let nextHdr := L4ProtocolType(GetNextHdr(raw)) in + let pathType := path.Type(GetPathType(raw)) in + let nextHdr := L4ProtocolType(GetNextHdr(raw)) in pathType == scion.PathType && nextHdr != L4SCMP } From 6626dfc4328c3388bda40592c60184a9d9509522 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Tue, 7 May 2024 13:34:51 +0200 Subject: [PATCH 31/57] Drop `trusted` annotation in method (#339) * drop trusted from method --- pkg/experimental/epic/epic.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 
deletions(-) diff --git a/pkg/experimental/epic/epic.go b/pkg/experimental/epic/epic.go index 2a3c083f6..1eee3d3c5 100644 --- a/pkg/experimental/epic/epic.go +++ b/pkg/experimental/epic/epic.go @@ -107,10 +107,6 @@ func VerifyTimestamp(timestamp time.Time, epicTS uint32, now time.Time) (err err // If the same buffer is provided in subsequent calls to this function, the previously returned // EPIC MAC may get overwritten. Only the most recently returned EPIC MAC is guaranteed to be // valid. -// (VerifiedSCION) the following function is marked as trusted, even though it is verified, -// due to an incompletness of Gobra that keeps it from being able to prove that we have -// the magic wand at the end of a successful run. -// @ trusted // @ requires len(auth) == 16 // @ requires sl.AbsSlice_Bytes(buffer, 0, len(buffer)) // @ preserves acc(s.Mem(ub), R20) @@ -124,6 +120,8 @@ func VerifyTimestamp(timestamp time.Time, epicTS uint32, now time.Time) (err err func CalcMac(auth []byte, pktID epic.PktID, s *slayers.SCION, timestamp uint32, buffer []byte /*@ , ghost ub []byte @*/) (res []byte, reserr error) { + // @ ghost oldBuffer := buffer + // @ ghost allocatesNewBuffer := len(buffer) < MACBufferSize if len(buffer) < MACBufferSize { buffer = make([]byte, MACBufferSize) // @ fold sl.AbsSlice_Bytes(buffer, 0, len(buffer)) @@ -149,11 +147,14 @@ func CalcMac(auth []byte, pktID epic.PktID, s *slayers.SCION, // @ ghost end := start + 4 result := input[len(input)-f.BlockSize() : len(input)-f.BlockSize()+4] // @ sl.SplitRange_Bytes(input, start, end, writePerm) - // @ package (sl.AbsSlice_Bytes(result, 0, len(result)) --* sl.AbsSlice_Bytes(buffer, 0, len(buffer))) { - // @ sl.CombineRange_Bytes(input, start, end, writePerm) - // @ sl.CombineRange_Bytes(buffer, 0, inputLength, writePerm) + // @ package (sl.AbsSlice_Bytes(result, 0, len(result)) --* sl.AbsSlice_Bytes(oldBuffer, 0, len(oldBuffer))) { + // @ ghost if !allocatesNewBuffer { + // @ assert oldBuffer === buffer + // @ sl.CombineRange_Bytes(input, start, end, writePerm) + // @ sl.CombineRange_Bytes(oldBuffer, 0, inputLength, writePerm) + // @ } // @ } - // @ assert (sl.AbsSlice_Bytes(result, 0, len(result)) --* sl.AbsSlice_Bytes(buffer, 0, len(buffer))) + // @ assert (sl.AbsSlice_Bytes(result, 0, len(result)) --* sl.AbsSlice_Bytes(oldBuffer, 0, len(oldBuffer))) return result, nil } From 31d222180a8bab2c8d48496baef9028b1f3dcde4 Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Tue, 7 May 2024 18:07:59 +0200 Subject: [PATCH 32/57] IO-spec lemmas (#334) * progress open io-lemmas * fix verification errors * refactoring * fix verification error --- pkg/slayers/scion_spec.gobra | 80 +++++- router/dataplane.go | 17 +- ...oven-lemmas.gobra => io-spec-lemmas.gobra} | 94 +++++-- router/widen-lemma.gobra | 259 +++++++----------- 4 files changed, 268 insertions(+), 182 deletions(-) rename router/{io-spec-non-proven-lemmas.gobra => io-spec-lemmas.gobra} (77%) diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index e8f695a0f..ea1877e3c 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -350,6 +350,58 @@ func (s *SCION) GetPath(ub []byte) path.Path { return unfolding acc(s.Mem(ub), _) in s.Path } +ghost +opaque +pure +requires acc(s.Mem(ub), _) +requires acc(sl.AbsSlice_Bytes(ub, 0, length), _) +requires CmnHdrLen <= length +decreases +func (s *SCION) ValidHeaderOffset(ub []byte, length int) bool { + return GetAddressOffsetWithinLength(ub, length) == 
s.PathStartIdx(ub) && + GetLengthWithinLength(ub,length) == s.PathEndIdx(ub) +} + +ghost +requires acc(s.Mem(ub), R56) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +requires acc(sl.AbsSlice_Bytes(ub, 0, length), R55) +requires CmnHdrLen <= length && length <= len(ub) +requires s.ValidHeaderOffset(ub, len(ub)) +ensures acc(s.Mem(ub), R56) +ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +ensures acc(sl.AbsSlice_Bytes(ub, 0, length), R55) +ensures s.ValidHeaderOffset(ub, length) +decreases +func (s *SCION) ValidHeaderOffsetToSubSliceLemma(ub []byte, length int) { + reveal s.ValidHeaderOffset(ub, len(ub)) + unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.AbsSlice_Bytes(ub, 0, length), R56) + assert reveal s.ValidHeaderOffset(ub, length) + fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + fold acc(sl.AbsSlice_Bytes(ub, 0, length), R56) +} + +ghost +requires acc(s.Mem(ub), R56) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +requires acc(sl.AbsSlice_Bytes(ub, 0, length), R55) +requires CmnHdrLen <= length && length <= len(ub) +requires s.ValidHeaderOffset(ub, length) +ensures acc(s.Mem(ub), R56) +ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +ensures acc(sl.AbsSlice_Bytes(ub, 0, length), R55) +ensures s.ValidHeaderOffset(ub, len(ub)) +decreases +func (s *SCION) ValidHeaderOffsetFromSubSliceLemma(ub []byte, length int) { + reveal s.ValidHeaderOffset(ub, len(ub)) + unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.AbsSlice_Bytes(ub, 0, length), R56) + assert reveal s.ValidHeaderOffset(ub, length) + fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + fold acc(sl.AbsSlice_Bytes(ub, 0, length), R56) +} + ghost opaque pure @@ -371,8 +423,12 @@ func (s *SCION) EqAbsHeader(ub []byte) bool { &ub[low:high][k] == &ub[low + k]) in let _ := Asserting(forall k int :: {&ub[low:high][:scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> &ub[low:high][:scion.MetaLen][k] == &ub[low:high][k]) in - s.Path.(*scion.Raw).Base.GetMetaHdr() == - scion.DecodedFrom(binary.BigEndian.Uint32(ub[low:high][:scion.MetaLen])) + let metaHdr := scion.DecodedFrom(binary.BigEndian.Uint32(ub[low:high][:scion.MetaLen])) in + let seg1 := int(metaHdr.SegLen[0]) in + let seg2 := int(metaHdr.SegLen[1]) in + let seg3 := int(metaHdr.SegLen[2]) in + s.Path.(*scion.Raw).Base.GetBase() == + scion.Base{metaHdr, scion.NumInfoFields(seg1, seg2, seg3), seg1 + seg2 + seg3} } // Checks if the common path header is valid in the serialized scion packet. 
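(Aside, not part of the patch: EqAbsHeader above ties the ghost Base to the
four path meta-header bytes. The following self-contained Go sketch shows the
decoding that scion.DecodedFrom is assumed to perform on that 32-bit line,
under the standard SCION layout of 2 bits CurrINF, 6 bits CurrHF, 6 reserved
bits, and three 6-bit segment lengths; the helper name decodeMetaHdr is
hypothetical.)

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    type metaHdr struct {
    	CurrINF uint8
    	CurrHF  uint8
    	SegLen  [3]uint8
    }

    // decodeMetaHdr unpacks the big-endian 32-bit path meta line.
    func decodeMetaHdr(b []byte) metaHdr {
    	line := binary.BigEndian.Uint32(b[:4])
    	return metaHdr{
    		CurrINF: uint8(line >> 30),      // top 2 bits
    		CurrHF:  uint8(line>>24) & 0x3F, // next 6 bits (6 reserved bits follow)
    		SegLen: [3]uint8{
    			uint8(line>>12) & 0x3F,
    			uint8(line>>6) & 0x3F,
    			uint8(line) & 0x3F,
    		},
    	}
    }

    func main() {
    	// 0x40002082 decodes to CurrINF=1, CurrHF=0, SegLen=[2 2 2].
    	fmt.Println(decodeMetaHdr([]byte{0x40, 0x00, 0x20, 0x82}))
    }

In EqAbsHeader, NumInfoFields(seg1, seg2, seg3) and seg1 + seg2 + seg3 then
reconstruct exactly the NumINF and NumHops fields of the abstract scion.Base.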
@@ -419,7 +475,15 @@ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) requires CmnHdrLen <= len(ub) decreases pure func GetAddressOffset(ub []byte) int { - return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in + return GetAddressOffsetWithinLength(ub, len(ub)) +} + +ghost +requires acc(sl.AbsSlice_Bytes(ub, 0, length), _) +requires CmnHdrLen <= length +decreases +pure func GetAddressOffsetWithinLength(ub []byte, length int) int { + return unfolding acc(sl.AbsSlice_Bytes(ub, 0, length), _) in let dstAddrLen := AddrType(ub[9] >> 4 & 0x7).Length() in let srcAddrLen := AddrType(ub[9] & 0x7).Length() in CmnHdrLen + 2*addr.IABytes + dstAddrLen + srcAddrLen @@ -430,7 +494,15 @@ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) requires CmnHdrLen <= len(ub) decreases pure func GetLength(ub []byte) int { - return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in int(ub[5])*LineLen + return GetLengthWithinLength(ub, len(ub)) +} + +ghost +requires acc(sl.AbsSlice_Bytes(ub, 0, length), _) +requires CmnHdrLen <= length +decreases +pure func GetLengthWithinLength(ub []byte, length int) int { + return unfolding acc(sl.AbsSlice_Bytes(ub, 0, length), _) in int(ub[5])*LineLen } ghost diff --git a/router/dataplane.go b/router/dataplane.go index 520d61858..8268067e7 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -2087,9 +2087,10 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte, ghos // @ ghost startP := p.scionLayer.PathStartIdx(ubScionL) // @ ghost endP := p.scionLayer.PathEndIdx(ubScionL) // @ ghost ubPath := ubScionL[startP:endP] - // @ sl.SplitRange_Bytes(ubScionL, startP, endP, R55) + // @ sl.SplitRange_Bytes(ubScionL, startP, endP, R50) // @ p.AbsPktToSubSliceAbsPkt(ubScionL, startP, endP, dp) - // @ ghost defer sl.CombineRange_Bytes(ubScionL, startP, endP, R55) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ubScionL, startP) + // @ ghost defer sl.CombineRange_Bytes(ubScionL, startP, endP, R50) // @ unfold acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20) // @ defer fold acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20) // @ p.d.getLocalIA() @@ -2120,6 +2121,7 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte, ghos } // @ ghost if(p.path.IsLastHopSpec(ubPath)) { // @ p.path.LastHopLemma(ubPath, dp) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ubScionL, startP) // @ p.SubSliceAbsPktToAbsPkt(ubScionL, startP, endP, dp) // @ } } @@ -2398,12 +2400,14 @@ func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte // @ assume 0 <= p.path.GetCurrINF(ubPath) // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) // @ p.AbsPktToSubSliceAbsPkt(ub, start, end, dp) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, start) // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubPath , dp@*/); err != nil { // @ ghost sl.CombineRange_Bytes(ub, start, end, writePerm) return serrors.WrapStr("update info field", err) } // @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, start) // @ p.SubSliceAbsPktToAbsPkt(ub, start, end, dp) // @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm) // @ absPktFutureLemma(dp, ub) @@ -2587,9 +2591,11 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte, ghost dp io.D // @ unfold 
acc(p.scionLayer.Mem(ub), 1-R55) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP) // @ reveal p.EqAbsInfoField(absPkt(dp, ub)) // @ reveal p.EqAbsHopField(absPkt(dp, ub)) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ reveal p.scionLayer.ValidHeaderOffset(ub, startP) // @ unfold acc(p.scionLayer.Mem(ub), R55) // we are the egress router and if we go in construction direction we // need to update the SegID. @@ -2616,8 +2622,10 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte, ghost dp io.D return serrors.WrapStr("incrementing path", err) } // @ fold acc(p.scionLayer.Mem(ub), R55) + // @ assert reveal p.scionLayer.ValidHeaderOffset(ub, startP) // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp) // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ absPktFutureLemma(dp, ub) @@ -2664,6 +2672,7 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost dp io.DataPla // @ unfold acc(p.scionLayer.Mem(ub), 1-R55) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP) // @ TemporaryAssumeForIO(len(old(absPkt(dp, ub)).CurrSeg.Future) == 1) // @ reveal p.EqAbsInfoField(absPkt(dp, ub)) // @ reveal p.EqAbsHopField(absPkt(dp, ub)) @@ -2870,6 +2879,7 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh // @ reveal p.LastHopLen(ub, dp) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubPath @*/); err != nil { // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) @@ -2879,6 +2889,7 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ assert p.DstIsLocalIngressID(ub) // @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopfield + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp) // @ absPktFutureLemma(dp, ub) // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) // postcondition of SetHopfield @@ -2978,6 +2989,7 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho // @ assume 0 <= p.path.GetCurrHF(ubPath) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubPath @*/); err != nil { // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) @@ -2986,6 +2998,7 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ 
ghost ub []byte, gho } // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopfield + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp) // @ absPktFutureLemma(dp, ub) // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) // postcondition of SetHopfield diff --git a/router/io-spec-non-proven-lemmas.gobra b/router/io-spec-lemmas.gobra similarity index 77% rename from router/io-spec-non-proven-lemmas.gobra rename to router/io-spec-lemmas.gobra index b3b5f0adb..018c4cec1 100644 --- a/router/io-spec-non-proven-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -210,12 +210,11 @@ func (p *scionPacketProcessor) IngressIDNotZeroLemma(pkt io.IO_pkt2, egressID ui reveal p.NoBouncingPkt(pkt) } -// TODO prove ghost requires 0 <= start && start <= end && end <= len(ub) requires acc(p.scionLayer.Mem(ub), R55) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) -requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) +requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) requires p.path === p.scionLayer.GetPath(ub) requires dp.Valid() @@ -223,40 +222,62 @@ requires slayers.ValidPktMetaHdr(ub) requires start == p.scionLayer.PathStartIdx(ub) requires end == p.scionLayer.PathEndIdx(ub) requires p.scionLayer.EqAbsHeader(ub) -ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) ensures acc(p.scionLayer.Mem(ub), R55) -ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55) +ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) ensures dp.Valid() ensures slayers.ValidPktMetaHdr(ub) +ensures p.scionLayer.EqAbsHeader(ub) ensures start == p.scionLayer.PathStartIdx(ub) ensures end == p.scionLayer.PathEndIdx(ub) ensures scion.validPktMetaHdr(ub[start:end]) ensures p.path.EqAbsHeader(ub[start:end]) +ensures p.scionLayer.ValidHeaderOffset(ub, len(ub)) ensures absPkt(dp, ub) == p.path.absPkt(dp, ub[start:end]) decreases -func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end int, dp io.DataPlaneSpec) -/* { - reveal validPktMetaHdr(ub) - reveal p.scionLayer.EqAbsHeader(ub) +func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end int, dp io.DataPlaneSpec) { unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) unfold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) + reveal slayers.ValidPktMetaHdr(ub) + reveal p.scionLayer.EqAbsHeader(ub) assert reveal scion.validPktMetaHdr(ub[start:end]) unfold acc(p.scionLayer.Mem(ub), R56) - assert p.scionLayer.Path.(*scion.Raw).EqAbsHeader(ub[start:end]) + reveal p.scionLayer.ValidHeaderOffset(ub, len(ub)) assert p.path.EqAbsHeader(ub[start:end]) fold acc(p.scionLayer.Mem(ub), R56) - assert reveal absPkt(dp, ub) == reveal p.path.absPkt(dp, ub[start:end]) + assert start == slayers.GetAddressOffset(ub) + + hdr1 := binary.BigEndian.Uint32(ub[start:start+scion.MetaLen]) + hdr2 := binary.BigEndian.Uint32(ub[start:end][:scion.MetaLen]) + assert hdr1 == hdr2 + hdr := hdr1 fold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) -}*/ -// TODO prove + metaHdr := 
scion.DecodedFrom(hdr) + currINFIdx := int(metaHdr.CurrINF) + currHFIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) + offset := scion.HopFieldOffset(numINF, 0, start) + + currSegWidenLemma(ub, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, start, start, end) + leftSegWidenLemma(ub, currINFIdx + 1, seg1Len, seg2Len, seg3Len, start, start, end) + midSegWidenLemma(ub, currINFIdx + 2, seg1Len, seg2Len, seg3Len, start, start, end) + rightSegWidenLemma(ub, currINFIdx - 1, seg1Len, seg2Len, seg3Len, start, start, end) + assert reveal absPkt(dp, ub) == reveal p.path.absPkt(dp, ub[start:end]) +} + ghost requires 0 <= start && start <= end && end <= len(ub) requires acc(p.scionLayer.Mem(ub), R55) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) -requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) +requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) requires p.path === p.scionLayer.GetPath(ub) requires dp.Valid() @@ -264,9 +285,10 @@ requires scion.validPktMetaHdr(ub[start:end]) requires start == p.scionLayer.PathStartIdx(ub) requires end == p.scionLayer.PathEndIdx(ub) requires p.path.EqAbsHeader(ub[start:end]) -ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +requires p.scionLayer.ValidHeaderOffset(ub, len(ub)) +ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) ensures acc(p.scionLayer.Mem(ub), R55) -ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55) +ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) ensures dp.Valid() ensures slayers.ValidPktMetaHdr(ub) @@ -276,7 +298,43 @@ ensures scion.validPktMetaHdr(ub[start:end]) ensures p.scionLayer.EqAbsHeader(ub) ensures absPkt(dp, ub) == p.path.absPkt(dp, ub[start:end]) decreases -func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end int, dp io.DataPlaneSpec) +func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end int, dp io.DataPlaneSpec){ + unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) + unfold acc(p.scionLayer.Mem(ub), R56) + unfold acc(p.scionLayer.Path.Mem(ub[start:end]), R56) + reveal p.scionLayer.ValidHeaderOffset(ub, len(ub)) + assert reveal p.scionLayer.EqAbsHeader(ub) + fold acc(p.scionLayer.Path.Mem(ub[start:end]), R56) + fold acc(p.scionLayer.Mem(ub), R56) + reveal scion.validPktMetaHdr(ub[start:end]) + assert reveal slayers.ValidPktMetaHdr(ub) + assert start == slayers.GetAddressOffset(ub) + + hdr1 := binary.BigEndian.Uint32(ub[start:start+scion.MetaLen]) + hdr2 := binary.BigEndian.Uint32(ub[start:end][:scion.MetaLen]) + assert hdr1 == hdr2 + hdr := hdr1 + fold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) + fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + + metaHdr := scion.DecodedFrom(hdr) + currINFIdx := int(metaHdr.CurrINF) + currHFIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segLen := 
scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) + offset := scion.HopFieldOffset(numINF, 0, start) + + currSegWidenLemma(ub, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, start, start, end) + leftSegWidenLemma(ub, currINFIdx + 1, seg1Len, seg2Len, seg3Len, start, start, end) + midSegWidenLemma(ub, currINFIdx + 2, seg1Len, seg2Len, seg3Len, start, start, end) + rightSegWidenLemma(ub, currINFIdx - 1, seg1Len, seg2Len, seg3Len, start, start, end) + assert reveal absPkt(dp, ub) == reveal p.path.absPkt(dp, ub[start:end]) +} ghost opaque diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra index 199e24241..9e6673455 100644 --- a/router/widen-lemma.gobra +++ b/router/widen-lemma.gobra @@ -118,156 +118,100 @@ func absPktWidenLemma(dp io.DataPlaneSpec, raw []byte, length int) { reveal slayers.ValidPktMetaHdr(raw) reveal slayers.ValidPktMetaHdr(raw[:length]) + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) headerOffset1 := slayers.GetAddressOffset(raw) headerOffset2 := slayers.GetAddressOffset(raw[:length]) - - hdr1 := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in binary.BigEndian.Uint32(raw[headerOffset1:headerOffset1+scion.MetaLen]) - hdr2 := unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][headerOffset2:headerOffset2+scion.MetaLen]) - assert unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][headerOffset2:headerOffset2+scion.MetaLen]) == unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in binary.BigEndian.Uint32(raw[headerOffset1:headerOffset1+scion.MetaLen]) + assert headerOffset1 == headerOffset2 + headerOffset := headerOffset1 + hdr1 := binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen]) + hdr2 := binary.BigEndian.Uint32(raw[:length][headerOffset:headerOffset+scion.MetaLen]) assert hdr1 == hdr2 - - metaHdr1 := scion.DecodedFrom(hdr1) - metaHdr2 := scion.DecodedFrom(hdr2) - assert metaHdr1 == metaHdr2 - - currINFIdx1 := int(metaHdr1.CurrINF) - currINFIdx2 := int(metaHdr2.CurrINF) - assert currINFIdx1 == currINFIdx2 - - currHFIdx1 := int(metaHdr1.CurrHF) - currHFIdx2 := int(metaHdr2.CurrHF) - assert currHFIdx1 == currHFIdx2 - - seg1Len1 := int(metaHdr1.SegLen[0]) - seg1Len2 := int(metaHdr2.SegLen[0]) - assert seg1Len1 == seg1Len2 - - seg2Len1 := int(metaHdr1.SegLen[1]) - seg2Len2 := int(metaHdr2.SegLen[1]) - assert seg2Len1 == seg2Len2 - - seg3Len1 := int(metaHdr1.SegLen[2]) - seg3Len2 := int(metaHdr2.SegLen[2]) - assert seg3Len1 == seg3Len2 - - segLen1 := scion.LengthOfCurrSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) - segLen2 := scion.LengthOfCurrSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) - assert segLen1 == segLen2 - - prevSegLen1 := scion.LengthOfPrevSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) - prevSegLen2 := scion.LengthOfPrevSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) - assert prevSegLen1 == prevSegLen2 - - numINF1 := scion.NumInfoFields(seg1Len1, seg2Len1, seg3Len1) - numINF2 := scion.NumInfoFields(seg1Len2, seg2Len2, seg3Len2) - assert numINF1 == numINF2 - - offset1 := scion.HopFieldOffset(numINF1, 0, headerOffset1) - offset2 := scion.HopFieldOffset(numINF2, 0, headerOffset2) - assert offset1 == offset2 - - currSegWidenLemma(raw, offset1+prevSegLen1, currINFIdx1, 
currHFIdx1-prevSegLen1, segLen1, headerOffset1, length) - currSeg1 := scion.CurrSeg(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, segLen1, headerOffset1) - currSeg2 := scion.CurrSeg(raw[:length], offset2+prevSegLen2, currINFIdx2, currHFIdx2-prevSegLen2, segLen2, headerOffset2) - assert currSeg1 == currSeg2 - - leftSegWidenLemma(raw, currINFIdx1 + 1, seg1Len1, seg2Len1 , seg3Len1, headerOffset1, length) - leftSeg1 := scion.LeftSeg(raw, currINFIdx1 + 1, seg1Len1, seg2Len1 , seg3Len1, headerOffset1) - leftSeg2 := scion.LeftSeg(raw[:length], currINFIdx2 + 1, seg1Len2, seg2Len2 , seg3Len2, headerOffset2) - assert leftSeg1 == leftSeg2 - - midSegWidenLemma(raw, currINFIdx1 + 2, seg1Len1, seg2Len1 , seg3Len1, headerOffset1, length) - midSeg1 := scion.MidSeg(raw, currINFIdx1 + 2, seg1Len1, seg2Len1 , seg3Len1, headerOffset1) - midSeg2 := scion.MidSeg(raw[:length], currINFIdx2 + 2, seg1Len2, seg2Len2 , seg3Len2, headerOffset2) - assert midSeg1 == midSeg2 - - rightSegWidenLemma(raw, currINFIdx1 - 1, seg1Len1, seg2Len1 , seg3Len1, headerOffset1, length) - rightSeg1 := scion.RightSeg(raw, currINFIdx1 - 1, seg1Len1, seg2Len1 , seg3Len1, headerOffset1) - rightSeg2 := scion.RightSeg(raw[:length], currINFIdx2 - 1, seg1Len2, seg2Len2 , seg3Len2, headerOffset2) - assert rightSeg1 == rightSeg2 - - ret1 := io.IO_pkt2(io.IO_Packet2{ - CurrSeg : currSeg1, - LeftSeg : leftSeg1, - MidSeg : midSeg1, - RightSeg : rightSeg1, - }) - ret2 := io.IO_pkt2(io.IO_Packet2{ - CurrSeg : currSeg2, - LeftSeg : leftSeg2, - MidSeg : midSeg2, - RightSeg : rightSeg2, - }) - - reveal absPkt(dp, raw) - reveal absPkt(dp, raw[:length]) - assert ret1 == absPkt(dp, raw) - assert ret2 == absPkt(dp, raw[:length]) - assert ret1 == ret2 + hdr := hdr1 + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) + + metaHdr := scion.DecodedFrom(hdr) + currINFIdx := int(metaHdr.CurrINF) + currHFIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) + offset := scion.HopFieldOffset(numINF, 0, headerOffset) + + currSegWidenLemma(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffset, 0, length) + leftSegWidenLemma(raw, currINFIdx + 1, seg1Len, seg2Len, seg3Len, headerOffset, 0, length) + midSegWidenLemma(raw, currINFIdx + 2, seg1Len, seg2Len, seg3Len, headerOffset, 0, length) + rightSegWidenLemma(raw, currINFIdx - 1, seg1Len, seg2Len, seg3Len, headerOffset, 0, length) + + assert reveal absPkt(dp, raw) == reveal absPkt(dp, raw[:length]) } ghost -requires 0 <= headerOffset +requires 0 <= start && start <= headerOffset requires path.InfoFieldOffset(currINFIdx, headerOffset) + path.InfoLen <= offset requires 0 < segLen requires offset + path.HopLen * segLen <= length -requires length <= len(raw) +requires length <= len(raw) requires 0 <= currHFIdx && currHFIdx <= segLen requires 0 <= currINFIdx && currINFIdx < 3 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) ensures scion.CurrSeg(raw, offset, currINFIdx, currHFIdx, segLen, headerOffset) == - scion.CurrSeg(raw[:length], offset, currINFIdx, currHFIdx, 
segLen, headerOffset)
+	scion.CurrSeg(raw[start:length], offset-start, currINFIdx, currHFIdx, segLen, headerOffset-start)
 decreases
-func currSegWidenLemma(raw []byte, offset int, currINFIdx int, currHFIdx int, segLen int, headerOffset int, length int) {
+func currSegWidenLemma(raw []byte, offset int, currINFIdx int, currHFIdx int, segLen int, headerOffset int, start int, length int) {
 	unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53)
-	unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53)
+	unfold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R53)
 	ainfo1 := path.Timestamp(raw, currINFIdx, headerOffset)
-	ainfo2 := path.Timestamp(raw[:length], currINFIdx, headerOffset)
+	ainfo2 := path.Timestamp(raw[start:length], currINFIdx, headerOffset-start)
 	assert ainfo1 == ainfo2
 	uinfo1 := path.AbsUinfo(raw, currINFIdx, headerOffset)
-	uinfo2 := path.AbsUinfo(raw[:length], currINFIdx, headerOffset)
+	uinfo2 := path.AbsUinfo(raw[start:length], currINFIdx, headerOffset-start)
 	assert uinfo1 == uinfo2
 	consDir1 := path.ConsDir(raw, currINFIdx, headerOffset)
-	consDir2 := path.ConsDir(raw[:length], currINFIdx, headerOffset)
+	consDir2 := path.ConsDir(raw[start:length], currINFIdx, headerOffset-start)
 	assert consDir1 == consDir2
 	peer1 := path.Peer(raw, currINFIdx, headerOffset)
-	peer2 := path.Peer(raw[:length], currINFIdx, headerOffset)
+	peer2 := path.Peer(raw[start:length], currINFIdx, headerOffset-start)
 	assert peer1 == peer2
-	segmentWidenLemma(raw, offset, currHFIdx, ainfo1, uinfo1, consDir1, peer1, segLen, length)
+	segmentWidenLemma(raw, offset, currHFIdx, ainfo1, uinfo1, consDir1, peer1, segLen, start, length)
 	ret1 := scion.segment(raw, offset, currHFIdx, ainfo1, uinfo1, consDir1, peer1, segLen)
-	ret2 := scion.segment(raw[:length], offset, currHFIdx, ainfo2, uinfo2, consDir2, peer2, segLen)
+	ret2 := scion.segment(raw[start:length], offset-start, currHFIdx, ainfo2, uinfo2, consDir2, peer2, segLen)
 	assert ret1 == reveal scion.CurrSeg(raw, offset, currINFIdx, currHFIdx, segLen, headerOffset)
-	assert ret2 == reveal scion.CurrSeg(raw[:length], offset, currINFIdx, currHFIdx, segLen, headerOffset)
+	assert ret2 == reveal scion.CurrSeg(raw[start:length], offset-start, currINFIdx, currHFIdx, segLen, headerOffset-start)
 	assert ret1 == ret2
 	fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53)
-	fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53)
+	fold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R53)
 }
 
 ghost
-requires 0 <= offset
+requires 0 <= start && start <= offset
 requires 0 < segLen
 requires 0 <= currHFIdx && currHFIdx <= segLen
 requires length <= len(raw)
 requires offset + path.HopLen * segLen <= length
 requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52)
-requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52)
+requires acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R52)
 ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52)
-ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52)
+ensures acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R52)
 ensures scion.segment(raw, offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen) ==
-	scion.segment(raw[:length], offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen)
+	scion.segment(raw[start:length], offset-start, currHFIdx, ainfo, uinfo, consDir, peer, segLen)
 decreases
-func segmentWidenLemma(raw []byte, offset int, currHFIdx int, ainfo io.IO_ainfo, uinfo set[io.IO_msgterm], consDir bool, peer bool, segLen int, length int) {
+func segmentWidenLemma(raw []byte, offset int, currHFIdx int, ainfo io.IO_ainfo, uinfo set[io.IO_msgterm], consDir bool, peer bool, segLen int, start int, length int) {
 	newP := (R52 + R53)/2
 	assert R53 < newP && newP < R52
-	hopFieldsWidenLemma(raw, offset, 0, segLen, length, newP)
+	hopFieldsWidenLemma(raw, offset, 0, segLen, start, length, newP)
 	hopfields1 := scion.hopFields(raw, offset, 0, segLen)
-	hopfields2 := scion.hopFields(raw[:length], offset, 0, segLen)
+	hopfields2 := scion.hopFields(raw[start:length], offset-start, 0, segLen)
 	assert hopfields1 == hopfields2
 	ret1 := io.IO_seg2(io.IO_seg3_{
@@ -289,102 +233,102 @@ func segmentWidenLemma(raw []byte, offset int, currHFIdx int, ainfo io.IO_ainfo,
 		History : scion.segHistory(hopfields2, currHFIdx - 1),
 	})
 	assert ret1 == scion.segment(raw, offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen)
-	assert ret2 == scion.segment(raw[:length], offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen)
+	assert ret2 == scion.segment(raw[start:length], offset-start, currHFIdx, ainfo, uinfo, consDir, peer, segLen)
 	assert ret1 == ret2
 }
 
 ghost
-requires 0 <= middle
+requires 0 <= start && start <= middle
 requires middle + path.HopLen <= length
 requires length <= len(raw)
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54)
-preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54)
+preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R54)
 ensures path.BytesToIO_HF(raw, 0, middle, len(raw)) ==
-	path.BytesToIO_HF(raw[:length], 0, middle, length)
+	path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start)
 decreases
-func BytesToIO_HFWidenLemma(raw []byte, middle int, length int) {
+func BytesToIO_HFWidenLemma(raw []byte, middle int, start int, length int) {
 	unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55)
-	unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55)
+	unfold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R55)
 	hfBytes1 := path.BytesToIO_HF(raw, 0, middle, len(raw))
-	hfBytes2 := path.BytesToIO_HF(raw[:length], 0, middle, length)
+	hfBytes2 := path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start)
 	assert hfBytes1.EgIF2 == hfBytes2.EgIF2
 	assert hfBytes1.InIF2 == hfBytes2.InIF2
 	assert hfBytes1.HVF == hfBytes2.HVF
 	fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55)
-	fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55)
+	fold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R55)
 }
 
 ghost
 requires R53 < p
-requires 0 <= offset
+requires 0 <= start && start <= offset
 requires 0 <= currHFIdx && currHFIdx <= segLen
 requires offset + path.HopLen * segLen <= length
 requires length <= len(raw)
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p)
-preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p)
+preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), p)
 ensures scion.hopFields(raw, offset, currHFIdx, segLen) ==
-	scion.hopFields(raw[:length], offset, currHFIdx, segLen)
+	scion.hopFields(raw[start:length], offset-start, currHFIdx, segLen)
 decreases segLen - currHFIdx
-func hopFieldsWidenLemma(raw []byte, offset int, currHFIdx int, segLen int, length int, p perm) {
+func hopFieldsWidenLemma(raw []byte, offset int, currHFIdx int, segLen int, start int, length int, p perm) {
 	if currHFIdx == segLen {
 		ret := seq[io.IO_HF]{}
 		assert ret == scion.hopFields(raw, offset, currHFIdx, segLen)
-		assert ret == scion.hopFields(raw[:length], offset, currHFIdx, segLen)
+		assert ret == scion.hopFields(raw[start:length], offset - start, currHFIdx, segLen)
 	} else {
-		BytesToIO_HFWidenLemma(raw, offset + path.HopLen * currHFIdx, length)
+		BytesToIO_HFWidenLemma(raw, offset + path.HopLen * currHFIdx, start, length)
 		hf1 := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHFIdx, len(raw))
-		hf2 := path.BytesToIO_HF(raw[:length], 0, offset + path.HopLen * currHFIdx, length)
+		hf2 := path.BytesToIO_HF(raw[start:length], 0, offset + path.HopLen * currHFIdx - start, length - start)
 		assert hf1 == hf2
 		newP := (p + R53)/2
 		assert R53 < newP && newP < p
-		hopFieldsWidenLemma(raw, offset, currHFIdx + 1, segLen, length, newP)
+		hopFieldsWidenLemma(raw, offset, currHFIdx + 1, segLen, start, length, newP)
 		ret1 := seq[io.IO_HF]{hf1} ++ scion.hopFields(raw, offset, currHFIdx + 1, segLen)
-		ret2 := seq[io.IO_HF]{hf2} ++ scion.hopFields(raw[:length], offset, currHFIdx + 1, segLen)
+		ret2 := seq[io.IO_HF]{hf2} ++ scion.hopFields(raw[start:length], offset-start, currHFIdx + 1, segLen)
 		assert ret1 == scion.hopFields(raw, offset, currHFIdx, segLen)
-		assert ret2 == scion.hopFields(raw[:length], offset, currHFIdx, segLen)
+		assert ret2 == scion.hopFields(raw[start:length], offset-start, currHFIdx, segLen)
 		assert ret1 == ret2
 	}
 }
 
 ghost
-requires 0 <= headerOffset
+requires 0 <= start && start <= headerOffset
 requires 0 < seg1Len
 requires 0 <= seg2Len
 requires 0 <= seg3Len
 requires 0 <= length && length <= len(raw)
 requires scion.pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length
-requires 1 <= currINFIdx && currINFIdx < 4 
+requires 1 <= currINFIdx && currINFIdx < 4
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
-preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
+preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51)
 ensures scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) ==
-	scion.LeftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+	scion.LeftSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset-start)
 decreases
-func leftSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, length int) {
+func leftSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, start int, length int) {
 	offset := scion.HopFieldOffset(scion.NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset)
 	if currINFIdx == 1 && seg2Len > 0 {
-		currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset, length)
+		currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset, start, length)
 		ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset))
-		ret2 := some(scion.CurrSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset))
+		ret2 := some(scion.CurrSeg(raw[start:length], offset + path.HopLen * seg1Len - start, currINFIdx, 0, seg2Len, headerOffset - start))
 		assert ret1 == reveal scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
-		assert ret2 == reveal scion.LeftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+		assert ret2 == reveal scion.LeftSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset-start)
 		assert ret1 == ret2
 	} else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 {
-		currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset, length)
+		currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset, start, length)
 		ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset))
-		ret2 := some(scion.CurrSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset))
+		ret2 := some(scion.CurrSeg(raw[start:length], offset + path.HopLen * (seg1Len + seg2Len) - start, currINFIdx, 0, seg3Len, headerOffset - start))
 		assert ret1 == reveal scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
-		assert ret2 == reveal scion.LeftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+		assert ret2 == reveal scion.LeftSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start)
 		assert ret1 == ret2
 	} else {
 		ret := none[io.IO_seg3]
 		assert ret == reveal scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
-		assert ret == reveal scion.LeftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+		assert ret == reveal scion.LeftSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset- start)
 	}
 }
 
 ghost
-requires 0 <= headerOffset
+requires 0 <= start && start <= headerOffset
 requires 0 < seg1Len
 requires 0 <= seg2Len
 requires 0 <= seg3Len
@@ -392,36 +336,35 @@ requires 0 <= length && length <= len(raw)
 requires scion.pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length
 requires -1 <= currINFIdx && currINFIdx < 2
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
-preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
+preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51)
 ensures scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) ==
-	scion.RightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+	scion.RightSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset-start)
 decreases
-func rightSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, length int) {
+func rightSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, start int, length int) {
 	offset := scion.HopFieldOffset(scion.NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset)
 	if currINFIdx == 1 && seg2Len > 0 && seg3Len > 0 {
-		currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset, length)
+		currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset, start, length)
 		ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset))
-		ret2 := some(scion.CurrSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset))
+		ret2 := some(scion.CurrSeg(raw[start:length], offset + path.HopLen * seg1Len - start, currINFIdx, seg2Len, seg2Len, headerOffset - start))
 		assert ret1 == reveal scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
-		assert ret2 == reveal scion.RightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+		assert ret2 == reveal scion.RightSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start)
 		assert ret1 == ret2
 	} else if currINFIdx == 0 && seg2Len > 0 {
-		currSegWidenLemma(raw, offset, currINFIdx, seg1Len, seg1Len, headerOffset, length)
+		currSegWidenLemma(raw, offset, currINFIdx, seg1Len, seg1Len, headerOffset, start, length)
 		ret1 := some(scion.CurrSeg(raw, offset, currINFIdx, seg1Len, seg1Len, headerOffset))
-		ret2 := some(scion.CurrSeg(raw[:length], offset, currINFIdx, seg1Len, seg1Len, headerOffset))
+		ret2 := some(scion.CurrSeg(raw[start:length], offset - start, currINFIdx, seg1Len, seg1Len, headerOffset - start))
 		assert ret1 == reveal scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
-		assert ret2 == reveal scion.RightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+		assert ret2 == reveal scion.RightSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start)
 		assert ret1 == ret2
 	} else {
 		ret := none[io.IO_seg3]
 		assert ret == reveal scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
-		assert ret == reveal scion.RightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+		assert ret == reveal scion.RightSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start)
 	}
 }
-
 ghost
-requires 0 <= headerOffset
+requires 0 <= start && start <= headerOffset
 requires 0 < seg1Len
 requires 0 <= seg2Len
 requires 0 <= seg3Len
@@ -429,29 +372,29 @@ requires 2 <= currINFIdx && currINFIdx < 5
 requires 0 <= length && length <= len(raw)
 requires scion.pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length
 preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
-preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
+preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51)
 ensures scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) ==
-	scion.MidSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+	scion.MidSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start)
 decreases
-func midSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, length int) {
+func midSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, start int, length int) {
 	offset := scion.HopFieldOffset(scion.NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset)
 	if currINFIdx == 4 && seg2Len > 0 {
-		currSegWidenLemma(raw, offset, 0, seg1Len, seg1Len, headerOffset, length)
+		currSegWidenLemma(raw, offset, 0, seg1Len, seg1Len, headerOffset, start, length)
 		ret1 := some(scion.CurrSeg(raw, offset, 0, seg1Len, seg1Len, headerOffset))
-		ret2 := some(scion.CurrSeg(raw[:length], offset, 0, seg1Len, seg1Len, headerOffset))
+		ret2 := some(scion.CurrSeg(raw[start:length], offset - start, 0, seg1Len, seg1Len, headerOffset - start))
 		assert ret1 == reveal scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
-		assert ret2 == reveal scion.MidSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+		assert ret2 == reveal scion.MidSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start)
 		assert ret1 == ret2
 	} else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 {
-		currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset, length)
+		currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset, start, length)
 		ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset))
-		ret2 := some(scion.CurrSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset))
+		ret2 := some(scion.CurrSeg(raw[start:length], offset + path.HopLen * (seg1Len + seg2Len) - start, currINFIdx, 0, seg3Len, headerOffset - start))
 		assert ret1 == reveal scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
-		assert ret2 == reveal scion.MidSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+		assert ret2 == reveal scion.MidSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start)
 		assert ret1 == ret2
 	} else {
 		ret := none[io.IO_seg3]
 		assert ret == reveal scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
-		assert ret == reveal scion.MidSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset)
+		assert ret == reveal scion.MidSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start)
 	}
-}
+}
\ No newline at end of file

From 37fe92f1ed13d1875f8b66a46c2abe52318504aa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Pereira?=
Date: Tue, 7 May 2024 22:53:19 +0200
Subject: [PATCH 33/57] Enable `conditionalizePermissions` for the `router` (#340)

Marco observed that a long time is spent on (sequential) pure function
verification in the router package. He also suggested that using
`conditionalizePermissions` might reduce the number of branches in these
functions (`moreJoins 1` does not have any effect on pure functions),
which might speed up verification.
---
 .github/workflows/gobra.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml
index d65b8277e..7cc509301 100644
--- a/.github/workflows/gobra.yml
+++ b/.github/workflows/gobra.yml
@@ -417,7 +417,7 @@ jobs:
       checkConsistency: ${{ env.checkConsistency }}
       chop: 10
       parallelizeBranches: '1'
-      conditionalizePermissions: '0'
+      conditionalizePermissions: '1'
       moreJoins: 'impure'
       imageVersion: ${{ env.imageVersion }}
       mceMode: 'on'

From e939e6d0143450df69042d9ddba45a4e0d9e06c9 Mon Sep 17 00:00:00 2001
From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com>
Date: Thu, 9 May 2024 14:56:40 +0200
Subject: [PATCH 34/57] Refactoring of absPkt (#341)

* drop DataPlaneSpec param from absPkt

* headerOffset change in absPkt

* fix syntax error

* fix verification errors

* fix verification errors

* fix LastHopLemma
---
 pkg/slayers/path/infofield_spec.gobra       |   4 +-
 pkg/slayers/path/scion/base_spec_test.gobra |   7 -
 pkg/slayers/path/scion/raw.go               |  10 +-
 pkg/slayers/path/scion/raw_spec.gobra       |  33 +-
 pkg/slayers/scion_spec.gobra                |   3 +-
 router/dataplane.go                         | 481 +++++++++-----------
 router/io-spec-abstract-transitions.gobra   |   4 -
 router/io-spec-lemmas.gobra                 |  83 ++--
 router/io-spec.gobra                        |  29 +-
 router/widen-lemma.gobra                    |  41 +-
 10 files changed, 318 insertions(+), 377 deletions(-)

diff --git a/pkg/slayers/path/infofield_spec.gobra b/pkg/slayers/path/infofield_spec.gobra
index b0da954d4..acc19895b 100644
--- a/pkg/slayers/path/infofield_spec.gobra
+++ b/pkg/slayers/path/infofield_spec.gobra
@@ -23,12 +23,10 @@ import (
 	. "verification/utils/definitions"
"verification/utils/definitions" ) -ghost const MetaLen = 4 - ghost decreases pure func InfoFieldOffset(currINF, headerOffset int) int { - return headerOffset + MetaLen + InfoLen * currINF + return headerOffset + InfoLen * currINF } ghost diff --git a/pkg/slayers/path/scion/base_spec_test.gobra b/pkg/slayers/path/scion/base_spec_test.gobra index 5d07d6a53..9516e2cf6 100644 --- a/pkg/slayers/path/scion/base_spec_test.gobra +++ b/pkg/slayers/path/scion/base_spec_test.gobra @@ -23,11 +23,4 @@ import ( func canAllocateBase() { b := &Base{} fold b.Mem() -} - -ghost -ensures res -decreases -pure func validMetaLenInPath() (res bool) { - return MetaLen == path.MetaLen } \ No newline at end of file diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index 7414b11d9..8c73fce79 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -323,7 +323,7 @@ func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoFie // @ requires sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) // @ requires acc(s.Mem(ubuf), R20) // pres for IO: -// @ requires dp.Valid() && validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) +// @ requires validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) // @ ensures acc(s.Mem(ubuf), R20) // @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) // @ ensures r != nil ==> r.ErrorMem() @@ -331,11 +331,11 @@ func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoFie // @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> // @ validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) // @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> -// @ let oldPkt := old(s.absPkt(dp, ubuf)) in +// @ let oldPkt := old(s.absPkt(ubuf)) in // @ let newPkt := AbsSetInfoField(oldPkt, info.ToIntermediateAbsInfoField()) in -// @ s.absPkt(dp, ubuf) == newPkt +// @ s.absPkt(ubuf) == newPkt // @ decreases -func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte, ghost dp io.DataPlaneSpec@*/) (r error) { +func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @*/) (r error) { //@ share info //@ ghost oldCurrINF := int(old(s.GetCurrINF(ubuf))) //@ unfold acc(s.Mem(ubuf), R50) @@ -375,7 +375,7 @@ func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte, //@ fold acc(s.Base.Mem(), R50) //@ fold acc(s.Mem(ubuf), R50) //@ assert idx == oldCurrINF ==> reveal validPktMetaHdr(ubuf) - //@ TemporaryAssumeForIO(idx == oldCurrINF ==> s.absPkt(dp, ubuf) == AbsSetInfoField(old(s.absPkt(dp, ubuf)), info.ToIntermediateAbsInfoField())) + //@ TemporaryAssumeForIO(idx == oldCurrINF ==> s.absPkt(ubuf) == AbsSetInfoField(old(s.absPkt(ubuf)), info.ToIntermediateAbsInfoField())) return ret } diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index 848f1d808..9cec61b93 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -532,14 +532,12 @@ pure func MidSeg( ghost opaque -requires dp.Valid() requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) requires validPktMetaHdr(raw) decreases -pure func (s *Raw) absPkt(dp io.DataPlaneSpec, raw []byte) (res io.IO_pkt2) { +pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { return let _ := reveal validPktMetaHdr(raw) in - let hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in binary.BigEndian.Uint32(raw[:MetaLen])) in - let metaHdr := DecodedFrom(hdr) in + let metaHdr := RawBytesToMetaHdr(raw) in let currINFIdx := int(metaHdr.CurrINF) in let currHFIdx := 
int(metaHdr.CurrHF) in let seg1Len := int(metaHdr.SegLen[0]) in @@ -548,12 +546,12 @@ pure func (s *Raw) absPkt(dp io.DataPlaneSpec, raw []byte) (res io.IO_pkt2) { let segLen := LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in let prevSegLen := LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in let numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) in - let offset := HopFieldOffset(numINF, 0, 0) in + let offset := HopFieldOffset(numINF, 0, MetaLen) in io.IO_pkt2(io.IO_Packet2{ - CurrSeg : CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, 0), - LeftSeg : LeftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, 0), - MidSeg : MidSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, 0), - RightSeg : RightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, 0), + CurrSeg : CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, MetaLen), + LeftSeg : LeftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, MetaLen), + MidSeg : MidSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, MetaLen), + RightSeg : RightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, MetaLen), }) } @@ -596,7 +594,7 @@ pure func validPktMetaHdr(raw []byte) bool { let base := RawBytesToBase(raw) in 0 < metaHdr.SegLen[0] && base.ValidCurrIdxsSpec() && - pktLen(seg1, seg2, seg3, 0) <= len(raw) + pktLen(seg1, seg2, seg3, MetaLen) <= len(raw) } ghost @@ -644,16 +642,13 @@ ghost preserves acc(s.Mem(ubuf), R55) preserves s.IsLastHopSpec(ubuf) preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) -preserves dp.Valid() preserves validPktMetaHdr(ubuf) preserves s.EqAbsHeader(ubuf) -ensures len(s.absPkt(dp, ubuf).CurrSeg.Future) == 1 +ensures len(s.absPkt(ubuf).CurrSeg.Future) == 1 decreases -func (s *Raw) LastHopLemma(ubuf []byte, dp io.DataPlaneSpec) { +func (s *Raw) LastHopLemma(ubuf []byte) { reveal validPktMetaHdr(ubuf) - hdr := (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) in - binary.BigEndian.Uint32(ubuf[:MetaLen])) - metaHdr := DecodedFrom(hdr) + metaHdr := RawBytesToMetaHdr(ubuf) currINFIdx := int(metaHdr.CurrINF) currHFIdx := int(metaHdr.CurrHF) seg1Len := int(metaHdr.SegLen[0]) @@ -662,8 +657,8 @@ func (s *Raw) LastHopLemma(ubuf []byte, dp io.DataPlaneSpec) { segLen := LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) prevSegLen := LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := HopFieldOffset(numINF, 0, 0) - pkt := reveal s.absPkt(dp, ubuf) - assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, 0) + offset := HopFieldOffset(numINF, 0, MetaLen) + pkt := reveal s.absPkt(ubuf) + assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, MetaLen) assert len(pkt.CurrSeg.Future) == 1 } \ No newline at end of file diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index ea1877e3c..db41783ec 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -452,10 +452,9 @@ pure func ValidPktMetaHdr(raw []byte) bool { let seg2 := int(metaHdr.SegLen[1]) in let seg3 := int(metaHdr.SegLen[2]) in let base := scion.Base{metaHdr, scion.NumInfoFields(seg1, seg2, seg3), seg1+seg2+seg3} in - metaHdr.InBounds() && 0 < metaHdr.SegLen[0] && base.ValidCurrIdxsSpec() && - scion.pktLen(seg1, seg2, seg3, start) <= length + scion.pktLen(seg1, seg2, seg3, start + scion.MetaLen) <= length } ghost diff --git a/router/dataplane.go b/router/dataplane.go index 
8268067e7..3148da9cd 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -133,16 +133,15 @@ type BatchConn interface { // contracts for IO-spec // @ requires Prophecy(prophecyM) // @ requires io.token(place) && MultiReadBio(place, prophecyM) - // @ preserves dp.Valid() // @ ensures err != nil ==> prophecyM == 0 // @ ensures err == nil ==> prophecyM == n // @ ensures io.token(old(MultiReadBioNext(place, prophecyM))) // @ ensures old(MultiReadBioCorrectIfs(place, prophecyM, path.ifsToIO_ifs(ingressID))) // @ ensures err == nil ==> // @ forall i int :: { &msgs[i] } 0 <= i && i < n ==> - // @ MsgToAbsVal(dp, &msgs[i], ingressID) == old(MultiReadBioIO_val(place, n)[i]) + // @ MsgToAbsVal(&msgs[i], ingressID) == old(MultiReadBioIO_val(place, n)[i]) // TODO (VerifiedSCION): uint16 or option[io.IO_ifs] for ingress - ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place, ghost dp io.DataPlaneSpec @*/) (n int, err error) + ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place @*/) (n int, err error) // @ requires acc(addr.Mem(), _) // @ requires acc(Mem(), _) // @ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R10) @@ -158,14 +157,12 @@ type BatchConn interface { // @ ensures err == nil ==> 0 <= n && n <= len(msgs) // @ ensures err != nil ==> err.ErrorMem() // contracts for IO-spec - // @ requires dp.Valid() - // @ requires MsgToAbsVal(dp, &msgs[0], egressID) == ioAbsPkts + // @ requires MsgToAbsVal(&msgs[0], egressID) == ioAbsPkts // @ requires io.token(place) && io.CBioIO_bio3s_send(place, ioAbsPkts) - // @ ensures dp.Valid() // (VerifiedSCION) the permission to the protocol must always be returned, otherwise the router could not continue // after failing to send a packet. 
 	// @ ensures io.token(old(io.dp3s_iospec_bio3s_send_T(place, ioAbsPkts)))
-	WriteBatch(msgs underlayconn.Messages, flags int /*@, ghost egressID uint16, ghost place io.Place, ghost ioAbsPkts io.IO_val, ghost dp io.DataPlaneSpec @*/) (n int, err error)
+	WriteBatch(msgs underlayconn.Messages, flags int /*@, ghost egressID uint16, ghost place io.Place, ghost ioAbsPkts io.IO_val @*/) (n int, err error)
 	// @ requires Mem()
 	// @ ensures err != nil ==> err.ErrorMem()
 	// @ decreases
@@ -896,18 +893,18 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
 			// @ ghost tN := MultiReadBioNext(t, numberOfReceivedPacketsProphecy)
 			// @ assert dp.dp3s_iospec_ordered(sN, tN)
 			// @ BeforeReadBatch:
-			pkts, err := rd.ReadBatch(msgs /*@, ingressID, numberOfReceivedPacketsProphecy, t , dp @*/)
+			pkts, err := rd.ReadBatch(msgs /*@, ingressID, numberOfReceivedPacketsProphecy, t @*/)
 			// @ assert old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)) == ioValSeq
 			// @ assert err == nil ==>
 			// @ 	forall i int :: { &msgs[i] } 0 <= i && i < pkts ==>
 			// @ 	ioValSeq[i] == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i])
 			// @ assert err == nil ==>
-			// @ 	forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i]
+			// @ 	forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> MsgToAbsVal(&msgs[i], ingressID) == ioValSeq[i]
 			// @ ghost *ioSharedArg.State = sN
 			// @ ghost *ioSharedArg.Place = tN
 			// @ assert err == nil ==>
 			// @ 	forall i int :: { &msgs[i] } 0 <= i && i < pkts ==>
-			// @ 	MsgToAbsVal(dp, &msgs[i], ingressID) == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i])
+			// @ 	MsgToAbsVal(&msgs[i], ingressID) == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i])
 			// @ MultiElemWitnessConv(ioSharedArg.IBufY, ioIngressID, ioValSeq)
 			// @ fold SharedInv!< dp, ioSharedArg !>()
 			// @ ioLock.Unlock()
@@ -930,7 +927,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
 		// @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==>
 		// @ 	msgs[i].GetN() <= len(msgs[i].GetFstBuffer())
 		// @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==>
-		// @ 	MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i]
+		// @ 	MsgToAbsVal(&msgs[i], ingressID) == ioValSeq[i]
 
 		// (VerifiedSCION) using regular for loop instead of range loop to avoid unnecessary
 		// complications with permissions
@@ -960,7 +957,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
 		// @ invariant ioIngressID == path.ifsToIO_ifs(ingressID)
 		// @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>;
 		// @ invariant forall i int :: { &msgs[i] } i0 <= i && i < pkts ==>
-		// @ 	MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i]
+		// @ 	MsgToAbsVal(&msgs[i], ingressID) == ioValSeq[i]
 		// @ invariant MultiElemWitnessWithIndex(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0)
 		// @ decreases pkts - i0
 		for i0 := 0; i0 < pkts; i0++ {
@@ -996,10 +993,10 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
 			// @ assert p.N <= len(p.Buffers[0])
 			// @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm)
 			tmpBuf := p.Buffers[0][:p.N]
-			// @ ghost absPktTmpBuf := absIO_val(dp, tmpBuf, ingressID)
-			// @ ghost absPktBuf0 := absIO_val(dp, msgs[i0].Buffers[0], ingressID)
+			// @ ghost absPktTmpBuf := absIO_val(tmpBuf, ingressID)
+			// @ ghost absPktBuf0 := absIO_val(msgs[i0].Buffers[0], ingressID)
 			// @ assert msgs[i0] === p
-			// @ absIO_valWidenLemma(dp, p.Buffers[0], ingressID, p.N)
+			// @ absIO_valWidenLemma(p.Buffers[0], ingressID, p.N)
 			// @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf === absPktBuf0
 			// @ MultiElemWitnessStep(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0)
 			// @ assert ioValSeq[i0].isIO_val_Pkt2 ==>
@@ -1079,8 +1076,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
 				writeMsgs[0].Addr = result.OutAddr
 			}
 			// @ sl.NilAcc_Bytes()
-			// @ assert absIO_val(dp, result.OutPkt, result.EgressID) == absIO_val(dp, writeMsgs[0].Buffers[0], result.EgressID)
-			// @ assert result.OutPkt != nil ==> newAbsPkt == absIO_val(dp, writeMsgs[0].Buffers[0], result.EgressID)
+			// @ assert absIO_val(result.OutPkt, result.EgressID) == absIO_val(writeMsgs[0].Buffers[0], result.EgressID)
+			// @ assert result.OutPkt != nil ==> newAbsPkt == absIO_val(writeMsgs[0].Buffers[0], result.EgressID)
 			// @ fold acc(writeMsgs[0].Mem(), R50)
 
 			// @ ghost ioLock.Lock()
@@ -1095,7 +1092,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
 			// @ unfold dp.dp3s_iospec_bio3s_send(s, t)
 			// @ io.TriggerBodyIoSend(newAbsPkt)
 			// @ ghost tN := io.dp3s_iospec_bio3s_send_T(t, newAbsPkt)
-			_, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT /*@, result.EgressID, t, newAbsPkt, dp @*/)
+			_, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT /*@, result.EgressID, t, newAbsPkt @*/)
 			// @ ghost *ioSharedArg.Place = tN
 			// @ fold SharedInv!< dp, ioSharedArg !>()
 			// @ ghost ioLock.Unlock()
@@ -1446,11 +1443,10 @@ func (p *scionPacketProcessor) reset() (err error) {
 // contracts for IO-spec
 // @ requires dp.Valid()
 // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>;
-// @ requires let absPkt := absIO_val(dp, rawPkt, p.getIngressID()) in
+// @ requires let absPkt := absIO_val(rawPkt, p.getIngressID()) in
 // @ 	absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.getIngressID()), absPkt.IO_val_Pkt2_2)
-// @ ensures dp.Valid()
 // @ ensures respr.OutPkt != nil ==>
-// @ 	newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID)
+// @ 	newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID)
 // @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{})
 // @ ensures newAbsPkt.isIO_val_Pkt2 ==>
 // @ 	ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2)
@@ -1569,7 +1565,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte,
 		// @ unfold acc(p.d.Mem(), _)
 		// @ assert reveal p.scionLayer.EqPathType(p.rawPkt)
 		// @ assert !(reveal slayers.IsSupportedPkt(p.rawPkt))
-		v1, v2 /*@, aliasesPkt, newAbsPkt @*/ := p.processOHP( /* @ dp @ */ )
+		v1, v2 /*@, aliasesPkt, newAbsPkt @*/ := p.processOHP()
 		// @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer)
 		// @ fold p.sInit()
 		return v1, v2 /*@, aliasesPkt, newAbsPkt @*/
@@ -1751,12 +1747,12 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) {
 // @ requires (typeOf(p.scionLayer.GetPath(ub)) == *scion.Raw) ==> p.scionLayer.EqAbsHeader(ub)
 // @ requires p.scionLayer.EqPathType(ub)
 // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>;
-// @ requires let absPkt := absIO_val(dp, p.rawPkt, p.ingressID) in
+// @ requires let absPkt := absIO_val(p.rawPkt, p.ingressID) in
 // @ 	absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2)
 // @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==>
 // @ 	ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2)
 // @ ensures respr.OutPkt != nil ==>
-// @ 	newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID)
+// @ 	newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID)
 // @ ensures reserr != nil && respr.OutPkt != nil ==>
 // @ 	newAbsPkt.isIO_val_Unsupported
 // @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{})
@@ -1881,15 +1877,13 @@ type macBuffersT struct {
 
 // @ trusted
 // @ requires false
-// @ requires dp.Valid()
 // @ ensures reserr != nil && respr.OutPkt != nil ==>
-// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
+// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 func (p *scionPacketProcessor) packSCMP(
 	typ slayers.SCMPType,
 	code slayers.SCMPCode,
 	scmpP gopacket.SerializableLayer,
 	cause error,
-	/* @ ghost dp io.DataPlaneSpec, @*/
 ) (respr processResult, reserr error) {
 
 	// check invoking packet was an SCMP error:
@@ -1933,17 +1927,15 @@ func (p *scionPacketProcessor) packSCMP(
 // @ 	p.path.GetCurrINF(ubPath) < p.path.GetNumINF(ubPath))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
-// @ requires dp.Valid()
 // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0
-// @ ensures dp.Valid()
+// @ requires len(absPkt(ub).CurrSeg.Future) > 0
 // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0
-// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub))
-// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(dp, ub))
+// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0
+// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub))
+// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub))
 // @ ensures respr.OutPkt == nil
 // @ decreases
-func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
+func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) {
 	var err error
 	// @ unfold acc(p.scionLayer.Mem(ub), R6)
 	// @ defer fold acc(p.scionLayer.Mem(ub), R6)
@@ -1964,9 +1956,9 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce
 		return processResult{}, err
 	}
 	// @ TemporaryAssumeForIO(slayers.ValidPktMetaHdr(ub))
-	// @ TemporaryAssumeForIO(len(absPkt(dp, ub).CurrSeg.Future) > 0)
-	// @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub)))
-	// @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(dp, ub)))
+	// @ TemporaryAssumeForIO(len(absPkt(ub).CurrSeg.Future) > 0)
+	// @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub)))
+	// @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub)))
 	return processResult{}, nil
 }
 
@@ -1978,11 +1970,10 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce
 // @ 	reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
-// @ requires dp.Valid()
 // @ ensures reserr != nil && respr.OutPkt != nil ==>
-// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
+// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases
-func (p *scionPacketProcessor) validateHopExpiry( /*@ ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
+func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr error) {
 	expiration := util.SecsToTime(p.infoField.Timestamp).
 		Add(path.ExpTimeToDuration(p.hopField.ExpTime))
 	expired := expiration.Before(time.Now())
@@ -2007,7 +1998,6 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr
 		&slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )},
 		serrors.New("expired hop", "cons_dir", p.infoField.ConsDir, "if_id", p.ingressID,
 			"curr_inf", p.path.PathMeta.CurrINF, "curr_hf", p.path.PathMeta.CurrHF),
-		/*@ dp, @*/
 	)
 }
 
@@ -2026,15 +2016,14 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr
 // @ ensures reserr == nil && !p.infoField.ConsDir ==> (
 // @ 	p.ingressID == 0 || p.hopField.ConsEgress == p.ingressID)
 // contracts for IO-spec
-// @ requires dp.Valid()
 // @ requires len(oldPkt.CurrSeg.Future) > 0
 // @ requires p.EqAbsHopField(oldPkt)
 // @ requires p.EqAbsInfoField(oldPkt)
 // @ ensures reserr == nil ==> AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID))
 // @ ensures reserr != nil && respr.OutPkt != nil ==>
-// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
+// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases
-func (p *scionPacketProcessor) validateIngressID( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
+func (p *scionPacketProcessor) validateIngressID( /*@ ghost oldPkt io.IO_pkt2 @*/ ) (respr processResult, reserr error) {
 	pktIngressID := p.hopField.ConsIngress
 	errCode := slayers.SCMPCodeUnknownHopFieldIngress
 	if !p.infoField.ConsDir {
@@ -2049,7 +2038,6 @@ func (p *scionPacketProcessor) validateIngressID( /*@ ghost oldPkt io.IO_pkt2 @*
 			&slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )},
 			serrors.New("ingress interface invalid",
 				"pkt_ingress", pktIngressID, "router_ingress", p.ingressID),
-			/*@ dp, @*/
 		)
 	}
 	// @ reveal p.EqAbsHopField(oldPkt)
@@ -2073,22 +2061,22 @@ func (p *scionPacketProcessor) validateIngressID( /*@ ghost oldPkt io.IO_pkt2 @*
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
 // @ requires acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R20)
-// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ubScionL) && p.scionLayer.EqAbsHeader(ubScionL)
+// @ requires slayers.ValidPktMetaHdr(ubScionL) && p.scionLayer.EqAbsHeader(ubScionL)
 // @ ensures acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R20)
-// @ ensures reserr == nil ==> dp.Valid() && slayers.ValidPktMetaHdr(ubScionL)
+// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ubScionL)
 // @ ensures reserr == nil ==> p.DstIsLocalIngressID(ubScionL)
-// @ ensures reserr == nil ==> p.LastHopLen(ubScionL, dp)
+// @ ensures reserr == nil ==> p.LastHopLen(ubScionL)
 // @ ensures reserr != nil && respr.OutPkt != nil ==>
-// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
+// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases
-func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
+func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ ) (respr processResult, reserr error) {
 	// @ unfold acc(p.scionLayer.Mem(ubScionL), R20)
 	// @ defer fold acc(p.scionLayer.Mem(ubScionL), R20)
 	// @ ghost startP := p.scionLayer.PathStartIdx(ubScionL)
 	// @ ghost endP := p.scionLayer.PathEndIdx(ubScionL)
 	// @ ghost ubPath := ubScionL[startP:endP]
 	// @ sl.SplitRange_Bytes(ubScionL, startP, endP, R50)
-	// @ p.AbsPktToSubSliceAbsPkt(ubScionL, startP, endP, dp)
+	// @ p.AbsPktToSubSliceAbsPkt(ubScionL, startP, endP)
 	// @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ubScionL, startP)
 	// @ ghost defer sl.CombineRange_Bytes(ubScionL, startP, endP, R50)
 	// @ unfold acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20)
@@ -2103,26 +2091,26 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ )
 		// don't start with the first hop.
 		if p.path.IsFirstHop( /*@ ubPath @*/ ) && !srcIsLocal {
 			// @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP
-			return p.invalidSrcIA( /*@ dp @*/ )
+			return p.invalidSrcIA()
 		}
 		if dstIsLocal {
 			// @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP
-			return p.invalidDstIA( /*@ dp @*/ )
+			return p.invalidDstIA()
 		}
 	} else {
 		// Inbound
 		if srcIsLocal {
 			// @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP
-			return p.invalidSrcIA( /*@ dp @*/ )
+			return p.invalidSrcIA()
 		}
 		if p.path.IsLastHop( /*@ ubPath @*/ ) != dstIsLocal {
 			// @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP
-			return p.invalidDstIA( /*@ dp @*/ )
+			return p.invalidDstIA()
 		}
 		// @ ghost if(p.path.IsLastHopSpec(ubPath)) {
-		// @ 	p.path.LastHopLemma(ubPath, dp)
+		// @ 	p.path.LastHopLemma(ubPath)
 		// @ 	p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ubScionL, startP)
-		// @ 	p.SubSliceAbsPktToAbsPkt(ubScionL, startP, endP, dp)
+		// @ 	p.SubSliceAbsPktToAbsPkt(ubScionL, startP, endP)
 		// @ }
 	}
 	// @ fold p.d.validResult(processResult{}, false)
@@ -2134,35 +2122,31 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ )
 	// @ 	(unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in
 	// @ 	p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.path.IsLastHopSpec(ubPath)
 	// @ assert reveal p.DstIsLocalIngressID(ubScionL)
-	// @ assert reveal p.LastHopLen(ubScionL, dp)
+	// @ assert reveal p.LastHopLen(ubScionL)
 	return processResult{}, nil
 }
 
 // invalidSrcIA is a helper to return an SCMP error for an invalid SrcIA.
 // @ trusted
-// @ requires dp.Valid()
 // @ requires false
-func (p *scionPacketProcessor) invalidSrcIA( /*@ ghost dp io.DataPlaneSpec @*/ ) (processResult, error) {
+func (p *scionPacketProcessor) invalidSrcIA() (processResult, error) {
	return p.packSCMP(
 		slayers.SCMPTypeParameterProblem,
 		slayers.SCMPCodeInvalidSourceAddress,
 		&slayers.SCMPParameterProblem{Pointer: uint16(slayers.CmnHdrLen + addr.IABytes)},
 		invalidSrcIA,
-		/*@ dp, @*/
 	)
 }
 
 // invalidDstIA is a helper to return an SCMP error for an invalid DstIA.
 // @ trusted
-// @ requires dp.Valid()
 // @ requires false
-func (p *scionPacketProcessor) invalidDstIA( /*@ ghost dp io.DataPlaneSpec @*/ ) (processResult, error) {
+func (p *scionPacketProcessor) invalidDstIA() (processResult, error) {
 	return p.packSCMP(
 		slayers.SCMPTypeParameterProblem,
 		slayers.SCMPCodeInvalidDestinationAddress,
 		&slayers.SCMPParameterProblem{Pointer: uint16(slayers.CmnHdrLen)},
 		invalidDstIA,
-		/*@ dp, @*/
 	)
 }
 
@@ -2259,7 +2243,7 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @
 // @ ensures reserr == nil && p.segmentChange ==> oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0
 // @ ensures reserr == nil && p.segmentChange ==> p.ingressID != 0 && AbsValidateEgressIDConstraintXover(oldPkt, dp)
 // @ ensures reserr != nil && respr.OutPkt != nil ==>
-// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
+// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases
 func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
 	pktEgressID := p.egressInterface( /*@ oldPkt @*/ )
@@ -2284,7 +2268,6 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh
 			errCode,
 			&slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )},
 			cannotRoute,
-			/*@ dp, @*/
 		)
 	}
 	// @ p.d.getDomExternalLemma()
@@ -2320,7 +2303,7 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh
 				slayers.SCMPCodeInvalidPath, // XXX(matzf) new code InvalidHop?
 				&slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )},
 				serrors.WithCtx(cannotRoute, "ingress_id", p.ingressID, "ingress_type", ingress,
-					"egress_id", pktEgressID, "egress_type", egress) /*@, dp, @*/)
+					"egress_id", pktEgressID, "egress_type", egress))
 		}
 	}
 	// @ assert reveal AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID))
@@ -2346,7 +2329,7 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh
 			slayers.SCMPCodeInvalidSegmentChange,
 			&slayers.SCMPParameterProblem{Pointer: p.currentInfoPointer( /*@ nil @*/ )},
 			serrors.WithCtx(cannotRoute, "ingress_id", p.ingressID, "ingress_type", ingress,
-				"egress_id", pktEgressID, "egress_type", egress) /*@, dp, @*/)
+				"egress_id", pktEgressID, "egress_type", egress))
 	}
 }
 
@@ -2364,21 +2347,21 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh
 // @ ensures acc(p.scionLayer.Mem(ub), R19)
 // @ ensures err != nil ==> err.ErrorMem()
 // contracts for IO-spec
-// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0
+// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
+// @ requires len(absPkt(ub).CurrSeg.Future) > 0
 // @ requires acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55)
-// @ requires p.LastHopLen(ub, dp)
-// @ requires p.EqAbsHopField(absPkt(dp, ub))
-// @ requires p.EqAbsInfoField(absPkt(dp, ub))
+// @ requires p.LastHopLen(ub)
+// @ requires p.EqAbsHopField(absPkt(ub))
+// @ requires p.EqAbsInfoField(absPkt(ub))
 // @ ensures acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55)
-// @ ensures err == nil ==> dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ ensures err == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0
-// @ ensures err == nil ==> absPkt(dp, ub) == AbsUpdateNonConsDirIngressSegID(old(absPkt(dp, ub)), path.ifsToIO_ifs(p.ingressID))
-// @ ensures err == nil ==> p.LastHopLen(ub, dp)
-// @ ensures err == nil ==> p.EqAbsHopField(absPkt(dp, ub))
-// @ ensures err == nil ==> p.EqAbsInfoField(absPkt(dp, ub))
+// @ ensures err == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
+// @ ensures err == nil ==> len(absPkt(ub).CurrSeg.Future) > 0
+// @ ensures err == nil ==> absPkt(ub) == AbsUpdateNonConsDirIngressSegID(old(absPkt(ub)), path.ifsToIO_ifs(p.ingressID))
+// @ ensures err == nil ==> p.LastHopLen(ub)
+// @ ensures err == nil ==> p.EqAbsHopField(absPkt(ub))
+// @ ensures err == nil ==> p.EqAbsInfoField(absPkt(ub))
 // @ decreases
-func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (err error) {
+func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte @*/ ) (err error) {
 	// @ ghost ubPath := p.scionLayer.UBPath(ub)
 	// @ ghost start := p.scionLayer.PathStartIdx(ub)
 	// @ ghost end := p.scionLayer.PathEndIdx(ub)
@@ -2390,33 +2373,33 @@ func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte
 	// means this comes from this AS itself, so nothing has to be done.
 	// TODO(lukedirtwalker): For packets destined to peer links this shouldn't
 	// be updated.
-	// @ reveal p.EqAbsInfoField(absPkt(dp, ub))
-	// @ reveal p.EqAbsHopField(absPkt(dp, ub))
+	// @ reveal p.EqAbsInfoField(absPkt(ub))
+	// @ reveal p.EqAbsHopField(absPkt(ub))
	if !p.infoField.ConsDir && p.ingressID != 0 {
 		p.infoField.UpdateSegID(p.hopField.Mac /*@, p.hopField.ToIO_HF() @*/)
-		// @ reveal p.LastHopLen(ub, dp)
+		// @ reveal p.LastHopLen(ub)
 		// @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF()))
 		// (VerifiedSCION) the following property is guaranteed by the type system, but Gobra cannot infer it yet
 		// @ assume 0 <= p.path.GetCurrINF(ubPath)
 		// @ sl.SplitRange_Bytes(ub, start, end, HalfPerm)
-		// @ p.AbsPktToSubSliceAbsPkt(ub, start, end, dp)
+		// @ p.AbsPktToSubSliceAbsPkt(ub, start, end)
 		// @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, start)
 		// @ sl.SplitRange_Bytes(ub, start, end, HalfPerm)
-		if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubPath , dp@*/); err != nil {
+		if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubPath, @*/); err != nil {
 			// @ ghost sl.CombineRange_Bytes(ub, start, end, writePerm)
 			return serrors.WrapStr("update info field", err)
 		}
 		// @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm)
 		// @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, start)
-		// @ p.SubSliceAbsPktToAbsPkt(ub, start, end, dp)
+		// @ p.SubSliceAbsPktToAbsPkt(ub, start, end)
 		// @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm)
-		// @ absPktFutureLemma(dp, ub)
-		// @ assert absPkt(dp, ub).CurrSeg.UInfo == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF()))
-		// @ assert reveal p.EqAbsInfoField(absPkt(dp, ub))
-		// @ assert reveal p.EqAbsHopField(absPkt(dp, ub))
-		// @ assert reveal p.LastHopLen(ub, dp)
+		// @ absPktFutureLemma(ub)
+		// @ assert absPkt(ub).CurrSeg.UInfo == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF()))
+		// @ assert reveal p.EqAbsInfoField(absPkt(ub))
+		// @ assert reveal p.EqAbsHopField(absPkt(ub))
+		// @ assert reveal p.LastHopLen(ub)
 	}
-	// @ assert absPkt(dp, ub) == reveal AbsUpdateNonConsDirIngressSegID(old(absPkt(dp, ub)), path.ifsToIO_ifs(p.ingressID))
+	// @ assert absPkt(ub) == reveal AbsUpdateNonConsDirIngressSegID(old(absPkt(ub)), path.ifsToIO_ifs(p.ingressID))
 	return nil
 }
 
@@ -2474,13 +2457,12 @@ func (p *scionPacketProcessor) currentHopPointer( /*@ ghost ubScionL []byte @*/
 // @ ensures sl.AbsSlice_Bytes(p.cachedMac, 0, len(p.cachedMac))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
-// @ requires dp.Valid()
 // @ requires len(oldPkt.CurrSeg.Future) > 0
 // @ requires p.EqAbsHopField(oldPkt)
 // @ requires p.EqAbsInfoField(oldPkt)
 // @ ensures reserr == nil ==> AbsVerifyCurrentMACConstraint(oldPkt, dp)
 // @ ensures reserr != nil && respr.OutPkt != nil ==>
-// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
+// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases
 func (p *scionPacketProcessor) verifyCurrentMAC( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
 	fullMac := path.FullMAC(p.mac, p.infoField, p.hopField, p.macBuffers.scionInput)
@@ -2500,7 +2482,6 @@ func (p *scionPacketProcessor) verifyCurrentMAC( /*@ ghost oldPkt io.IO_pkt2, gh
 				"cons_dir", p.infoField.ConsDir, "if_id", p.ingressID,
 				"curr_inf", p.path.PathMeta.CurrINF, "curr_hf", p.path.PathMeta.CurrHF,
 				"seg_id", p.infoField.SegID),
-			/*@ dp, @*/
 		)
 	}
 	// Add the full MAC to the SCION packet processor,
@@ -2534,11 +2515,10 @@ func (p *scionPacketProcessor) verifyCurrentMAC( /*@ ghost oldPkt io.IO_pkt2, gh
 // @ ensures reserr != nil ==> !addrAliasesUb
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
-// @ requires dp.Valid()
 // @ ensures reserr != nil && respr.OutPkt != nil ==>
-// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
+// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases 0 if sync.IgnoreBlockingForTermination()
-func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte, ghost dp io.DataPlaneSpec @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , addrAliasesUb bool @*/) {
+func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , addrAliasesUb bool @*/) {
 	// (VerifiedSCION) the parameter used to be p.scionLayer,
 	// instead of &p.scionLayer.
 	a, err /*@ , addrAliases @*/ := p.d.resolveLocalDst(&p.scionLayer /*@, ubScionL @*/)
@@ -2552,7 +2532,7 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (
 		r, err := p.packSCMP(
 			slayers.SCMPTypeDestinationUnreachable,
 			slayers.SCMPCodeNoRoute,
-			&slayers.SCMPDestinationUnreachable{}, err /*@, dp, @*/)
+			&slayers.SCMPDestinationUnreachable{}, err)
 		return nil, r, err /*@ , false @*/
 	default:
 		// @ fold p.d.validResult(respr, addrAliases)
@@ -2574,15 +2554,15 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (
 // @ ensures reserr != nil ==> p.scionLayer.NonInitMem()
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
-// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0
-// @ requires p.EqAbsHopField(absPkt(dp, ub))
-// @ requires p.EqAbsInfoField(absPkt(dp, ub))
-// @ ensures reserr == nil ==> dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) >= 0
-// @ ensures reserr == nil ==> absPkt(dp, ub) == AbsProcessEgress(old(absPkt(dp, ub)))
+// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
+// @ requires len(absPkt(ub).CurrSeg.Future) > 0
+// @ requires p.EqAbsHopField(absPkt(ub))
+// @ requires p.EqAbsInfoField(absPkt(ub))
+// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
+// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) >= 0
+// @ ensures reserr == nil ==> absPkt(ub) == AbsProcessEgress(old(absPkt(ub)))
 // @ decreases
-func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (reserr error) {
+func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr error) {
 	// @ ghost ubPath := p.scionLayer.UBPath(ub)
 	// @ ghost startP := p.scionLayer.PathStartIdx(ub)
 	// @ ghost endP := p.scionLayer.PathEndIdx(ub)
 
 	// @ unfold acc(p.scionLayer.Mem(ub), 1-R55)
 	// @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm)
-	// @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp)
+	// @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP)
 	// @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP)
-	// @ reveal p.EqAbsInfoField(absPkt(dp, ub))
-	// @ reveal p.EqAbsHopField(absPkt(dp, ub))
+	// @ reveal p.EqAbsInfoField(absPkt(ub))
+	// @ reveal p.EqAbsHopField(absPkt(ub))
 	// @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm)
 	// @ reveal p.scionLayer.ValidHeaderOffset(ub, startP)
 	// @ unfold acc(p.scionLayer.Mem(ub), R55)
@@ -2603,7 +2583,7 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr
 		p.infoField.UpdateSegID(p.hopField.Mac /*@, p.hopField.ToIO_HF() @*/)
 		// @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF()))
 		// @ assume 0 <= p.path.GetCurrINF(ubPath)
-		if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF /*@ ) @*/) /*@ , ubPath, dp @*/); err != nil {
+		if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF /*@ ) @*/) /*@ , ubPath @*/); err != nil {
 			// TODO parameter problem invalid path
 			// @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm)
 			// @ p.path.DowngradePerm(ubPath)
@@ -2624,12 +2604,12 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr
 	// @ fold acc(p.scionLayer.Mem(ub), R55)
 	// @ assert reveal p.scionLayer.ValidHeaderOffset(ub, startP)
 	// @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm)
-	// @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath))
+	// @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath))
 	// @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP)
-	// @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp)
+	// @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP)
 	// @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm)
-	// @ absPktFutureLemma(dp, ub)
-	// @ TemporaryAssumeForIO(absPkt(dp, ub) == AbsProcessEgress(old(absPkt(dp, ub))))
+	// @ absPktFutureLemma(ub)
+	// @ TemporaryAssumeForIO(absPkt(ub) == AbsProcessEgress(old(absPkt(ub))))
 	// @ fold acc(p.scionLayer.Mem(ub), 1-R55)
 	return nil
 }
@@ -2651,19 +2631,19 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr
 // @ ensures respr === processResult{}
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contract for IO-spec
-// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
+// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
 // @ requires p.GetIsXoverSpec(ub)
-// @ ensures reserr == nil ==> len(old(absPkt(dp, ub)).CurrSeg.Future) == 1
-// @ ensures reserr == nil ==> old(absPkt(dp, ub)).LeftSeg != none[io.IO_seg2]
-// @ ensures reserr == nil ==> len(get(old(absPkt(dp, ub)).LeftSeg).Future) > 0
-// @ ensures reserr == nil ==> len(get(old(absPkt(dp, ub)).LeftSeg).History) == 0
+// @ ensures reserr == nil ==> len(old(absPkt(ub)).CurrSeg.Future) == 1
+// @ ensures reserr == nil ==> old(absPkt(ub)).LeftSeg != none[io.IO_seg2]
+// @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).Future) > 0
+// @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).History) == 0
 // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0
-// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub))
-// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(dp, ub))
-// @ ensures reserr == nil ==> absPkt(dp, ub) == AbsDoXover(old(absPkt(dp, ub)))
+// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0
+// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub))
+// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub))
+// @ ensures reserr == nil ==> absPkt(ub) == AbsDoXover(old(absPkt(ub)))
 // @ decreases
-func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
+func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) {
 	p.segmentChange = true
 	// @ ghost startP := p.scionLayer.PathStartIdx(ub)
 	// @ ghost endP := p.scionLayer.PathEndIdx(ub)
 
 	// @ unfold acc(p.scionLayer.Mem(ub), 1-R55)
 	// @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm)
-	// @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp)
+	// @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP)
 	// @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP)
-	// @ TemporaryAssumeForIO(len(old(absPkt(dp, ub)).CurrSeg.Future) == 1)
+	// @ TemporaryAssumeForIO(len(old(absPkt(ub)).CurrSeg.Future) == 1)
-	// @ reveal p.EqAbsInfoField(absPkt(dp, ub))
-	// @ reveal p.EqAbsHopField(absPkt(dp, ub))
+	// @ reveal p.EqAbsInfoField(absPkt(ub))
+	// @ reveal p.EqAbsHopField(absPkt(ub))
 	// @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm)
 	// @ unfold acc(p.scionLayer.Mem(ub), R55)
 	if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil {
@@ -2704,13 +2684,13 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost dp io.DataPla
 		return processResult{}, err
 	}
 	// @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm)
-	// @ TemporaryAssumeForIO(old(absPkt(dp, ub)).LeftSeg != none[io.IO_seg2])
-	// @ TemporaryAssumeForIO(len(get(old(absPkt(dp, ub)).LeftSeg).Future) > 0)
-	// @ TemporaryAssumeForIO(len(get(old(absPkt(dp, ub)).LeftSeg).History) == 0)
+	// @ TemporaryAssumeForIO(old(absPkt(ub)).LeftSeg != none[io.IO_seg2])
+	// @ TemporaryAssumeForIO(len(get(old(absPkt(ub)).LeftSeg).Future) > 0)
+	// @ TemporaryAssumeForIO(len(get(old(absPkt(ub)).LeftSeg).History) == 0)
 	// @ TemporaryAssumeForIO(slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub))
-	// @ TemporaryAssumeForIO(absPkt(dp, ub) == AbsDoXover(old(absPkt(dp, ub))))
-	// @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub)))
-	// @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(dp, ub)))
+	// @ TemporaryAssumeForIO(absPkt(ub) == AbsDoXover(old(absPkt(ub))))
+	// @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub)))
+	// @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub)))
 	// @ fold acc(p.scionLayer.Mem(ub), 1-R55)
 	return processResult{}, nil
 }
@@ -2780,14 +2760,13 @@ func (p *scionPacketProcessor) egressInterface( /*@ ghost oldPkt io.IO_pkt2 @*/
 // @ 	reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
-// @ requires dp.Valid()
 // @ requires len(oldPkt.CurrSeg.Future) > 0
 // @ requires p.EqAbsInfoField(oldPkt)
 // @ requires p.EqAbsHopField(oldPkt)
 // @ ensures reserr != nil && respr.OutPkt != nil ==>
-// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
+// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases 0 if sync.IgnoreBlockingForTermination()
-func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
+func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/ ) (respr processResult, reserr error) {
	egressID := p.egressInterface( /*@ oldPkt @ */ )
 	// @ p.d.getBfdSessionsMem()
 	// @ ghost if p.d.bfdSessions != nil { unfold acc(accBfdSession(p.d.bfdSessions), _) }
@@ -2810,7 +2789,7 @@ func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/
 			}
 		}
 		// @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP
-		return p.packSCMP(typ, 0, scmpP, serrors.New("bfd session down") /*@, dp @*/)
+		return p.packSCMP(typ, 0, scmpP, serrors.New("bfd session down"))
 	}
 	// @ fold p.d.validResult(processResult{}, false)
@@ -2843,21 +2822,21 @@ func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/
 // @ 	reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
-// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
+// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
 // @ requires p.DstIsLocalIngressID(ub)
-// @ requires p.LastHopLen(ub, dp)
-// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0
-// @ requires p.EqAbsHopField(absPkt(dp, ub))
+// @ requires p.LastHopLen(ub)
+// @ requires len(absPkt(ub).CurrSeg.Future) > 0
+// @ requires p.EqAbsHopField(absPkt(ub))
 // @ ensures reserr == nil ==> p.DstIsLocalIngressID(ub)
 // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ ensures reserr == nil ==> p.LastHopLen(ub, dp)
-// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0
-// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub))
-// @ ensures reserr == nil ==> absPkt(dp, ub) == old(absPkt(dp, ub))
+// @ ensures reserr == nil ==> p.LastHopLen(ub)
+// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0
+// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub))
+// @ ensures reserr == nil ==> absPkt(ub) == old(absPkt(ub))
 // @ ensures reserr != nil && respr.OutPkt != nil ==>
-// @ 	absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported
+// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
 // @ decreases
-func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) {
+func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) {
 	// @ ghost ubPath := p.scionLayer.UBPath(ub)
 	// @ ghost startP := p.scionLayer.PathStartIdx(ub)
 	// @ ghost endP := p.scionLayer.PathEndIdx(ub)
@@ -2876,9 +2855,9 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh
 	// @ defer fold acc(p.scionLayer.Mem(ub), R20)
 	// (VerifiedSCION) the following is guaranteed by the type system, but Gobra cannot prove it yet
 	// @ assume 0 <= p.path.GetCurrHF(ubPath)
	// @ reveal p.LastHopLen(ub)
 	// @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm)
-	// @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp)
+	// @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP)
 	// @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP)
 	// @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm)
 	if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubPath @*/); err != nil {
@@ -2888,16 +2867,15 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh
 	}
 	// @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm)
 	// @ assert p.DstIsLocalIngressID(ub)
-	// @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopField
+	// @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopField
 	// @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP)
-	// @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp)
-	// @ absPktFutureLemma(dp, ub)
-	// @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) // postcondition of SetHopField
-	// @ TemporaryAssumeForIO(absPkt(dp, ub) == old(absPkt(dp, ub)))
+	// @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP)
+	// @ absPktFutureLemma(ub)
+	// @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) // postcondition of SetHopField
+	// @ TemporaryAssumeForIO(absPkt(ub) == old(absPkt(ub)))
 	// @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm)
-	// @ assert dp.Valid()
 	// @ assert 
slayers.ValidPktMetaHdr(ub) - // @ assert reveal p.LastHopLen(ub, dp) + // @ assert reveal p.LastHopLen(ub) // @ assert p.scionLayer.EqAbsHeader(ub) /*@ ghost var ubLL []byte @@ -2912,7 +2890,7 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) } @*/ - return p.handleSCMPTraceRouteRequest(p.ingressID /*@ , ubLL, dp @*/) + return p.handleSCMPTraceRouteRequest(p.ingressID /*@, ubLL @*/) } // @ preserves acc(&p.infoField, R20) @@ -2951,19 +2929,19 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) { // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // constracts for IO-spec -// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0 -// @ requires p.EqAbsHopField(absPkt(dp, ub)) -// @ requires p.EqAbsInfoField(absPkt(dp, ub)) +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires len(absPkt(ub).CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(absPkt(ub)) +// @ requires p.EqAbsInfoField(absPkt(ub)) // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0 -// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub)) -// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(dp, ub)) -// @ ensures reserr == nil ==> absPkt(dp, ub) == old(absPkt(dp, ub)) +// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) +// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) +// @ ensures reserr == nil ==> absPkt(ub) == old(absPkt(ub)) // @ ensures reserr != nil && respr.OutPkt != nil ==> -// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int , ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) @@ -2974,7 +2952,7 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho // @ fold p.d.validResult(processResult{}, false) return processResult{}, nil } - egressID := p.egressInterface( /*@ absPkt(dp, ub) @*/ ) + egressID := p.egressInterface( /*@ absPkt(ub) @*/ ) // @ p.d.getExternalMem() // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if _, ok := p.d.external[egressID]; !ok { @@ -2988,7 +2966,7 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho // but Gobra cannot prove it yet // @ assume 0 <= p.path.GetCurrHF(ubPath) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) - // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP) // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubPath), R50) in (unfolding 
acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubPath @*/); err != nil { @@ -2997,14 +2975,14 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho return processResult{}, serrors.WrapStr("update hop field", err) } // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopfield + // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopfield // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) - // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp) - // @ absPktFutureLemma(dp, ub) - // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) // postcondition of SetHopfield - // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(dp, ub))) + // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) + // @ absPktFutureLemma(ub) + // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) // postcondition of SetHopfield + // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub))) // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ TemporaryAssumeForIO(absPkt(dp, ub) == old(absPkt(dp, ub))) + // @ TemporaryAssumeForIO(absPkt(ub) == old(absPkt(ub))) /*@ ghost var ubLL []byte ghost if &p.scionLayer === p.lastLayer { @@ -3018,7 +2996,7 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) } @*/ - return p.handleSCMPTraceRouteRequest(egressID /*@ , ubLL, dp @*/) + return p.handleSCMPTraceRouteRequest(egressID /*@, ubLL@*/) } // @ preserves acc(&p.infoField, R21) @@ -3043,12 +3021,11 @@ func (p *scionPacketProcessor) egressRouterAlertFlag() (res *bool) { // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec -// @ requires dp.Valid() // @ ensures reserr != nil && respr.OutPkt != nil ==> -// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( - interfaceID uint16 /*@ , ghost ubLastLayer []byte, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error) { + interfaceID uint16 /*@ , ghost ubLastLayer []byte @*/) (respr processResult, reserr error) { if p.lastLayer.NextLayerType( /*@ ubLastLayer @*/ ) != slayers.LayerTypeSCMP { log.Debug("Packet with router alert, but not SCMP") @@ -3096,7 +3073,7 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( Interface: uint64(interfaceID), } // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP - return p.packSCMP(slayers.SCMPTypeTracerouteReply, 0, &scmpP, nil /*@, dp @*/) + return p.packSCMP(slayers.SCMPTypeTracerouteReply, 0, &scmpP, nil) } // @ preserves acc(p.scionLayer.Mem(ubScionL), R20) @@ -3106,11 +3083,10 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( // @ ensures reserr == nil ==> int(p.scionLayer.GetPayloadLen(ubScionL)) == len(p.scionLayer.GetPayload(ubScionL)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec -// @ requires dp.Valid() // @ ensures reserr != nil && respr.OutPkt != nil ==> -// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) 
validatePktLen( /*@ ghost ubScionL []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) (respr processResult, reserr error) { // @ unfold acc(p.scionLayer.Mem(ubScionL), R20) // @ defer fold acc(p.scionLayer.Mem(ubScionL), R20) if int(p.scionLayer.PayloadLen) == len(p.scionLayer.Payload) { @@ -3124,7 +3100,6 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte, ghost &slayers.SCMPParameterProblem{Pointer: 0}, serrors.New("bad packet size", "header", p.scionLayer.PayloadLen, "actual", len(p.scionLayer.Payload)), - /*@ dp, @*/ ) } @@ -3174,12 +3149,12 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte, ghost // @ requires dp.Valid() // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) && p.scionLayer.EqPathType(ub) // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; -// @ requires let absPkt := absIO_val(dp, ub, p.ingressID) in +// @ requires let absPkt := absIO_val(ub, p.ingressID) in // @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) // @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> // @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) // @ ensures respr.OutPkt != nil ==> -// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) +// @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) // @ ensures reserr != nil && respr.OutPkt != nil ==> // @ newAbsPkt.isIO_val_Unsupported // @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) @@ -3188,67 +3163,67 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte, ghost func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { // @ ghost var oldPkt io.IO_pkt2 // @ ghost if(slayers.IsSupportedPkt(ub)) { - // @ absIO_valLemma(dp, ub, p.ingressID) - // @ oldPkt = absIO_val(dp, ub, p.ingressID).IO_val_Pkt2_2 + // @ absIO_valLemma(ub, p.ingressID) + // @ oldPkt = absIO_val(ub, p.ingressID).IO_val_Pkt2_2 // @ } else { - // @ absPktFutureLemma(dp, ub) - // @ oldPkt = absPkt(dp, ub) + // @ absPktFutureLemma(ub) + // @ oldPkt = absPkt(ub) // @ } // @ nextPkt := oldPkt - if r, err := p.parsePath( /*@ ub , dp @*/ ); err != nil { + if r, err := p.parsePath( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - if r, err := p.validateHopExpiry( /*@ dp @*/ ); err != nil { + if r, err := p.validateHopExpiry(); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - if r, err := p.validateIngressID( /*@ nextPkt, dp @*/ ); err != nil { + if r, err := p.validateIngressID( /*@ nextPkt @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } // @ assert AbsValidateIngressIDConstraint(nextPkt, path.ifsToIO_ifs(p.ingressID)) - if r, err := p.validatePktLen( /*@ ub, dp @*/ ); err != nil { + if r, err := p.validatePktLen( /*@ ub @*/ ); err != nil { // @ 
p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } if r, err := p.validateTransitUnderlaySrc( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - if r, err := p.validateSrcDstIA( /*@ ub, dp @*/ ); err != nil { + if r, err := p.validateSrcDstIA( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - if err := p.updateNonConsDirIngressSegID( /*@ ub, dp @*/ ); err != nil { + if err := p.updateNonConsDirIngressSegID( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return processResult{}, err /*@, false, absReturnErr(dp, processResult{}) @*/ + return processResult{}, err /*@, false, absReturnErr(processResult{}) @*/ } - // @ assert absPkt(dp, ub) == AbsUpdateNonConsDirIngressSegID(oldPkt, path.ifsToIO_ifs(p.ingressID)) - // @ nextPkt = absPkt(dp, ub) + // @ assert absPkt(ub) == AbsUpdateNonConsDirIngressSegID(oldPkt, path.ifsToIO_ifs(p.ingressID)) + // @ nextPkt = absPkt(ub) // @ AbsValidateIngressIDLemma(oldPkt, nextPkt, path.ifsToIO_ifs(p.ingressID)) if r, err := p.verifyCurrentMAC( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } // @ assert AbsVerifyCurrentMACConstraint(nextPkt, dp) - if r, err := p.handleIngressRouterAlert( /*@ ub, llIsNil, startLL, endLL, dp @*/ ); err != nil { + if r, err := p.handleIngressRouterAlert( /*@ ub, llIsNil, startLL, endLL @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - // @ assert nextPkt == absPkt(dp, ub) + // @ assert nextPkt == absPkt(ub) // Inbound: pkts destined to the local IA. // @ p.d.getLocalIA() if /*@ unfolding acc(p.scionLayer.Mem(ub), R50) in (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in @*/ p.scionLayer.DstIA /*@ ) @*/ == p.d.localIA { // @ assert p.DstIsLocalIngressID(ub) // @ assert unfolding acc(p.scionLayer.Mem(ub), R50) in (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in p.scionLayer.DstIA) == p.d.localIA - // @ p.LocalDstLemma(ub, dp) + // @ p.LocalDstLemma(ub) // @ assert p.ingressID != 0 // @ assert len(nextPkt.CurrSeg.Future) == 1 - a, r, err /*@, aliasesUb @*/ := p.resolveInbound( /*@ ub, dp @*/ ) + a, r, err /*@, aliasesUb @*/ := p.resolveInbound( /*@ ub @*/ ) if err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, aliasesUb, absReturnErr(dp, r) @*/ + return r, err /*@, aliasesUb, absReturnErr(r) @*/ } // @ p.d.getInternal() // @ unfold p.d.validResult(r, aliasesUb) @@ -3258,7 +3233,7 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ ghost if(slayers.IsSupportedPkt(ub)) { // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) // @ } - // @ newAbsPkt = reveal absIO_val(dp, p.rawPkt, 0) + // @ newAbsPkt = reveal absIO_val(p.rawPkt, 0) return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, aliasesUb, newAbsPkt @*/ } // Outbound: pkts leaving the local IA. 
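	// (VerifiedSCION) A sketch of the ghost bookkeeping in the outbound case,
	// assuming the helper contracts above hold: nextPkt is re-bound to
	// absPkt(ub) only after steps that change the abstract packet (doXover,
	// processEgress); after the steps that preserve it (the router-alert
	// handlers, validateEgressUp), it is merely re-asserted unchanged.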
@@ -3268,21 +3243,21 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, if p.path.IsXover( /*@ ubPath @*/ ) { // @ assert p.GetIsXoverSpec(ub) // @ fold acc(p.scionLayer.Mem(ub), R3) - if r, err := p.doXover( /*@ ub, dp @*/ ); err != nil { + if r, err := p.doXover( /*@ ub @*/ ); err != nil { // @ fold p.d.validResult(processResult{}, false) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - // @ assert absPkt(dp, ub) == AbsDoXover(nextPkt) + // @ assert absPkt(ub) == AbsDoXover(nextPkt) // @ AbsValidateIngressIDXoverLemma(nextPkt, AbsDoXover(nextPkt), path.ifsToIO_ifs(p.ingressID)) - // @ nextPkt = absPkt(dp, ub) - if r, err := p.validateHopExpiry( /*@ dp @*/ ); err != nil { + // @ nextPkt = absPkt(ub) + if r, err := p.validateHopExpiry(); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(dp, r) @*/ + return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(r) @*/ } // verify the new block if r, err := p.verifyCurrentMAC( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(dp, r) @*/ + return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(r) @*/ } // @ assert AbsVerifyCurrentMACConstraint(nextPkt, dp) // @ unfold acc(p.scionLayer.Mem(ub), R3) @@ -3291,23 +3266,23 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ assert p.segmentChange ==> nextPkt.RightSeg != none[io.IO_seg2] if r, err := p.validateEgressID( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } // @ assert !p.segmentChange ==> AbsValidateEgressIDConstraint(nextPkt, (p.ingressID != 0), dp) // @ assert p.segmentChange ==> p.ingressID != 0 && AbsValidateEgressIDConstraintXover(nextPkt, dp) // handle egress router alert before we check if it's up because we want to // send the reply anyway, so that trace route can pinpoint the exact link // that failed. 
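	// (VerifiedSCION) handleEgressRouterAlert may rewrite the hop field in
	// place, but its contract (see above) guarantees absPkt(ub) ==
	// old(absPkt(ub)) on success, which is what justifies the assert
	// nextPkt == absPkt(ub) immediately after the call.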
- if r, err := p.handleEgressRouterAlert( /*@ ub, llIsNil, startLL, endLL , dp @*/ ); err != nil { + if r, err := p.handleEgressRouterAlert( /*@ ub, llIsNil, startLL, endLL @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - // @ assert nextPkt == absPkt(dp, ub) - if r, err := p.validateEgressUp( /*@ nextPkt, dp @*/ ); err != nil { + // @ assert nextPkt == absPkt(ub) + if r, err := p.validateEgressUp( /*@ nextPkt @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(dp, r) @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - // @ assert nextPkt == absPkt(dp, ub) + // @ assert nextPkt == absPkt(ub) egressID := p.egressInterface( /*@ nextPkt @*/ ) // @ assert AbsEgressInterfaceConstraint(nextPkt, path.ifsToIO_ifs(egressID)) // @ p.d.getExternalMem() @@ -3315,13 +3290,13 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, if c, ok := p.d.external[egressID]; ok { // @ p.d.getDomExternalLemma() // @ p.d.EgressIDNotZeroLemma(egressID, dp) - if err := p.processEgress( /*@ ub, dp @*/ ); err != nil { + if err := p.processEgress( /*@ ub @*/ ); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@, false, absReturnErr(dp, processResult{}) @*/ + return processResult{}, err /*@, false, absReturnErr(processResult{}) @*/ } // @ p.d.InDomainExternalInForwardingMetrics(egressID) - // @ assert absPkt(dp, ub) == AbsProcessEgress(nextPkt) - // @ nextPkt = absPkt(dp, ub) + // @ assert absPkt(ub) == AbsProcessEgress(nextPkt) + // @ nextPkt = absPkt(ub) // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) // @ ghost if(slayers.IsSupportedPkt(ub)) { // @ ghost if(!p.segmentChange) { @@ -3332,7 +3307,7 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) // @ } // @ } - // @ newAbsPkt = reveal absIO_val(dp, p.rawPkt, egressID) + // @ newAbsPkt = reveal absIO_val(p.rawPkt, egressID) // @ fold p.d.validResult(processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, false) return processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, nil /*@, false, newAbsPkt @*/ } @@ -3351,7 +3326,7 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) // @ } // @ } - // @ newAbsPkt = reveal absIO_val(dp, p.rawPkt, 0) + // @ newAbsPkt = reveal absIO_val(p.rawPkt, 0) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, false) return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, false, newAbsPkt @*/ } @@ -3366,9 +3341,8 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, errCode, &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, cannotRoute, - /*@ dp, @*/ ) - return tmp, err /*@, false, absReturnErr(dp, tmp) @*/ + return tmp, err /*@, false, absReturnErr(tmp) @*/ } // @ requires acc(&p.rawPkt, R15) @@ -3399,13 +3373,12 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // contracts for IO-spec // @ requires p.scionLayer.EqPathType(p.rawPkt) // @ requires !slayers.IsSupportedPkt(p.rawPkt) -// @ requires dp.Valid() // @ 
ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) // @ ensures respr.OutPkt != nil ==> -// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) && +// @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) && // @ newAbsPkt.isIO_val_Unsupported // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { +func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { // @ ghost ubScionL := p.rawPkt // @ p.scionLayer.ExtractAcc(ubScionL) s := p.scionLayer @@ -3421,7 +3394,7 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ establishMemMalformedPath() // @ fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) - return processResult{}, malformedPath /*@ , false, absReturnErr(dp, processResult{}) @*/ + return processResult{}, malformedPath /*@ , false, absReturnErr(processResult{}) @*/ } if /*@ unfolding acc(s.Path.Mem(ubPath), R50) in @*/ !ohp.Info.ConsDir { // TODO parameter problem -> invalid path @@ -3430,7 +3403,7 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr( "OneHop path in reverse construction direction is not allowed", - malformedPath, "srcIA", s.SrcIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ + malformedPath, "srcIA", s.SrcIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(processResult{}) @*/ } // OHP leaving our IA @@ -3443,7 +3416,7 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad source IA", cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/), - "localIA", p.d.localIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ + "localIA", p.d.localIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(processResult{}) @*/ } // @ p.d.getNeighborIAs() neighborIA, ok := p.d.neighborIAs[ /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/] @@ -3453,7 +3426,7 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WithCtx(cannotRoute, - "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/)) /*@ , false, absReturnErr(dp, processResult{}) @*/ + "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/)) /*@ , false, absReturnErr(processResult{}) @*/ } if !neighborIA.Equal(s.DstIA) { // @ establishCannotRoute() @@ -3461,7 +3434,7 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad destination IA", cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ 
ohp.FirstHop.ConsEgress /*@ ) @*/), - "neighborIA", neighborIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ + "neighborIA", neighborIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(processResult{}) @*/ } // @ unfold s.Path.Mem(ubPath) // @ unfold ohp.FirstHop.Mem() @@ -3486,7 +3459,7 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // TODO parameter problem -> invalid MAC // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.New("MAC", "expected", fmt.Sprintf("%x", macCopy), - "actual", fmt.Sprintf("%x", ohp.FirstHop.Mac), "type", "ohp") /*@ , false, absReturnErr(dp, processResult{}) @*/ + "actual", fmt.Sprintf("%x", ohp.FirstHop.Mac), "type", "ohp") /*@ , false, absReturnErr(processResult{}) @*/ } ohp.Info.UpdateSegID(ohp.FirstHop.Mac /*@, ohp.FirstHop.ToIO_HF() @*/) // @ fold ohp.FirstHop.Mem() @@ -3497,7 +3470,7 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // changes made to 'updateSCIONLayer'. if err := updateSCIONLayer(p.rawPkt, &p.scionLayer /* s */, p.buffer); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false, absReturnErr(dp, processResult{}) @*/ + return processResult{}, err /*@ , false, absReturnErr(processResult{}) @*/ } // @ unfold p.scionLayer.Mem(ubScionL) // @ defer fold p.scionLayer.Mem(ubScionL) @@ -3515,13 +3488,13 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ fold p.d.validResult(processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, false) // @ TemporaryAssumeForIO(!slayers.IsSupportedPkt(p.rawPkt)) return processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, - nil /*@ , false, reveal absIO_val(dp, respr.OutPkt, respr.EgressID) @*/ + nil /*@ , false, reveal absIO_val(respr.OutPkt, respr.EgressID) @*/ } // TODO parameter problem invalid interface // @ establishCannotRoute() // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WithCtx(cannotRoute, "type", "ohp", - "egress", ohp.FirstHop.ConsEgress, "consDir", ohp.Info.ConsDir) /*@ , false, absReturnErr(dp, processResult{}) @*/ + "egress", ohp.FirstHop.ConsEgress, "consDir", ohp.Info.ConsDir) /*@ , false, absReturnErr(processResult{}) @*/ } // OHP entering our IA @@ -3532,7 +3505,7 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad destination IA", cannotRoute, "type", "ohp", "ingress", p.ingressID, - "localIA", p.d.localIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ + "localIA", p.d.localIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(processResult{}) @*/ } // @ p.d.getNeighborIAs() neighborIA := p.d.neighborIAs[p.ingressID] @@ -3542,7 +3515,7 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad source IA", cannotRoute, "type", "ohp", "ingress", p.ingressID, - "neighborIA", neighborIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ + "neighborIA", neighborIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(processResult{}) @*/ } // @ unfold s.Path.Mem(ubPath) @@ -3566,7 +3539,7 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ fold p.scionLayer.Mem(ubScionL) if err := 
updateSCIONLayer(p.rawPkt, &p.scionLayer /* s */, p.buffer); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false, absReturnErr(dp, processResult{}) @*/ + return processResult{}, err /*@ , false, absReturnErr(processResult{}) @*/ } // (VerifiedSCION) the parameter was changed from 's' to '&p.scionLayer' due to the // changes made to 'resolveLocalDst'. @@ -3576,13 +3549,13 @@ func (p *scionPacketProcessor) processOHP( /* @ ghost dp io.DataPlaneSpec @ */ ) // @ apply acc(a.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) // @ } // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false, absReturnErr(dp, processResult{}) @*/ + return processResult{}, err /*@ , false, absReturnErr(processResult{}) @*/ } // @ p.d.getInternal() // @ assert p.d.internal != nil ==> acc(p.d.internal.Mem(), _) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, addrAliases) // @ TemporaryAssumeForIO(!slayers.IsSupportedPkt(p.rawPkt)) - return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@ , addrAliases, reveal absIO_val(dp, respr.OutPkt, 0) @*/ + return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@ , addrAliases, reveal absIO_val(respr.OutPkt, 0) @*/ } // @ requires acc(d.Mem(), _) diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra index 4faa43b52..17b4a2582 100644 --- a/router/io-spec-abstract-transitions.gobra +++ b/router/io-spec-abstract-transitions.gobra @@ -137,7 +137,6 @@ pure func AbsValidateEgressIDConstraintXover(pkt io.IO_pkt2, dp io.DataPlaneSpec ghost opaque -requires dp.Valid() requires len(pkt.CurrSeg.Future) > 0 decreases pure func AbsVerifyCurrentMACConstraint(pkt io.IO_pkt2, dp io.DataPlaneSpec) bool { @@ -162,7 +161,6 @@ requires AbsValidateIngressIDConstraint(oldPkt, ingressID) requires AbsVerifyCurrentMACConstraint(newPkt, dp) requires len(newPkt.CurrSeg.Future) == 1 || AbsValidateEgressIDConstraint(newPkt, true, dp) preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; -ensures dp.Valid() ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases func InternalEnterEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { @@ -193,7 +191,6 @@ requires AbsValidateEgressIDConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, requires AbsEgressInterfaceConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), egressID) requires newPkt == AbsProcessEgress(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; -ensures dp.Valid() ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases func ExternalEnterOrExitEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { @@ -232,7 +229,6 @@ requires egressID != none[io.IO_ifs] ==> AbsEgressInterfaceConstraint(AbsDoXove requires egressID == none[io.IO_ifs] ==> newPkt == AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) requires egressID != none[io.IO_ifs] ==> newPkt == AbsProcessEgress(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID))) preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; 
-ensures dp.Valid() ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases func XoverEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index 018c4cec1..b4f52b6ae 100644 --- a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -28,34 +28,32 @@ import ( ) ghost -preserves dp.Valid() preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) ensures slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw) ==> - absIO_val(dp, raw, ingressID).isIO_val_Pkt2 && - absIO_val(dp, raw, ingressID).IO_val_Pkt2_2 == absPkt(dp, raw) && - len(absPkt(dp, raw).CurrSeg.Future) > 0 + absIO_val(raw, ingressID).isIO_val_Pkt2 && + absIO_val(raw, ingressID).IO_val_Pkt2_2 == absPkt(raw) && + len(absPkt(raw).CurrSeg.Future) > 0 decreases -func absIO_valLemma(dp io.DataPlaneSpec, raw []byte, ingressID uint16) { +func absIO_valLemma(raw []byte, ingressID uint16) { if(slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw)){ - absIO := reveal absIO_val(dp, raw, ingressID) + absIO := reveal absIO_val(raw, ingressID) assert absIO.isIO_val_Pkt2 - assert absIO_val(dp, raw, ingressID).IO_val_Pkt2_2 == absPkt(dp, raw) - absPktFutureLemma(dp, raw) + assert absIO_val(raw, ingressID).IO_val_Pkt2_2 == absPkt(raw) + absPktFutureLemma(raw) } } ghost -requires dp.Valid() requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) requires slayers.ValidPktMetaHdr(raw) -ensures dp.Valid() ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) ensures slayers.ValidPktMetaHdr(raw) -ensures len(absPkt(dp, raw).CurrSeg.Future) > 0 +ensures len(absPkt(raw).CurrSeg.Future) > 0 decreases -func absPktFutureLemma(dp io.DataPlaneSpec, raw []byte) { +func absPktFutureLemma(raw []byte) { reveal slayers.ValidPktMetaHdr(raw) headerOffset := slayers.GetAddressOffset(raw) + headerOffsetWithMetaLen := headerOffset + scion.MetaLen assert forall k int :: {&raw[headerOffset:headerOffset+scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> &raw[headerOffset:headerOffset+scion.MetaLen][k] == &raw[headerOffset + k] hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) @@ -68,9 +66,9 @@ func absPktFutureLemma(dp io.DataPlaneSpec, raw []byte) { segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := scion.HopFieldOffset(numINF, 0, headerOffset) - pkt := reveal absPkt(dp, raw) - assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffset) + offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) + pkt := reveal absPkt(raw) + assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffsetWithMetaLen) assert len(pkt.CurrSeg.Future) > 0 } @@ -120,13 +118,12 @@ requires acc(&p.d, R55) && acc(p.d.Mem(), _) requires acc(&p.ingressID, R55) requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) requires slayers.ValidPktMetaHdr(ub) -requires dp.Valid() decreases -pure func (p *scionPacketProcessor) LastHopLen(ub []byte, dp io.DataPlaneSpec) bool { +pure func (p *scionPacketProcessor) LastHopLen(ub []byte) bool { return (unfolding acc(p.scionLayer.Mem(ub), R50) in (unfolding 
acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in
 		p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==>
-		len(absPkt(dp, ub).CurrSeg.Future) == 1
+		len(absPkt(ub).CurrSeg.Future) == 1
 }

 //TODO: Does not work with --disableNL --unsafeWildcardOptimization
@@ -134,26 +131,24 @@ ghost
 requires acc(p.scionLayer.Mem(ub), R50)
 requires acc(&p.d, R55) && acc(p.d.Mem(), _)
 requires acc(&p.ingressID, R55)
-requires dp.Valid()
 requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56)
 requires slayers.ValidPktMetaHdr(ub)
 requires p.DstIsLocalIngressID(ub)
-requires p.LastHopLen(ub, dp)
+requires p.LastHopLen(ub)
 requires (unfolding acc(p.scionLayer.Mem(ub), R50) in
 	(unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in
 	p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA))
 ensures acc(p.scionLayer.Mem(ub), R50)
 ensures acc(&p.d, R55) && acc(p.d.Mem(), _)
 ensures acc(&p.ingressID, R55)
-ensures dp.Valid()
 ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56)
 ensures slayers.ValidPktMetaHdr(ub)
 ensures p.ingressID != 0
-ensures len(absPkt(dp, ub).CurrSeg.Future) == 1
+ensures len(absPkt(ub).CurrSeg.Future) == 1
 decreases
-func (p* scionPacketProcessor) LocalDstLemma(ub []byte, dp io.DataPlaneSpec) {
+func (p* scionPacketProcessor) LocalDstLemma(ub []byte) {
 	reveal p.DstIsLocalIngressID(ub)
-	reveal p.LastHopLen(ub, dp)
+	reveal p.LastHopLen(ub)
 }

 ghost
@@ -217,7 +212,6 @@ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50)
 requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50)
 requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55)
 requires p.path === p.scionLayer.GetPath(ub)
-requires dp.Valid()
 requires slayers.ValidPktMetaHdr(ub)
 requires start == p.scionLayer.PathStartIdx(ub)
 requires end == p.scionLayer.PathEndIdx(ub)
@@ -226,7 +220,6 @@ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50)
 ensures acc(p.scionLayer.Mem(ub), R55)
 ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50)
 ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55)
-ensures dp.Valid()
 ensures slayers.ValidPktMetaHdr(ub)
 ensures p.scionLayer.EqAbsHeader(ub)
 ensures start == p.scionLayer.PathStartIdx(ub)
 ensures end == p.scionLayer.PathEndIdx(ub)
 ensures scion.validPktMetaHdr(ub[start:end])
 ensures p.path.EqAbsHeader(ub[start:end])
 ensures p.scionLayer.ValidHeaderOffset(ub, len(ub))
-ensures absPkt(dp, ub) == p.path.absPkt(dp, ub[start:end])
+ensures absPkt(ub) == p.path.absPkt(ub[start:end])
 decreases
-func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end int, dp io.DataPlaneSpec) {
+func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end int) {
 	unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56)
 	unfold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56)
 	reveal slayers.ValidPktMetaHdr(ub)
@@ -254,7 +247,7 @@ func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end
 	hdr := hdr1
 	fold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56)
 	fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56)
-
+	headerOffsetWithMetaLen := start + scion.MetaLen
 	metaHdr := scion.DecodedFrom(hdr)
 	currINFIdx := int(metaHdr.CurrINF)
 	currHFIdx := int(metaHdr.CurrHF)
@@ -264,13 +257,13 @@ func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end
 	segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len)
 	prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len)
 	numINF := 
scion.NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := scion.HopFieldOffset(numINF, 0, start) + offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) - currSegWidenLemma(ub, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, start, start, end) - leftSegWidenLemma(ub, currINFIdx + 1, seg1Len, seg2Len, seg3Len, start, start, end) - midSegWidenLemma(ub, currINFIdx + 2, seg1Len, seg2Len, seg3Len, start, start, end) - rightSegWidenLemma(ub, currINFIdx - 1, seg1Len, seg2Len, seg3Len, start, start, end) - assert reveal absPkt(dp, ub) == reveal p.path.absPkt(dp, ub[start:end]) + currSegWidenLemma(ub, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) + leftSegWidenLemma(ub, currINFIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + midSegWidenLemma(ub, currINFIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + rightSegWidenLemma(ub, currINFIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + assert reveal absPkt(ub) == reveal p.path.absPkt(ub[start:end]) } ghost @@ -280,7 +273,6 @@ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) requires p.path === p.scionLayer.GetPath(ub) -requires dp.Valid() requires scion.validPktMetaHdr(ub[start:end]) requires start == p.scionLayer.PathStartIdx(ub) requires end == p.scionLayer.PathEndIdx(ub) @@ -290,15 +282,14 @@ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) ensures acc(p.scionLayer.Mem(ub), R55) ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) -ensures dp.Valid() ensures slayers.ValidPktMetaHdr(ub) ensures start == p.scionLayer.PathStartIdx(ub) ensures end == p.scionLayer.PathEndIdx(ub) ensures scion.validPktMetaHdr(ub[start:end]) ensures p.scionLayer.EqAbsHeader(ub) -ensures absPkt(dp, ub) == p.path.absPkt(dp, ub[start:end]) +ensures absPkt(ub) == p.path.absPkt(ub[start:end]) decreases -func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end int, dp io.DataPlaneSpec){ +func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end int){ unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) unfold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) unfold acc(p.scionLayer.Mem(ub), R56) @@ -310,7 +301,7 @@ func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end reveal scion.validPktMetaHdr(ub[start:end]) assert reveal slayers.ValidPktMetaHdr(ub) assert start == slayers.GetAddressOffset(ub) - + headerOffsetWithMetaLen := start + scion.MetaLen hdr1 := binary.BigEndian.Uint32(ub[start:start+scion.MetaLen]) hdr2 := binary.BigEndian.Uint32(ub[start:end][:scion.MetaLen]) assert hdr1 == hdr2 @@ -327,13 +318,13 @@ func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := scion.HopFieldOffset(numINF, 0, start) + offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) - currSegWidenLemma(ub, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, start, start, end) - leftSegWidenLemma(ub, currINFIdx + 1, seg1Len, seg2Len, seg3Len, start, start, end) - midSegWidenLemma(ub, 
currINFIdx + 2, seg1Len, seg2Len, seg3Len, start, start, end) - rightSegWidenLemma(ub, currINFIdx - 1, seg1Len, seg2Len, seg3Len, start, start, end) - assert reveal absPkt(dp, ub) == reveal p.path.absPkt(dp, ub[start:end]) + currSegWidenLemma(ub, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) + leftSegWidenLemma(ub, currINFIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + midSegWidenLemma(ub, currINFIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + rightSegWidenLemma(ub, currINFIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + assert reveal absPkt(ub) == reveal p.path.absPkt(ub[start:end]) } ghost diff --git a/router/io-spec.gobra b/router/io-spec.gobra index 9b77fe93a..014133963 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -32,13 +32,13 @@ import ( ghost opaque -requires dp.Valid() requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) requires slayers.ValidPktMetaHdr(raw) decreases -pure func absPkt(dp io.DataPlaneSpec, raw []byte) (res io.IO_pkt2) { +pure func absPkt(raw []byte) (res io.IO_pkt2) { return let _ := reveal slayers.ValidPktMetaHdr(raw) in let headerOffset := slayers.GetAddressOffset(raw) in + let headerOffsetWithMetaLen := headerOffset + scion.MetaLen in let hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) in let metaHdr := scion.DecodedFrom(hdr) in @@ -50,12 +50,12 @@ pure func absPkt(dp io.DataPlaneSpec, raw []byte) (res io.IO_pkt2) { let segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in let prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in let numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) in - let offset := scion.HopFieldOffset(numINF, 0, headerOffset) in + let offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) in io.IO_pkt2(io.IO_Packet2{ - CurrSeg : scion.CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffset), - LeftSeg : scion.LeftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, headerOffset), - MidSeg : scion.MidSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, headerOffset), - RightSeg : scion.RightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, headerOffset), + CurrSeg : scion.CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffsetWithMetaLen), + LeftSeg : scion.LeftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), + MidSeg : scion.MidSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), + RightSeg : scion.RightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), }) } @@ -73,24 +73,22 @@ pure func absIO_val_Unsupported(raw []byte, ingressID uint16) (val io.IO_val) { ghost opaque -requires dp.Valid() requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) ensures val.isIO_val_Pkt2 || val.isIO_val_Unsupported decreases -pure func absIO_val(dp io.DataPlaneSpec, raw []byte, ingressID uint16) (val io.IO_val) { +pure func absIO_val(raw []byte, ingressID uint16) (val io.IO_val) { return (reveal slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw)) ? 
- io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(dp, raw)}) : + io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(raw)}) : absIO_val_Unsupported(raw, ingressID) } ghost -requires dp.Valid() requires respr.OutPkt != nil ==> acc(sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)), R56) decreases -pure func absReturnErr(dp io.DataPlaneSpec, respr processResult) (val io.IO_val) { +pure func absReturnErr(respr processResult) (val io.IO_val) { return respr.OutPkt == nil ? io.IO_val_Unit{} : - absIO_val(dp, respr.OutPkt, respr.EgressID) + absIO_val(respr.OutPkt, respr.EgressID) } ghost @@ -189,10 +187,9 @@ func (d *DataPlane) getDomExternalLemma() { } ghost -requires dp.Valid() requires acc(msg.Mem(), R50) decreases -pure func MsgToAbsVal(dp io.DataPlaneSpec, msg *ipv4.Message, ingressID uint16) (res io.IO_val) { +pure func MsgToAbsVal(msg *ipv4.Message, ingressID uint16) (res io.IO_val) { return unfolding acc(msg.Mem(), R50) in - absIO_val(dp, msg.Buffers[0], ingressID) + absIO_val(msg.Buffers[0], ingressID) } diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra index 9e6673455..8b8f37ba3 100644 --- a/router/widen-lemma.gobra +++ b/router/widen-lemma.gobra @@ -32,13 +32,12 @@ ghost requires 0 <= length && length <= len(raw) requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) -preserves dp.Valid() ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) -ensures absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==> - absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID) +ensures absIO_val(raw[:length], ingressID).isIO_val_Pkt2 ==> + absIO_val(raw[:length], ingressID) == absIO_val(raw, ingressID) decreases -func absIO_valWidenLemma(dp io.DataPlaneSpec, raw []byte, ingressID uint16, length int) { +func absIO_valWidenLemma(raw []byte, ingressID uint16, length int) { var ret1 io.IO_val var ret2 io.IO_val @@ -47,17 +46,17 @@ func absIO_valWidenLemma(dp io.DataPlaneSpec, raw []byte, ingressID uint16, leng assert slayers.ValidPktMetaHdr(raw) IsSupportedPktWidenLemma(raw, length) assert slayers.IsSupportedPkt(raw) - absPktWidenLemma(dp, raw, length) + absPktWidenLemma(raw, length) - ret1 = io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(dp, raw)}) - ret2 = io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(dp, raw[:length])}) - assert ret1 == reveal absIO_val(dp, raw, ingressID) - assert ret2 == reveal absIO_val(dp, raw[:length], ingressID) + ret1 = io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(raw)}) + ret2 = io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(raw[:length])}) + assert ret1 == reveal absIO_val(raw, ingressID) + assert ret2 == reveal absIO_val(raw[:length], ingressID) assert ret1 == ret2 - assert absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==> - absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID) + assert absIO_val(raw[:length], ingressID).isIO_val_Pkt2 ==> + absIO_val(raw[:length], ingressID) == absIO_val(raw, ingressID) } else { - assert !(reveal absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2) + assert !(reveal absIO_val(raw[:length], ingressID).isIO_val_Pkt2) } } @@ -103,7 +102,6 @@ func IsSupportedPktWidenLemma(raw []byte, length int) { ghost requires 0 <= length && length <= len(raw) -requires dp.Valid() requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) requires acc(sl.AbsSlice_Bytes(raw[:length], 
0, len(raw[:length])), R50) requires slayers.ValidPktMetaHdr(raw) @@ -112,9 +110,9 @@ ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) ensures slayers.ValidPktMetaHdr(raw) ensures slayers.ValidPktMetaHdr(raw[:length]) -ensures absPkt(dp, raw) == absPkt(dp, raw[:length]) +ensures absPkt(raw) == absPkt(raw[:length]) decreases -func absPktWidenLemma(dp io.DataPlaneSpec, raw []byte, length int) { +func absPktWidenLemma(raw []byte, length int) { reveal slayers.ValidPktMetaHdr(raw) reveal slayers.ValidPktMetaHdr(raw[:length]) @@ -124,6 +122,7 @@ func absPktWidenLemma(dp io.DataPlaneSpec, raw []byte, length int) { headerOffset2 := slayers.GetAddressOffset(raw[:length]) assert headerOffset1 == headerOffset2 headerOffset := headerOffset1 + headerOffsetWithMetaLen := headerOffset + scion.MetaLen hdr1 := binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen]) hdr2 := binary.BigEndian.Uint32(raw[:length][headerOffset:headerOffset+scion.MetaLen]) assert hdr1 == hdr2 @@ -140,14 +139,14 @@ func absPktWidenLemma(dp io.DataPlaneSpec, raw []byte, length int) { segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := scion.HopFieldOffset(numINF, 0, headerOffset) + offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) - currSegWidenLemma(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffset, 0, length) - leftSegWidenLemma(raw, currINFIdx + 1, seg1Len, seg2Len, seg3Len, headerOffset, 0, length) - midSegWidenLemma(raw, currINFIdx + 2, seg1Len, seg2Len, seg3Len, headerOffset, 0, length) - rightSegWidenLemma(raw, currINFIdx - 1, seg1Len, seg2Len, seg3Len, headerOffset, 0, length) + currSegWidenLemma(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffsetWithMetaLen, 0, length) + leftSegWidenLemma(raw, currINFIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) + midSegWidenLemma(raw, currINFIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) + rightSegWidenLemma(raw, currINFIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) - assert reveal absPkt(dp, raw) == reveal absPkt(dp, raw[:length]) + assert reveal absPkt(raw) == reveal absPkt(raw[:length]) } ghost From 6a8228a706a3ffc2a0718a6d2f72a8de40c5e23f Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Wed, 15 May 2024 14:30:40 +0200 Subject: [PATCH 35/57] Proof of incPath (#344) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * progress incPath proof * Apply suggestions from code review Co-authored-by: João Pereira * fmt of widen-lemma * further fmt * feedback * add comment * Update router/dataplane.go * Apply suggestions from code review --------- Co-authored-by: João Pereira --- pkg/slayers/path/scion/raw.go | 113 +++++-- pkg/slayers/path/scion/raw_spec.gobra | 364 ++++++++++++++++++----- pkg/slayers/path/scion/widen-lemma.gobra | 234 +++++++++++++++ pkg/slayers/scion_spec.gobra | 5 +- router/dataplane.go | 26 +- router/io-spec-lemmas.gobra | 42 +-- router/io-spec.gobra | 16 +- router/widen-lemma.gobra | 265 +---------------- 8 files changed, 665 insertions(+), 400 deletions(-) create mode 100644 pkg/slayers/path/scion/widen-lemma.gobra diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go 
index 8c73fce79..a3ba6a3fb 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -219,53 +219,108 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // IncPath increments the path and writes it to the buffer. // @ requires s.Mem(ubuf) // @ requires sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// pres for IO: // @ requires s.EqAbsHeader(ubuf) +// @ requires validPktMetaHdr(ubuf) +// @ requires len(s.absPkt(ubuf).CurrSeg.Future) > 0 +// @ requires s.GetIsXoverSpec(ubuf) ==> +// @ s.absPkt(ubuf).LeftSeg != none[io.IO_seg3] // @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) // @ ensures old(unfolding s.Mem(ubuf) in unfolding // @ s.Base.Mem() in (s.NumINF <= 0 || int(s.PathMeta.CurrHF) >= s.NumHops-1)) ==> r != nil // @ ensures r == nil ==> s.Mem(ubuf) -// @ ensures r == nil && s.InBounds(ubuf) ==> s.EqAbsHeader(ubuf) // @ ensures r != nil ==> s.NonInitMem() // @ ensures r != nil ==> r.ErrorMem() +// post for IO: +// @ ensures r == nil ==> s.EqAbsHeader(ubuf) && validPktMetaHdr(ubuf) +// @ ensures r == nil && old(s.GetIsXoverSpec(ubuf)) ==> +// @ s.absPkt(ubuf) == AbsXover(old(s.absPkt(ubuf))) +// @ ensures r == nil && !old(s.GetIsXoverSpec(ubuf)) ==> +// @ s.absPkt(ubuf) == AbsIncPath(old(s.absPkt(ubuf))) // @ decreases func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ unfold s.Mem(ubuf) + //@ reveal validPktMetaHdr(ubuf) + //@ unfold acc(s.Base.Mem(), R56) + //@ oldCurrInfIdx := int(s.PathMeta.CurrINF) + //@ oldCurrHfIdx := int(s.PathMeta.CurrHF) + //@ oldSeg1Len := int(s.PathMeta.SegLen[0]) + //@ oldSeg2Len := int(s.PathMeta.SegLen[1]) + //@ oldSeg3Len := int(s.PathMeta.SegLen[2]) + //@ oldSegLen := LengthOfCurrSeg(oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ oldPrevSegLen := LengthOfPrevSeg(oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ oldOffset := HopFieldOffset(s.Base.NumINF, 0, 0) + //@ fold acc(s.Base.Mem(), R56) if err := s.Base.IncPath(); err != nil { //@ fold s.NonInitMem() return err } - //@ sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, HalfPerm) - //@ sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, HalfPerm) - //@ sl.Reslice_Bytes(ubuf, 0, MetaLen, HalfPerm) - //@ sl.Reslice_Bytes(ubuf, 0, MetaLen, HalfPerm) - + //@ fold acc(s.Mem(ubuf), HalfPerm) + //@ sl.SplitRange_Bytes(ubuf, 0, MetaLen, HalfPerm) + //@ ValidPktMetaHdrSublice(ubuf, MetaLen) + //@ sl.Reslice_Bytes(ubuf, MetaLen, len(ubuf), HalfPerm) + //@ tail := ubuf[MetaLen:] + //@ unfold acc(sl.AbsSlice_Bytes(tail, 0, len(tail)), R50) + //@ oldoffsetWithHops := oldOffset + path.HopLen * oldPrevSegLen + //@ oldHfIdxSeg := oldCurrHfIdx-oldPrevSegLen + //@ WidenCurrSeg(ubuf, oldoffsetWithHops + MetaLen, oldCurrInfIdx, oldHfIdxSeg, + //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ LenCurrSeg(tail, oldoffsetWithHops, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) + //@ oldAbsPkt := reveal s.absPkt(ubuf) + //@ sl.SplitRange_Bytes(ubuf, 0, MetaLen, HalfPerm) //@ unfold acc(s.Base.Mem(), R2) err := s.PathMeta.SerializeTo(s.Raw[:MetaLen]) - //@ ghost if s.PathMeta.InBounds() { - //@ v := s.Raw[:MetaLen] - //@ b0 := sl.GetByte(v, 0, MetaLen, 0) - //@ b1 := sl.GetByte(v, 0, MetaLen, 1) - //@ b2 := sl.GetByte(v, 0, MetaLen, 
2) - //@ b3 := sl.GetByte(v, 0, MetaLen, 3) - //@ s.PathMeta.SerializeAndDeserializeLemma(b0, b1, b2, b3) - //@ } - //@ assert s.PathMeta.InBounds() ==> s.PathMeta.EqAbsHeader(s.Raw[:MetaLen]) - //@ fold acc(s.Base.Mem(), R3) - - //@ sl.Unslice_Bytes(ubuf, 0, MetaLen, R2) - //@ sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, R2) - //@ fold acc(s.Mem(ubuf), R2) - //@ assert s.InBounds(ubuf) == s.PathMeta.InBounds() + //@ assert s.Base.ValidCurrIdxs() + //@ assert s.PathMeta.InBounds() + //@ v := s.Raw[:MetaLen] + //@ b0 := sl.GetByte(v, 0, MetaLen, 0) + //@ b1 := sl.GetByte(v, 0, MetaLen, 1) + //@ b2 := sl.GetByte(v, 0, MetaLen, 2) + //@ b3 := sl.GetByte(v, 0, MetaLen, 3) + //@ s.PathMeta.SerializeAndDeserializeLemma(b0, b1, b2, b3) + //@ assert s.PathMeta.EqAbsHeader(v) + //@ assert RawBytesToBase(v).ValidCurrIdxsSpec() + //@ sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) + //@ ValidPktMetaHdrSublice(ubuf, MetaLen) //@ assert s.EqAbsHeader(ubuf) == s.PathMeta.EqAbsHeader(ubuf) - //@ s.PathMeta.EqAbsHeaderForSublice(ubuf, MetaLen) - //@ assert s.EqAbsHeader(ubuf) == s.PathMeta.EqAbsHeader(s.Raw[:MetaLen]) - //@ assert s.InBounds(ubuf) ==> s.EqAbsHeader(ubuf) + //@ assert reveal validPktMetaHdr(ubuf) + //@ currInfIdx := int(s.PathMeta.CurrINF) + //@ currHfIdx := int(s.PathMeta.CurrHF) + //@ assert currHfIdx == oldCurrHfIdx + 1 - //@ sl.Unslice_Bytes(ubuf, 0, MetaLen, 1-R2) - //@ sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, 1-R2) - //@ fold acc(s.Base.Mem(), R3) - //@ fold acc(s.Mem(ubuf), 1-R2) - //@ assert s.InBounds(ubuf) ==> s.EqAbsHeader(ubuf) + //@ ghost if(currInfIdx == oldCurrInfIdx) { + //@ IncCurrSeg(tail, oldoffsetWithHops, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) + //@ WidenCurrSeg(ubuf, oldoffsetWithHops + MetaLen, oldCurrInfIdx, oldHfIdxSeg + 1, + //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ assert reveal s.absPkt(ubuf) == AbsIncPath(oldAbsPkt) + //@ } else { + //@ segLen := LengthOfCurrSeg(currHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ prevSegLen := LengthOfPrevSeg(currHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ offsetWithHops := oldOffset + path.HopLen * prevSegLen + MetaLen + //@ hfIdxSeg := currHfIdx-prevSegLen + //@ XoverSegNotNone(tail, oldCurrInfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ XoverCurrSeg(tail, oldCurrInfIdx + 1, oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ XoverLeftSeg(tail, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ XoverMidSeg(tail, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ XoverRightSeg(tail, oldCurrInfIdx, oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ WidenCurrSeg(ubuf, offsetWithHops, currInfIdx, hfIdxSeg, segLen, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, currInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, currInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, currInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ assert reveal s.absPkt(ubuf) == AbsXover(oldAbsPkt) + //@ } + + //@ fold acc(sl.AbsSlice_Bytes(tail, 0, len(tail)), R50) + //@ sl.Unslice_Bytes(ubuf, MetaLen, len(ubuf), HalfPerm) + //@ 
sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) + //@ fold acc(s.Base.Mem(), R2) + //@ fold acc(s.Mem(ubuf), HalfPerm) return err } diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index 9cec61b93..36018256a 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -359,62 +359,62 @@ pure func LengthOfPrevSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) (re ghost requires 0 <= offset -requires 0 <= currHFIdx && currHFIdx <= segLen +requires 0 <= currHfIdx && currHfIdx <= segLen requires offset + path.HopLen * segLen <= len(raw) requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) -ensures len(res) == segLen - currHFIdx -decreases segLen - currHFIdx +ensures len(res) == segLen - currHfIdx +decreases segLen - currHfIdx pure func hopFields( raw []byte, offset int, - currHFIdx int, + currHfIdx int, segLen int) (res seq[io.IO_HF]) { - return currHFIdx == segLen ? seq[io.IO_HF]{} : - let hf := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHFIdx, len(raw)) in - seq[io.IO_HF]{hf} ++ hopFields(raw, offset, currHFIdx + 1, segLen) + return currHfIdx == segLen ? seq[io.IO_HF]{} : + let hf := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHfIdx, len(raw)) in + seq[io.IO_HF]{hf} ++ hopFields(raw, offset, currHfIdx + 1, segLen) } ghost -requires -1 <= currHFIdx && currHFIdx < len(hopfields) -ensures len(res) == currHFIdx + 1 -decreases currHFIdx + 1 -pure func segPast(hopfields seq[io.IO_HF], currHFIdx int) (res seq[io.IO_HF]) { - return currHFIdx == -1 ? +requires -1 <= currHfIdx && currHfIdx < len(hopfields) +ensures len(res) == currHfIdx + 1 +decreases currHfIdx + 1 +pure func segPast(hopfields seq[io.IO_HF], currHfIdx int) (res seq[io.IO_HF]) { + return currHfIdx == -1 ? seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHFIdx]} ++ segPast(hopfields, currHFIdx - 1) + seq[io.IO_HF]{hopfields[currHfIdx]} ++ segPast(hopfields, currHfIdx - 1) } ghost -requires 0 <= currHFIdx && currHFIdx <= len(hopfields) -ensures len(res) == len(hopfields) - currHFIdx -decreases len(hopfields) - currHFIdx -pure func segFuture(hopfields seq[io.IO_HF], currHFIdx int) (res seq[io.IO_HF]) { - return currHFIdx == len(hopfields) ? seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHFIdx]} ++ segFuture(hopfields, currHFIdx + 1) +requires 0 <= currHfIdx && currHfIdx <= len(hopfields) +ensures len(res) == len(hopfields) - currHfIdx +decreases len(hopfields) - currHfIdx +pure func segFuture(hopfields seq[io.IO_HF], currHfIdx int) (res seq[io.IO_HF]) { + return currHfIdx == len(hopfields) ? seq[io.IO_HF]{} : + seq[io.IO_HF]{hopfields[currHfIdx]} ++ segFuture(hopfields, currHfIdx + 1) } ghost -requires -1 <= currHFIdx && currHFIdx < len(hopfields) -ensures len(res) == currHFIdx + 1 -decreases currHFIdx + 1 -pure func segHistory(hopfields seq[io.IO_HF], currHFIdx int) (res seq[io.IO_ahi]) { - return currHFIdx == -1 ? seq[io.IO_ahi]{} : - seq[io.IO_ahi]{hopfields[currHFIdx].Toab()} ++ segHistory(hopfields, currHFIdx - 1) +requires -1 <= currHfIdx && currHfIdx < len(hopfields) +ensures len(res) == currHfIdx + 1 +decreases currHfIdx + 1 +pure func segHistory(hopfields seq[io.IO_HF], currHfIdx int) (res seq[io.IO_ahi]) { + return currHfIdx == -1 ? 
seq[io.IO_ahi]{} : + seq[io.IO_ahi]{hopfields[currHfIdx].Toab()} ++ segHistory(hopfields, currHfIdx - 1) } ghost requires 0 <= offset requires 0 < segLen -requires 0 <= currHFIdx && currHFIdx <= segLen +requires 0 <= currHfIdx && currHfIdx <= segLen requires offset + path.HopLen * segLen <= len(raw) requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) -ensures len(res.Future) == segLen - currHFIdx -ensures len(res.History) == currHFIdx -ensures len(res.Past) == currHFIdx +ensures len(res.Future) == segLen - currHfIdx +ensures len(res.History) == currHfIdx +ensures len(res.Past) == currHfIdx decreases pure func segment(raw []byte, offset int, - currHFIdx int, + currHfIdx int, ainfo io.IO_ainfo, uinfo set[io.IO_msgterm], consDir bool, @@ -426,33 +426,33 @@ pure func segment(raw []byte, UInfo : uinfo, ConsDir : consDir, Peer : peer, - Past : segPast(hopfields, currHFIdx - 1), - Future : segFuture(hopfields, currHFIdx), - History : segHistory(hopfields, currHFIdx - 1), + Past : segPast(hopfields, currHfIdx - 1), + Future : segFuture(hopfields, currHfIdx), + History : segHistory(hopfields, currHfIdx - 1), }) } ghost opaque requires 0 <= headerOffset -requires path.InfoFieldOffset(currINFIdx, headerOffset) + path.InfoLen <= offset +requires path.InfoFieldOffset(currInfIdx, headerOffset) + path.InfoLen <= offset requires 0 < segLen requires offset + path.HopLen * segLen <= len(raw) -requires 0 <= currHFIdx && currHFIdx <= segLen -requires 0 <= currINFIdx && currINFIdx < 3 +requires 0 <= currHfIdx && currHfIdx <= segLen +requires 0 <= currInfIdx && currInfIdx < 3 requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) decreases pure func CurrSeg(raw []byte, offset int, - currINFIdx int, - currHFIdx int, + currInfIdx int, + currHfIdx int, segLen int, headerOffset int) io.IO_seg3 { - return let ainfo := path.Timestamp(raw, currINFIdx, headerOffset) in - let consDir := path.ConsDir(raw, currINFIdx, headerOffset) in - let peer := path.Peer(raw, currINFIdx, headerOffset) in - let uinfo := path.AbsUinfo(raw, currINFIdx, headerOffset) in - segment(raw, offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen) + return let ainfo := path.Timestamp(raw, currInfIdx, headerOffset) in + let consDir := path.ConsDir(raw, currInfIdx, headerOffset) in + let peer := path.Peer(raw, currInfIdx, headerOffset) in + let uinfo := path.AbsUinfo(raw, currInfIdx, headerOffset) in + segment(raw, offset, currHfIdx, ainfo, uinfo, consDir, peer, segLen) } ghost @@ -462,21 +462,21 @@ requires 0 < seg1Len requires 0 <= seg2Len requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) -requires 1 <= currINFIdx && currINFIdx < 4 +requires 1 <= currInfIdx && currInfIdx < 4 requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) decreases pure func LeftSeg( raw []byte, - currINFIdx int, + currInfIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int) option[io.IO_seg3] { return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in - (currINFIdx == 1 && seg2Len > 0) ? - some(reveal CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset)) : - ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - some(reveal CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset)) : + (currInfIdx == 1 && seg2Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * seg1Len, currInfIdx, 0, seg2Len, headerOffset)) : + ((currInfIdx == 2 && seg2Len > 0 && seg3Len > 0) ? 
+ some(CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currInfIdx, 0, seg3Len, headerOffset)) : none[io.IO_seg3]) } @@ -487,21 +487,21 @@ requires 0 < seg1Len requires 0 <= seg2Len requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) -requires -1 <= currINFIdx && currINFIdx < 2 +requires -1 <= currInfIdx && currInfIdx < 2 requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) decreases pure func RightSeg( raw []byte, - currINFIdx int, + currInfIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int) option[io.IO_seg3] { return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in - (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ? - some(CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset)) : - (currINFIdx == 0 && seg2Len > 0) ? - some(CurrSeg(raw, offset, currINFIdx, seg1Len, seg1Len, headerOffset)) : + (currInfIdx == 1 && seg2Len > 0 && seg3Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * seg1Len, currInfIdx, seg2Len, seg2Len, headerOffset)) : + (currInfIdx == 0 && seg2Len > 0) ? + some(CurrSeg(raw, offset, currInfIdx, seg1Len, seg1Len, headerOffset)) : none[io.IO_seg3] } @@ -512,21 +512,21 @@ requires 0 < seg1Len requires 0 <= seg2Len requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) -requires 2 <= currINFIdx && currINFIdx < 5 +requires 2 <= currInfIdx && currInfIdx < 5 requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) decreases pure func MidSeg( raw []byte, - currINFIdx int, + currInfIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int) option[io.IO_seg3] { return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in - (currINFIdx == 4 && seg2Len > 0) ? + (currInfIdx == 4 && seg2Len > 0) ? some(CurrSeg(raw, offset, 0, seg1Len, seg1Len, headerOffset)) : - ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - some(CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset)) : + ((currInfIdx == 2 && seg2Len > 0 && seg3Len > 0) ? 
+ some(CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currInfIdx, 0, seg3Len, headerOffset)) : none[io.IO_seg3]) } @@ -538,20 +538,20 @@ decreases pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { return let _ := reveal validPktMetaHdr(raw) in let metaHdr := RawBytesToMetaHdr(raw) in - let currINFIdx := int(metaHdr.CurrINF) in - let currHFIdx := int(metaHdr.CurrHF) in + let currInfIdx := int(metaHdr.CurrINF) in + let currHfIdx := int(metaHdr.CurrHF) in let seg1Len := int(metaHdr.SegLen[0]) in let seg2Len := int(metaHdr.SegLen[1]) in let seg3Len := int(metaHdr.SegLen[2]) in - let segLen := LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in - let prevSegLen := LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in + let segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in + let prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in let numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) in let offset := HopFieldOffset(numINF, 0, MetaLen) in io.IO_pkt2(io.IO_Packet2{ - CurrSeg : CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, MetaLen), - LeftSeg : LeftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, MetaLen), - MidSeg : MidSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, MetaLen), - RightSeg : RightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, MetaLen), + CurrSeg : CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen), + LeftSeg : LeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len , seg3Len, MetaLen), + MidSeg : MidSeg(raw, currInfIdx + 2, seg1Len, seg2Len , seg3Len, MetaLen), + RightSeg : RightSeg(raw, currInfIdx - 1, seg1Len, seg2Len , seg3Len, MetaLen), }) } @@ -594,6 +594,7 @@ pure func validPktMetaHdr(raw []byte) bool { let base := RawBytesToBase(raw) in 0 < metaHdr.SegLen[0] && base.ValidCurrIdxsSpec() && + base.CurrInfMatchesCurrHF() && pktLen(seg1, seg2, seg3, MetaLen) <= len(raw) } @@ -629,6 +630,45 @@ pure func AbsSetInfoField(oldPkt io.IO_pkt2, info path.IntermediateAbsInfoField) io.IO_pkt2(io.IO_Packet2{newCurrSeg, oldPkt.LeftSeg, oldPkt.MidSeg, oldPkt.RightSeg}) } +ghost +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires len(oldPkt.CurrSeg.Future) > 0 +decreases +pure func AbsXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { + return io.IO_pkt2( + io.IO_Packet2{ + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(absIncPathSeg(oldPkt.CurrSeg))}) +} + +ghost +requires len(oldPkt.CurrSeg.Future) > 0 +decreases +pure func AbsIncPath(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { + return io.IO_pkt2( + io.IO_Packet2{ + absIncPathSeg(oldPkt.CurrSeg), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg}) +} + +ghost +requires len(currseg.Future) > 0 +decreases +pure func absIncPathSeg(currseg io.IO_seg3) io.IO_seg3 { + return io.IO_seg3_ { + AInfo: currseg.AInfo, + UInfo: currseg.UInfo, + ConsDir: currseg.ConsDir, + Peer: currseg.Peer, + Past: seq[io.IO_HF]{currseg.Future[0]} ++ currseg.Past, + Future: currseg.Future[1:], + History: seq[io.IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History} +} + ghost requires acc(s.Mem(ub), _) decreases @@ -649,16 +689,194 @@ decreases func (s *Raw) LastHopLemma(ubuf []byte) { reveal validPktMetaHdr(ubuf) metaHdr := RawBytesToMetaHdr(ubuf) - currINFIdx := int(metaHdr.CurrINF) - currHFIdx := int(metaHdr.CurrHF) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segLen := 
LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) + offset := HopFieldOffset(numINF, 0, MetaLen) + pkt := reveal s.absPkt(ubuf) + assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) + assert len(pkt.CurrSeg.Future) == 1 +} + +ghost +preserves acc(s.Mem(ubuf), R55) +preserves s.GetIsXoverSpec(ubuf) +preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) +preserves validPktMetaHdr(ubuf) +preserves s.EqAbsHeader(ubuf) +ensures s.absPkt(ubuf).LeftSeg != none[io.IO_seg2] +ensures len(s.absPkt(ubuf).CurrSeg.Future) == 1 +decreases +func (s *Raw) XoverLemma(ubuf []byte) { + reveal validPktMetaHdr(ubuf) + metaHdr := RawBytesToMetaHdr(ubuf) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) offset := HopFieldOffset(numINF, 0, MetaLen) pkt := reveal s.absPkt(ubuf) - assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, MetaLen) + assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) + assert pkt.LeftSeg == reveal LeftSeg(ubuf, currInfIdx + 1, seg1Len, seg2Len , seg3Len, MetaLen) assert len(pkt.CurrSeg.Future) == 1 + assert pkt.LeftSeg != none[io.IO_seg2] +} + +ghost +requires path.InfoFieldOffset(currInfIdx, 0) + path.InfoLen <= offset +requires 0 < segLen +requires offset + path.HopLen * segLen <= len(raw) +requires 0 <= currHfIdx && currHfIdx < segLen +requires 0 <= currInfIdx && currInfIdx < 3 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures len(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0).Future) > 0 +decreases +func LenCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen int) { + reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0) +} + +ghost +requires 0 < seg1Len +requires 0 < seg2Len +requires 0 <= seg3Len +requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires 0 <= currInfIdx && currInfIdx < 2 +requires 1 <= currInfIdx ==> 0 < seg3Len +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures LeftSeg(raw, currInfIdx+1, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] +ensures RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] +decreases +func XoverSegNotNone(raw []byte, currInfIdx int, seg1Len int, seg2Len int, seg3Len int) { + reveal LeftSeg(raw, currInfIdx+1, seg1Len, seg2Len, seg3Len, 0) + reveal RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) +} + +ghost +requires path.InfoFieldOffset(currInfIdx, 0) + path.InfoLen <= offset +requires 0 < segLen +requires offset + path.HopLen * segLen <= len(raw) +requires 0 <= currHfIdx && currHfIdx < segLen +requires 0 <= currInfIdx && currInfIdx < 3 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves len(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0).Future) > 0 +ensures CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) == + absIncPathSeg(CurrSeg(raw, 
offset, currInfIdx, currHfIdx, segLen, 0)) +decreases +func IncCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen int) { + currseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0) + incseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) + assert currseg.AInfo == incseg.AInfo + assert currseg.UInfo == incseg.UInfo + assert currseg.ConsDir == incseg.ConsDir + assert currseg.Peer == incseg.Peer + assert seq[io.IO_HF]{currseg.Future[0]} ++ currseg.Past == incseg.Past + assert currseg.Future[1:] == incseg.Future + assert seq[io.IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History == incseg.History + assert incseg == absIncPathSeg(currseg) +} + +ghost +requires 0 < seg1Len +requires 0 < seg2Len +requires 0 <= seg3Len +requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires 1 <= currInfIdx && currInfIdx < 3 +requires 1 == currInfIdx ==> currHfIdx+1 == seg1Len +requires 2 == currInfIdx ==> 0 < seg3Len && currHfIdx+1 == seg1Len + seg2Len +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] +ensures + let prevSegLen := LengthOfPrevSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) in + let segLen := LengthOfCurrSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) in + let numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) in + let offset := HopFieldOffset(numInf, 0, 0) in + CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen+1, segLen, 0) == + get(LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0)) +decreases +func XoverCurrSeg(raw []byte, currInfIdx int, currHfIdx int, seg1Len int, seg2Len int, seg3Len int) { + prevSegLen := LengthOfPrevSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) + segLen := LengthOfCurrSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) + numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) + offset := HopFieldOffset(numInf, 0, 0) + currseg := reveal CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, 0, segLen, 0) + leftseg := reveal LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) + assert currseg == get(leftseg) +} + +ghost +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires 2 <= currInfIdx && currInfIdx < 4 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) == + MidSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) +decreases +func XoverLeftSeg(raw []byte, currInfIdx int, seg1Len int, seg2Len int, seg3Len int) { + leftseg := reveal LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) + midseg := reveal MidSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) + assert leftseg == midseg +} + +ghost +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires -1 <= currInfIdx && currInfIdx < 1 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures MidSeg(raw, currInfIdx+4, seg1Len, seg2Len, seg3Len, 0) == + RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) +decreases +func XoverMidSeg(raw []byte, currInfIdx int, seg1Len int, seg2Len int, seg3Len int) { + midseg := reveal MidSeg(raw, currInfIdx+4, seg1Len, seg2Len, seg3Len, 0) + rightseg := reveal RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) + assert midseg == rightseg +} + +ghost +requires 0 < seg1Len +requires 0 < seg2Len +requires 0 <= seg3Len
+requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires 0 <= currInfIdx && currInfIdx < 2 +requires 0 == currInfIdx ==> currHfIdx+1 == seg1Len +requires 1 == currInfIdx ==> 0 < seg3Len && currHfIdx+1 == seg1Len + seg2Len +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] +ensures + let prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in + let segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in + let numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) in + let offset := HopFieldOffset(numInf, 0, 0) in + let currseg := CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, 0) in + len(currseg.Future) > 0 && + get(RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0)) == + absIncPathSeg(currseg) +decreases +func XoverRightSeg(raw []byte, currInfIdx int, currHfIdx int, seg1Len int, seg2Len int, seg3Len int) { + prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) + offset := HopFieldOffset(numInf, 0, 0) + LenCurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, segLen - 1, segLen) + IncCurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, segLen - 1, segLen) + currseg := CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, segLen - 1, segLen, 0) + nextseg := CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, segLen, segLen, 0) + rightseg := reveal RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) + assert absIncPathSeg(currseg) == nextseg + assert nextseg == get(rightseg) + assert absIncPathSeg(currseg) == get(rightseg) +} \ No newline at end of file diff --git a/pkg/slayers/path/scion/widen-lemma.gobra new file mode 100644 index 000000000..61ba9d1ea --- /dev/null +++ b/pkg/slayers/path/scion/widen-lemma.gobra @@ -0,0 +1,234 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package scion + +import ( + sl "verification/utils/slices" + "verification/io" + .
"verification/utils/definitions" + "verification/dependencies/encoding/binary" + "github.com/scionproto/scion/pkg/slayers/path" +) + +ghost +requires 0 <= start && start <= headerOffset +requires path.InfoFieldOffset(currInfIdx, headerOffset) + path.InfoLen <= offset +requires 0 < segLen +requires offset + path.HopLen * segLen <= length +requires length <= len(raw) +requires 0 <= currHfIdx && currHfIdx <= segLen +requires 0 <= currInfIdx && currInfIdx < 3 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) +ensures CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, headerOffset) == + CurrSeg(raw[start:length], offset-start, currInfIdx, currHfIdx, segLen, headerOffset-start) +decreases +func WidenCurrSeg(raw []byte, + offset int, + currInfIdx int, + currHfIdx int, + segLen int, + headerOffset int, + start int, + length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) + unfold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R53) + + ainfo1 := path.Timestamp(raw, currInfIdx, headerOffset) + ainfo2 := path.Timestamp(raw[start:length], currInfIdx, headerOffset-start) + assert ainfo1 == ainfo2 + + uinfo1 := path.AbsUinfo(raw, currInfIdx, headerOffset) + uinfo2 := path.AbsUinfo(raw[start:length], currInfIdx, headerOffset-start) + assert uinfo1 == uinfo2 + + consDir1 := path.ConsDir(raw, currInfIdx, headerOffset) + consDir2 := path.ConsDir(raw[start:length], currInfIdx, headerOffset-start) + assert consDir1 == consDir2 + + peer1 := path.Peer(raw, currInfIdx, headerOffset) + peer2 := path.Peer(raw[start:length], currInfIdx, headerOffset-start) + assert peer1 == peer2 + + widenSegment(raw, offset, currHfIdx, ainfo1, uinfo1, consDir1, peer1, segLen, start, length) + reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, headerOffset) + reveal CurrSeg(raw[start:length], offset-start, currInfIdx, currHfIdx, segLen, headerOffset-start) + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) + fold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R53) +} + +ghost +requires 0 <= start && start <= offset +requires 0 < segLen +requires 0 <= currHfIdx && currHfIdx <= segLen +requires length <= len(raw) +requires offset + path.HopLen * segLen <= length +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R52) +ensures segment(raw, offset, currHfIdx, ainfo, uinfo, consDir, peer, segLen) == + segment(raw[start:length], offset-start, currHfIdx, ainfo, uinfo, consDir, peer, segLen) +decreases +func widenSegment(raw []byte, + offset int, + currHfIdx int, + ainfo io.IO_ainfo, + uinfo set[io.IO_msgterm], + consDir bool, + peer bool, + segLen int, + start int, + length int) { + newP := (R52 + R53)/2 + widenHopFields(raw, offset, 0, segLen, start, length, newP) +} + +ghost +requires 0 <= start && start <= middle +requires middle + path.HopLen <= length +requires length <= len(raw) +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) +preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R54) +ensures path.BytesToIO_HF(raw, 0, middle, len(raw)) == + path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) +decreases +func widenBytesToIO_HF(raw []byte, middle int, start int, length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + unfold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R55) + hfBytes1 := 
path.BytesToIO_HF(raw, 0, middle, len(raw)) + hfBytes2 := path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) + assert hfBytes1 === hfBytes2 + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + fold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R55) +} + +ghost +requires R53 < p +requires 0 <= start && start <= offset +requires 0 <= currHfIdx && currHfIdx <= segLen +requires offset + path.HopLen * segLen <= length +requires length <= len(raw) +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), p) +ensures hopFields(raw, offset, currHfIdx, segLen) == + hopFields(raw[start:length], offset-start, currHfIdx, segLen) +decreases segLen - currHfIdx +func widenHopFields(raw []byte, offset int, currHfIdx int, segLen int, start int, length int, p perm) { + if (currHfIdx != segLen) { + widenBytesToIO_HF(raw, offset + path.HopLen * currHfIdx, start, length) + hf1 := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHfIdx, len(raw)) + hf2 := path.BytesToIO_HF(raw[start:length], 0, offset + path.HopLen * currHfIdx - start, length - start) + newP := (p + R53)/2 + widenHopFields(raw, offset, currHfIdx + 1, segLen, start, length, newP) + } +} + +ghost +requires 0 <= start && start <= headerOffset +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length +requires 1 <= currInfIdx && currInfIdx < 4 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) +ensures LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) == + LeftSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset-start) +decreases +func WidenLeftSeg(raw []byte, + currInfIdx int, + seg1Len int, + seg2Len int, + seg3Len int, + headerOffset int, + start int, + length int) { + offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) + if currInfIdx == 1 && seg2Len > 0 { + offsetWithHopfields := offset + path.HopLen * seg1Len + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, seg2Len, headerOffset, start, length) + } else if currInfIdx == 2 && seg2Len > 0 && seg3Len > 0 { + offsetWithHopfields := offset + path.HopLen * (seg1Len + seg2Len) + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, seg3Len, headerOffset, start, length) + } + reveal LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) + reveal LeftSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset- start) +} + +ghost +requires 0 <= start && start <= headerOffset +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length +requires -1 <= currInfIdx && currInfIdx < 2 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) +ensures RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) == + RightSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset-start) +decreases +func WidenRightSeg(raw []byte, + currInfIdx int, + seg1Len int, + seg2Len int, + seg3Len int, + headerOffset int, + start int, + length int) { + offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) + if currInfIdx == 1 && seg2Len > 0 
&& seg3Len > 0 { + offsetWithHopfields := offset + path.HopLen * seg1Len + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, seg2Len, seg2Len, headerOffset, start, length) + } else if currInfIdx == 0 && seg2Len > 0 { + WidenCurrSeg(raw, offset, currInfIdx, seg1Len, seg1Len, headerOffset, start, length) + } + reveal RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) + reveal RightSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) +} + +ghost +requires 0 <= start && start <= headerOffset +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 2 <= currInfIdx && currInfIdx < 5 +requires 0 <= length && length <= len(raw) +requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) +ensures MidSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) == + MidSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) +decreases +func WidenMidSeg(raw []byte, + currInfIdx int, + seg1Len int, + seg2Len int, + seg3Len int, + headerOffset int, + start int, + length int) { + offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) + if currInfIdx == 4 && seg2Len > 0 { + WidenCurrSeg(raw, offset, 0, seg1Len, seg1Len, headerOffset, start, length) + } else if currInfIdx == 2 && seg2Len > 0 && seg3Len > 0 { + offsetWithHopfields := offset + path.HopLen * (seg1Len + seg2Len) + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, seg3Len, headerOffset, start, length) + } + reveal MidSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) + reveal MidSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) +} \ No newline at end of file diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index db41783ec..d4125611c 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -452,8 +452,9 @@ pure func ValidPktMetaHdr(raw []byte) bool { let seg2 := int(metaHdr.SegLen[1]) in let seg3 := int(metaHdr.SegLen[2]) in let base := scion.Base{metaHdr, scion.NumInfoFields(seg1, seg2, seg3), seg1+seg2+seg3} in - 0 < metaHdr.SegLen[0] && - base.ValidCurrIdxsSpec() && + 0 < metaHdr.SegLen[0] && + base.ValidCurrIdxsSpec() && + base.CurrInfMatchesCurrHF() && scion.pktLen(seg1, seg2, seg3, start + scion.MetaLen) <= length } diff --git a/router/dataplane.go b/router/dataplane.go index 3148da9cd..039bfeb4c 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1956,7 +1956,7 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce return processResult{}, err } // @ TemporaryAssumeForIO(slayers.ValidPktMetaHdr(ub)) - // @ TemporaryAssumeForIO(len(absPkt(ub).CurrSeg.Future) > 0) + // @ absPktFutureLemma(ub) // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub))) return processResult{}, nil @@ -2593,6 +2593,9 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr return serrors.WrapStr("update info field", err) } } + // (VerifiedSCION) This assumption will be dropped after clarifying + // https://github.com/scionproto/scion/issues/4524. 
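+	// Until then, we assume that egress processing never happens at a segment
+	// cross-over, so the IncPath call below falls under the AbsIncPath
+	// (rather than the AbsXover) case of its postcondition.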
+ //@ TemporaryAssumeForIO(!p.path.GetIsXoverSpec(ubPath)) if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) @@ -2604,12 +2607,11 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ fold acc(p.scionLayer.Mem(ub), R55) // @ assert reveal p.scionLayer.ValidHeaderOffset(ub, startP) // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ absPktFutureLemma(ub) - // @ TemporaryAssumeForIO(absPkt(ub) == AbsProcessEgress(old(absPkt(ub)))) + // @ assert absPkt(ub) == reveal AbsProcessEgress(old(absPkt(ub))) // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return nil } @@ -2653,10 +2655,11 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr process // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP) // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP) - // @ TemporaryAssumeForIO(len(old(absPkt(ub)).CurrSeg.Future) == 1) + // @ p.path.XoverLemma(ubPath) // @ reveal p.EqAbsInfoField(absPkt(ub)) // @ reveal p.EqAbsHopField(absPkt(ub)) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ reveal p.scionLayer.ValidHeaderOffset(ub, startP) // @ unfold acc(p.scionLayer.Mem(ub), R55) if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { // TODO parameter problem invalid path @@ -2668,6 +2671,14 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr process return processResult{}, serrors.WrapStr("incrementing path", err) } // @ fold acc(p.scionLayer.Mem(ub), R55) + // @ assert reveal p.scionLayer.ValidHeaderOffset(ub, startP) + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) + // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) + // @ assert len(get(old(absPkt(ub)).LeftSeg).Future) > 0 + // @ assert len(get(old(absPkt(ub)).LeftSeg).History) == 0 + // @ assert slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) + // @ assert absPkt(ub) == reveal AbsDoXover(old(absPkt(ub))) var err error if p.hopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ); err != nil { // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) @@ -2683,12 +2694,7 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr process // TODO parameter problem invalid path return processResult{}, err } - // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) - // @ TemporaryAssumeForIO(old(absPkt(ub)).LeftSeg != none[io.IO_seg2]) - // @ TemporaryAssumeForIO(len(get(old(absPkt(ub)).LeftSeg).Future) > 0) - // @ TemporaryAssumeForIO(len(get(old(absPkt(ub)).LeftSeg).History) == 0) - // @ TemporaryAssumeForIO(slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)) - // @ TemporaryAssumeForIO(absPkt(ub) == AbsDoXover(old(absPkt(ub)))) + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub))) // @ fold acc(p.scionLayer.Mem(ub), 1-R55) diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index b4f52b6ae..53c7167f6 100644 --- 
a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -58,17 +58,17 @@ func absPktFutureLemma(raw []byte) { hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) metaHdr := scion.DecodedFrom(hdr) - currINFIdx := int(metaHdr.CurrINF) - currHFIdx := int(metaHdr.CurrHF) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) pkt := reveal absPkt(raw) - assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffsetWithMetaLen) + assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen) assert len(pkt.CurrSeg.Future) > 0 } @@ -249,20 +249,20 @@ func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) headerOffsetWithMetaLen := start + scion.MetaLen metaHdr := scion.DecodedFrom(hdr) - currINFIdx := int(metaHdr.CurrINF) - currHFIdx := int(metaHdr.CurrHF) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) - currSegWidenLemma(ub, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) - leftSegWidenLemma(ub, currINFIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) - midSegWidenLemma(ub, currINFIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) - rightSegWidenLemma(ub, currINFIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + scion.WidenCurrSeg(ub, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) + scion.WidenLeftSeg(ub, currInfIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + scion.WidenMidSeg(ub, currInfIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + scion.WidenRightSeg(ub, currInfIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) assert reveal absPkt(ub) == reveal p.path.absPkt(ub[start:end]) } @@ -310,20 +310,20 @@ func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) metaHdr := scion.DecodedFrom(hdr) - currINFIdx := int(metaHdr.CurrINF) - currHFIdx := int(metaHdr.CurrHF) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) seg1Len := int(metaHdr.SegLen[0]) seg2Len := 
int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) - currSegWidenLemma(ub, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) - leftSegWidenLemma(ub, currINFIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) - midSegWidenLemma(ub, currINFIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) - rightSegWidenLemma(ub, currINFIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + scion.WidenCurrSeg(ub, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) + scion.WidenLeftSeg(ub, currInfIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + scion.WidenMidSeg(ub, currInfIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + scion.WidenRightSeg(ub, currInfIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) assert reveal absPkt(ub) == reveal p.path.absPkt(ub[start:end]) } diff --git a/router/io-spec.gobra b/router/io-spec.gobra index 014133963..e65376a1a 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -42,20 +42,20 @@ pure func absPkt(raw []byte) (res io.IO_pkt2) { let hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) in let metaHdr := scion.DecodedFrom(hdr) in - let currINFIdx := int(metaHdr.CurrINF) in - let currHFIdx := int(metaHdr.CurrHF) in + let currInfIdx := int(metaHdr.CurrINF) in + let currHfIdx := int(metaHdr.CurrHF) in let seg1Len := int(metaHdr.SegLen[0]) in let seg2Len := int(metaHdr.SegLen[1]) in let seg3Len := int(metaHdr.SegLen[2]) in - let segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in - let prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in + let segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in + let prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in let numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) in let offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) in io.IO_pkt2(io.IO_Packet2{ - CurrSeg : scion.CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffsetWithMetaLen), - LeftSeg : scion.LeftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), - MidSeg : scion.MidSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), - RightSeg : scion.RightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), + CurrSeg : scion.CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen), + LeftSeg : scion.LeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), + MidSeg : scion.MidSeg(raw, currInfIdx + 2, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), + RightSeg : scion.RightSeg(raw, currInfIdx - 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), }) } diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra index 
8b8f37ba3..c3f6e7ae3 100644 --- a/router/widen-lemma.gobra +++ b/router/widen-lemma.gobra @@ -131,269 +131,20 @@ func absPktWidenLemma(raw []byte, length int) { fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) metaHdr := scion.DecodedFrom(hdr) - currINFIdx := int(metaHdr.CurrINF) - currHFIdx := int(metaHdr.CurrHF) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) - currSegWidenLemma(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffsetWithMetaLen, 0, length) - leftSegWidenLemma(raw, currINFIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) - midSegWidenLemma(raw, currINFIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) - rightSegWidenLemma(raw, currINFIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) + scion.WidenCurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, 0, length) + scion.WidenLeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) + scion.WidenMidSeg(raw, currInfIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) + scion.WidenRightSeg(raw, currInfIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) assert reveal absPkt(raw) == reveal absPkt(raw[:length]) -} - -ghost -requires 0 <= start && start <= headerOffset -requires path.InfoFieldOffset(currINFIdx, headerOffset) + path.InfoLen <= offset -requires 0 < segLen -requires offset + path.HopLen * segLen <= length -requires length <= len(raw) -requires 0 <= currHFIdx && currHFIdx <= segLen -requires 0 <= currINFIdx && currINFIdx < 3 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) -ensures scion.CurrSeg(raw, offset, currINFIdx, currHFIdx, segLen, headerOffset) == - scion.CurrSeg(raw[start:length], offset-start, currINFIdx, currHFIdx, segLen, headerOffset-start) -decreases -func currSegWidenLemma(raw []byte, offset int, currINFIdx int, currHFIdx int, segLen int, headerOffset int, start int, length int) { - unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) - unfold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R53) - - ainfo1 := path.Timestamp(raw, currINFIdx, headerOffset) - ainfo2 := path.Timestamp(raw[start:length], currINFIdx, headerOffset-start) - assert ainfo1 == ainfo2 - - uinfo1 := path.AbsUinfo(raw, currINFIdx, headerOffset) - uinfo2 := path.AbsUinfo(raw[start:length], currINFIdx, headerOffset-start) - assert uinfo1 == uinfo2 - - consDir1 := path.ConsDir(raw, currINFIdx, headerOffset) - consDir2 := path.ConsDir(raw[start:length], currINFIdx, headerOffset-start) - assert consDir1 == consDir2 - - peer1 := path.Peer(raw, currINFIdx, headerOffset) - peer2 := path.Peer(raw[start:length], currINFIdx, headerOffset-start) - assert peer1 == peer2 - - segmentWidenLemma(raw, offset, currHFIdx, ainfo1, uinfo1, consDir1, peer1, segLen, start, 
length) - ret1 := scion.segment(raw, offset, currHFIdx, ainfo1, uinfo1, consDir1, peer1, segLen) - ret2 := scion.segment(raw[start:length], offset-start, currHFIdx, ainfo2, uinfo2, consDir2, peer2, segLen) - assert ret1 == reveal scion.CurrSeg(raw, offset, currINFIdx, currHFIdx, segLen, headerOffset) - assert ret2 == reveal scion.CurrSeg(raw[start:length], offset-start, currINFIdx, currHFIdx, segLen, headerOffset-start) - assert ret1 == ret2 - fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) - fold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R53) -} - -ghost -requires 0 <= start && start <= offset -requires 0 < segLen -requires 0 <= currHFIdx && currHFIdx <= segLen -requires length <= len(raw) -requires offset + path.HopLen * segLen <= length -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) -requires acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R52) -ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) -ensures acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R52) -ensures scion.segment(raw, offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen) == - scion.segment(raw[start:length], offset-start, currHFIdx, ainfo, uinfo, consDir, peer, segLen) -decreases -func segmentWidenLemma(raw []byte, offset int, currHFIdx int, ainfo io.IO_ainfo, uinfo set[io.IO_msgterm], consDir bool, peer bool, segLen int, start int, length int) { - newP := (R52 + R53)/2 - assert R53 < newP && newP < R52 - hopFieldsWidenLemma(raw, offset, 0, segLen, start, length, newP) - hopfields1 := scion.hopFields(raw, offset, 0, segLen) - hopfields2 := scion.hopFields(raw[start:length], offset-start, 0, segLen) - assert hopfields1 == hopfields2 - - ret1 := io.IO_seg2(io.IO_seg3_{ - AInfo :ainfo, - UInfo : uinfo, - ConsDir : consDir, - Peer : peer, - Past : scion.segPast(hopfields1, currHFIdx - 1), - Future : scion.segFuture(hopfields1, currHFIdx), - History : scion.segHistory(hopfields1, currHFIdx - 1), - }) - ret2 := io.IO_seg2(io.IO_seg3_{ - AInfo :ainfo, - UInfo : uinfo, - ConsDir : consDir, - Peer : peer, - Past : scion.segPast(hopfields2, currHFIdx - 1), - Future : scion.segFuture(hopfields2, currHFIdx), - History : scion.segHistory(hopfields2, currHFIdx - 1), - }) - assert ret1 == scion.segment(raw, offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen) - assert ret2 == scion.segment(raw[start:length], offset-start, currHFIdx, ainfo, uinfo, consDir, peer, segLen) - assert ret1 == ret2 -} - -ghost -requires 0 <= start && start <= middle -requires middle + path.HopLen <= length -requires length <= len(raw) -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R54) -ensures path.BytesToIO_HF(raw, 0, middle, len(raw)) == - path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) -decreases -func BytesToIO_HFWidenLemma(raw []byte, middle int, start int, length int) { - unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) - unfold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R55) - hfBytes1 := path.BytesToIO_HF(raw, 0, middle, len(raw)) - hfBytes2 := path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) - assert hfBytes1.EgIF2 == hfBytes2.EgIF2 - assert hfBytes1.InIF2 == hfBytes2.InIF2 - assert hfBytes1.HVF == hfBytes2.HVF - fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) - fold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R55) -} - -ghost -requires R53 < p -requires 0 <= start && start <= 
offset -requires 0 <= currHFIdx && currHFIdx <= segLen -requires offset + path.HopLen * segLen <= length -requires length <= len(raw) -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), p) -ensures scion.hopFields(raw, offset, currHFIdx, segLen) == - scion.hopFields(raw[start:length], offset-start, currHFIdx, segLen) -decreases segLen - currHFIdx -func hopFieldsWidenLemma(raw []byte, offset int, currHFIdx int, segLen int, start int, length int, p perm) { - if currHFIdx == segLen { - ret := seq[io.IO_HF]{} - assert ret == scion.hopFields(raw, offset, currHFIdx, segLen) - assert ret == scion.hopFields(raw[start:length], offset - start, currHFIdx, segLen) - } else { - BytesToIO_HFWidenLemma(raw, offset + path.HopLen * currHFIdx, start, length) - hf1 := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHFIdx, len(raw)) - hf2 := path.BytesToIO_HF(raw[start:length], 0, offset + path.HopLen * currHFIdx - start, length - start) - assert hf1 == hf2 - - newP := (p + R53)/2 - assert R53 < newP && newP < p - hopFieldsWidenLemma(raw, offset, currHFIdx + 1, segLen, start, length, newP) - ret1 := seq[io.IO_HF]{hf1} ++ scion.hopFields(raw, offset, currHFIdx + 1, segLen) - ret2 := seq[io.IO_HF]{hf2} ++ scion.hopFields(raw[start:length], offset-start, currHFIdx + 1, segLen) - assert ret1 == scion.hopFields(raw, offset, currHFIdx, segLen) - assert ret2 == scion.hopFields(raw[start:length], offset-start, currHFIdx, segLen) - assert ret1 == ret2 - } -} - -ghost -requires 0 <= start && start <= headerOffset -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires 0 <= length && length <= len(raw) -requires scion.pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length -requires 1 <= currINFIdx && currINFIdx < 4 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) -ensures scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) == - scion.LeftSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset-start) -decreases -func leftSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, start int, length int) { - offset := scion.HopFieldOffset(scion.NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) - if currINFIdx == 1 && seg2Len > 0 { - currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset, start, length) - ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset)) - ret2 := some(scion.CurrSeg(raw[start:length], offset + path.HopLen * seg1Len - start, currINFIdx, 0, seg2Len, headerOffset - start)) - assert ret1 == reveal scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) - assert ret2 == reveal scion.LeftSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset-start) - assert ret1 == ret2 - } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { - currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset, start, length) - ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset)) - ret2 := some(scion.CurrSeg(raw[start:length], offset + path.HopLen * (seg1Len + seg2Len) - start, currINFIdx, 0, seg3Len, headerOffset - start)) - assert ret1 == reveal scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) 
- assert ret2 == reveal scion.LeftSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) - assert ret1 == ret2 - } else { - ret := none[io.IO_seg3] - assert ret == reveal scion.LeftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) - assert ret == reveal scion.LeftSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset- start) - } -} - -ghost -requires 0 <= start && start <= headerOffset -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires 0 <= length && length <= len(raw) -requires scion.pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length -requires -1 <= currINFIdx && currINFIdx < 2 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) -ensures scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) == - scion.RightSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset-start) -decreases -func rightSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, start int, length int) { - offset := scion.HopFieldOffset(scion.NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) - if currINFIdx == 1 && seg2Len > 0 && seg3Len > 0 { - currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset, start, length) - ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset)) - ret2 := some(scion.CurrSeg(raw[start:length], offset + path.HopLen * seg1Len - start, currINFIdx, seg2Len, seg2Len, headerOffset - start)) - assert ret1 == reveal scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) - assert ret2 == reveal scion.RightSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) - assert ret1 == ret2 - } else if currINFIdx == 0 && seg2Len > 0 { - currSegWidenLemma(raw, offset, currINFIdx, seg1Len, seg1Len, headerOffset, start, length) - ret1 := some(scion.CurrSeg(raw, offset, currINFIdx, seg1Len, seg1Len, headerOffset)) - ret2 := some(scion.CurrSeg(raw[start:length], offset - start, currINFIdx, seg1Len, seg1Len, headerOffset - start)) - assert ret1 == reveal scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) - assert ret2 == reveal scion.RightSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) - assert ret1 == ret2 - } else { - ret := none[io.IO_seg3] - assert ret == reveal scion.RightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) - assert ret == reveal scion.RightSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) - } -} - -ghost -requires 0 <= start && start <= headerOffset -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires 2 <= currINFIdx && currINFIdx < 5 -requires 0 <= length && length <= len(raw) -requires scion.pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) -ensures scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) == - scion.MidSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) -decreases -func midSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, headerOffset int, start int, length int) { - offset := scion.HopFieldOffset(scion.NumInfoFields(seg1Len, 
seg2Len, seg3Len), 0, headerOffset) - if currINFIdx == 4 && seg2Len > 0 { - currSegWidenLemma(raw, offset, 0, seg1Len, seg1Len, headerOffset, start, length) - ret1 := some(scion.CurrSeg(raw, offset, 0, seg1Len, seg1Len, headerOffset)) - ret2 := some(scion.CurrSeg(raw[start:length], offset - start, 0, seg1Len, seg1Len, headerOffset - start)) - assert ret1 == reveal scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) - assert ret2 == reveal scion.MidSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) - assert ret1 == ret2 - } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { - currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset, start, length) - ret1 := some(scion.CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset)) - ret2 := some(scion.CurrSeg(raw[start:length], offset + path.HopLen * (seg1Len + seg2Len) - start, currINFIdx, 0, seg3Len, headerOffset - start)) - assert ret1 == reveal scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) - assert ret2 == reveal scion.MidSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) - assert ret1 == ret2 - } else { - ret := none[io.IO_seg3] - assert ret == reveal scion.MidSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset) - assert ret == reveal scion.MidSeg(raw[start:length], currINFIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) - } } \ No newline at end of file From af719cedf5d2528ceca91a4d9f55e671305c2056 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Tue, 21 May 2024 12:36:29 +0200 Subject: [PATCH 36/57] simplify path/scion (#346) --- pkg/slayers/path/scion/base_spec.gobra | 124 ++----------------------- pkg/slayers/path/scion/decoded.go | 33 +++---- 2 files changed, 23 insertions(+), 134 deletions(-) diff --git a/pkg/slayers/path/scion/base_spec.gobra b/pkg/slayers/path/scion/base_spec.gobra index 75620a731..6310bdaec 100644 --- a/pkg/slayers/path/scion/base_spec.gobra +++ b/pkg/slayers/path/scion/base_spec.gobra @@ -191,57 +191,10 @@ pure func (s Base) IncPathSpec() (res Base) { } } -/*************** AbsBase ***************/ - -// There is a good deal of repition in this section of the file and the similarly -// named functions for the type `Base`. While for now this is not too big of a problem, -// we should find ways to streamline the definitions, ideally by having these defs only -// for the abstraction type only. - -type AbsBase adt { - AbsBase_ { - PathMeta AbsMetaHdr - NumINF int - NumHops int - } -} - -type AbsMetaHdr adt { - AbsMetaHdr_ { - // we should change the types of CurrINF and CurrHF to wider types, - // otherwise we might start getting overflow errors here when they - // are fully enabled. 
- CurrINF uint8 - CurrHF uint8 - SegLen seq[uint8] - } -} - -ghost -decreases -pure func (b Base) Abs() AbsBase { - return AbsBase_{ - PathMeta: b.PathMeta.Abs(), - NumINF: b.NumINF, - NumHops: b.NumHops, - } -} - -ghost -decreases -pure func (b MetaHdr) Abs() AbsMetaHdr { - return AbsMetaHdr_{ - CurrINF: b.CurrINF, - CurrHF: b.CurrHF, - SegLen: seq[uint8]{ b.SegLen[0], b.SegLen[1], b.SegLen[2] }, - } -} - ghost -requires len(b.PathMeta.SegLen) == 3 decreases -pure func (b AbsBase) ReverseSpec() AbsBase { - return AbsBase_ { +pure func (b Base) ReverseSpec() Base { + return Base { PathMeta: b.ReverseMetaHdrSpec(), NumINF: b.NumINF, NumHops: b.NumHops, @@ -249,10 +202,9 @@ pure func (b AbsBase) ReverseSpec() AbsBase { } ghost -requires len(b.PathMeta.SegLen) == 3 decreases -pure func (b AbsBase) ReverseMetaHdrSpec() AbsMetaHdr { - return AbsMetaHdr_ { +pure func (b Base) ReverseMetaHdrSpec() MetaHdr { + return MetaHdr { CurrINF: uint8(b.NumINF) - b.PathMeta.CurrINF - 1, CurrHF: uint8(b.NumHops) - b.PathMeta.CurrHF - 1, SegLen: b.ReverseSegLen(), @@ -260,83 +212,23 @@ pure func (b AbsBase) ReverseMetaHdrSpec() AbsMetaHdr { } ghost -requires len(b.PathMeta.SegLen) == 3 decreases -pure func (b AbsBase) ReverseSegLen() seq[uint8] { +pure func (b Base) ReverseSegLen() [3]uint8 { return (match b.NumINF { - case 2: seq[uint8]{ b.PathMeta.SegLen[1], b.PathMeta.SegLen[0], b.PathMeta.SegLen[2]} - case 3: seq[uint8]{ b.PathMeta.SegLen[2], b.PathMeta.SegLen[1], b.PathMeta.SegLen[0] } + case 2: [3]uint8{ b.PathMeta.SegLen[1], b.PathMeta.SegLen[0], b.PathMeta.SegLen[2] } + case 3: [3]uint8{ b.PathMeta.SegLen[2], b.PathMeta.SegLen[1], b.PathMeta.SegLen[0] } default: b.PathMeta.SegLen }) } -ghost -decreases -pure func (b AbsBase) ValidCurrIdxsSpec() bool { - return 0 <= b.NumINF && b.NumINF <= MaxINFs && - len(b.PathMeta.SegLen) == 3 && - 0 <= b.NumHops && b.NumHops <= MaxHops && - b.ValidCurrHfSpec() && - b.ValidCurrInfSpec() && - 0 <= b.PathMeta.SegLen[0] && b.PathMeta.SegLen[0] < MaxHops && - 0 <= b.PathMeta.SegLen[1] && b.PathMeta.SegLen[1] < MaxHops && - 0 <= b.PathMeta.SegLen[2] && b.PathMeta.SegLen[2] < MaxHops && - (b.NumINF == 1 ==> b.NumHops == int(b.PathMeta.SegLen[0])) && - (b.NumINF == 2 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1])) && - (b.NumINF == 3 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1] + b.PathMeta.SegLen[2])) && - (forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==> - b.PathMeta.SegLen[i] != 0) && - (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==> - b.PathMeta.SegLen[i] == 0) -} - -ghost -opaque -requires b.ValidCurrIdxsSpec() -decreases -pure func (b AbsBase) CurrInfMatchesCurrHF() bool { - return b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) -} - -ghost -decreases -pure func (b AbsBase) ValidCurrInfSpec() bool { - return 0 <= b.PathMeta.CurrINF && b.PathMeta.CurrINF < b.NumINF -} - -ghost -decreases -pure func (b AbsBase) ValidCurrHfSpec() bool { - return 0 <= b.PathMeta.CurrHF && b.PathMeta.CurrHF < b.NumHops -} - -ghost -requires len(s.PathMeta.SegLen) == 3 -ensures 0 <= r && r < 3 -decreases -pure func (s AbsBase) InfForHfSpec(hf uint8) (r uint8) { - return hf < s.PathMeta.SegLen[0] ? - 0 : - (hf < s.PathMeta.SegLen[0] + s.PathMeta.SegLen[1] ? 
1 : 2)
-}
-
 ghost
 requires b.ValidCurrIdxsSpec()
 ensures b.ReverseSpec().ValidCurrIdxsSpec()
 decreases
-pure func (b AbsBase) ReversingValidBaseIsValidBase() Lemma {
-	return Lemma{}
-}
-
-ghost
-ensures b.ValidCurrIdxsSpec() == b.Abs().ValidCurrIdxsSpec()
-decreases
-pure func (b Base) ValidBaseHasValidAbs() Lemma {
+pure func (b Base) ReversingValidBaseIsValidBase() Lemma {
 	return Lemma{}
 }
 
-/*************** End of AbsBase ***************/
-
 ghost
 requires b.Mem()
 ensures b.NonInitMem()
diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go
index 764a63f4d..979289781 100644
--- a/pkg/slayers/path/scion/decoded.go
+++ b/pkg/slayers/path/scion/decoded.go
@@ -218,20 +218,17 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) {
 // @ decreases
 func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) {
 	//@ ghost isValid := s.ValidCurrIdxs(ubuf)
-	/*@
-	ghost base := s.GetBase(ubuf)
-	ghost absBase := base.Abs()
-	ghost absMetaHdrAferReversingSegLen := AbsMetaHdr_ {
-		CurrINF: absBase.PathMeta.CurrINF,
-		CurrHF: absBase.PathMeta.CurrHF,
-		SegLen: absBase.ReverseSegLen(),
-	}
-	ghost absBaseAfterReversingSegLen := AbsBase_ {
-		PathMeta: absMetaHdrAferReversingSegLen,
-		NumINF: absBase.NumINF,
-		NumHops: absBase.NumHops,
-	}
-	@*/
+	//@ ghost base := s.GetBase(ubuf)
+	//@ ghost metaHdrAfterReversingSegLen := MetaHdr {
+	//@ 	CurrINF: base.PathMeta.CurrINF,
+	//@ 	CurrHF: base.PathMeta.CurrHF,
+	//@ 	SegLen: base.ReverseSegLen(),
+	//@ }
+	//@ ghost baseAfterReversingSegLen := Base {
+	//@ 	PathMeta: metaHdrAfterReversingSegLen,
+	//@ 	NumINF: base.NumINF,
+	//@ 	NumHops: base.NumHops,
+	//@ }
 	//@ unfold s.Mem(ubuf)
 	//@ unfold s.Base.Mem()
 	if s.NumINF == 0 {
@@ -265,7 +262,7 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) {
 	//@ invariant s.Mem(ubuf)
 	//@ invariant 0 <= i && i <= s.GetNumHops(ubuf)
 	//@ invariant -1 <= j && j < s.GetNumHops(ubuf)
-	//@ invariant s.GetBase(ubuf).Abs() == absBaseAfterReversingSegLen
+	//@ invariant s.GetBase(ubuf) == baseAfterReversingSegLen
 	//@ decreases j-i
 	for i, j := 0, ( /*@ unfolding s.Mem(ubuf) in (unfolding s.Base.Mem() in @*/ s.NumHops - 1 /*@ ) @*/); i < j; i, j = i+1, j-1 {
 		//@ unfold s.Mem(ubuf)
@@ -284,9 +281,9 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) {
 	//@ unfold s.Base.Mem()
 	s.PathMeta.CurrINF = uint8(s.NumINF) - s.PathMeta.CurrINF - 1
 	s.PathMeta.CurrHF = uint8(s.NumHops) - s.PathMeta.CurrHF - 1
-	//@ assert s.Base.Abs() == absBase.ReverseSpec()
-	//@ ghost if isValid { absBase.ReversingValidBaseIsValidBase() }
-	//@ assert isValid ==> s.Base.Abs().ValidCurrIdxsSpec()
+	//@ assert s.Base == base.ReverseSpec()
+	//@ ghost if isValid { base.ReversingValidBaseIsValidBase() }
+	//@ assert isValid ==> s.Base.ValidCurrIdxsSpec()
 	//@ fold s.Base.Mem()
 	//@ fold s.Mem(ubuf)
 	return s, nil

From 53f8e30390709a09122f83c942c13deb2647dc99 Mon Sep 17 00:00:00 2001
From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com>
Date: Tue, 21 May 2024 13:18:36 +0200
Subject: [PATCH 37/57] Verify assumptions in SCION.DecodeFromBytes (#345)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* progress scion.DecodeFromBytes
* revert change of call s.Path.DecodeFromBytes()
* fix verification error
* fix permission in rawPath
* establish validPktHeader in parsePath
* fix verification errors
* fixed permission and refactored EqAbsHeader
* fixed syntax errors
* fix verification error
* fix
permission * introduces additional spec to the Path interface * remove unnecessary preconditions * proof of parse path assumption * simplification in epic.DecodeFromBytes * feedback * Update pkg/slayers/path/scion/raw.go Co-authored-by: João Pereira * add quantifier to GetCurrentHopField() and GetCurrentInfoField() to avoid code changes * Apply suggestions from code review Co-authored-by: João Pereira * formatting * simplify onehop * improve io_msgterm_spec.gobra --------- Co-authored-by: João Pereira --- pkg/slayers/path/empty/empty_spec.gobra | 7 + pkg/slayers/path/epic/epic.go | 69 ++----- pkg/slayers/path/epic/epic_spec.gobra | 7 + pkg/slayers/path/hopfield.go | 20 +- pkg/slayers/path/infofield.go | 2 + pkg/slayers/path/infofield_spec.gobra | 6 +- pkg/slayers/path/io_msgterm_spec.gobra | 42 ++++- pkg/slayers/path/onehop/onehop.go | 45 ++--- pkg/slayers/path/onehop/onehop_spec.gobra | 7 + pkg/slayers/path/path.go | 11 +- pkg/slayers/path/path_spec.gobra | 7 + pkg/slayers/path/scion/base.go | 5 +- pkg/slayers/path/scion/decoded.go | 30 +-- pkg/slayers/path/scion/decoded_spec.gobra | 7 + pkg/slayers/path/scion/raw.go | 78 +++++--- pkg/slayers/path/scion/raw_spec.gobra | 220 +++++++++++++++++++++- pkg/slayers/scion.go | 35 ++-- pkg/slayers/scion_spec.gobra | 26 ++- router/dataplane.go | 48 +++-- router/io-spec-lemmas.gobra | 37 ++++ 20 files changed, 522 insertions(+), 187 deletions(-) diff --git a/pkg/slayers/path/empty/empty_spec.gobra b/pkg/slayers/path/empty/empty_spec.gobra index 99e53a26e..416c4292d 100644 --- a/pkg/slayers/path/empty/empty_spec.gobra +++ b/pkg/slayers/path/empty/empty_spec.gobra @@ -34,6 +34,13 @@ func (e Path) DowngradePerm(buf []byte) { fold e.NonInitMem() } +ghost +pure +decreases +func (p Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { + return true +} + Path implements path.Path // Definitions to allow *Path to be treated as a path.Path diff --git a/pkg/slayers/path/epic/epic.go b/pkg/slayers/path/epic/epic.go index 32c57f596..6a43efc29 100644 --- a/pkg/slayers/path/epic/epic.go +++ b/pkg/slayers/path/epic/epic.go @@ -138,7 +138,7 @@ func (p *Path) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { // DecodeFromBytes deserializes the buffer b into the Path. On failure, an error is returned, // otherwise SerializeTo will return nil. 
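// A minimal usage sketch (illustrative only; it assumes the caller already
// holds p.NonInitMem() and that b carries a serialized EPIC path):
//
//	p := &Path{}
//	// ghost: establish p.NonInitMem() here
//	if err := p.DecodeFromBytes(b); err != nil {
//		// len(b) < MetadataLen, or the embedded SCION path failed to
//		// decode; p still satisfies NonInitMem()
//	}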
// @ requires p.NonInitMem() -// @ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R40) +// @ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R42) // @ ensures len(b) < MetadataLen ==> r != nil // @ ensures r == nil ==> p.Mem(b) // @ ensures r != nil ==> p.NonInitMem() && r.ErrorMem() @@ -148,67 +148,34 @@ func (p *Path) DecodeFromBytes(b []byte) (r error) { return serrors.New("EPIC Path raw too short", "expected", int(MetadataLen), "actual", int(len(b))) } //@ unfold p.NonInitMem() - //@ slices.SplitByIndex_Bytes(b, 0, len(b), PktIDLen, R40) - //@ preserves acc(slices.AbsSlice_Bytes(b, 0, PktIDLen), R40) - //@ preserves acc(&p.PktID) - //@ preserves acc(&p.PHVF) - //@ preserves acc(&p.LHVF) - //@ ensures p.PHVF != nil && len(p.PHVF) == HVFLen - //@ ensures p.LHVF != nil && len(p.LHVF) == HVFLen - //@ ensures slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ ensures slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ decreases - //@ outline( - //@ ghost slices.Reslice_Bytes(b, 0, PktIDLen, R40) + //@ slices.SplitRange_Bytes(b, 0, PktIDLen, R42) p.PktID.DecodeFromBytes(b[:PktIDLen]) + //@ slices.CombineRange_Bytes(b, 0, PktIDLen, R42) + //@ unfold acc(slices.AbsSlice_Bytes(b, 0, len(b)), R42) p.PHVF = make([]byte, HVFLen) p.LHVF = make([]byte, HVFLen) + //@ assert forall i int :: { &b[PktIDLen:(PktIDLen+HVFLen)][i] } 0 <= i && + //@ i < len(b[PktIDLen:(PktIDLen+HVFLen)]) ==> + //@ &b[PktIDLen:(PktIDLen+HVFLen)][i] == &b[PktIDLen+i] + copy(p.PHVF, b[PktIDLen:(PktIDLen+HVFLen)] /*@, R42 @*/) //@ fold slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) + //@ assert forall i int :: { &b[(PktIDLen+HVFLen):MetadataLen][i] } 0 <= i && + //@ i < len(b[(PktIDLen+HVFLen):MetadataLen]) ==> + //@ &b[(PktIDLen+HVFLen):MetadataLen][i] == &b[(PktIDLen+HVFLen)+i] + copy(p.LHVF, b[(PktIDLen+HVFLen):MetadataLen] /*@, R42 @*/) //@ fold slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ slices.Unslice_Bytes(b, 0, PktIDLen, R40) - //@ ) - //@ slices.SplitByIndex_Bytes(b, PktIDLen, len(b), PktIDLen+HVFLen, R40) - //@ preserves acc(&p.PHVF) - //@ preserves slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ preserves acc(slices.AbsSlice_Bytes(b, PktIDLen, PktIDLen + HVFLen), R40) - //@ decreases - //@ outline( - //@ slices.Reslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, R40) - //@ unfold slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ unfold acc(slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen), R41) - copy(p.PHVF, b[PktIDLen:(PktIDLen+HVFLen)] /*@, R41 @*/) - //@ fold acc(slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen), R41) - //@ fold slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ slices.Unslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, R40) - //@ ) - //@ slices.CombineAtIndex_Bytes(b, 0, PktIDLen+HVFLen, PktIDLen, R40) - //@ slices.SplitByIndex_Bytes(b, PktIDLen+HVFLen, len(b), MetadataLen, R40) - //@ preserves acc(&p.LHVF) - //@ preserves slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ preserves acc(slices.AbsSlice_Bytes(b, PktIDLen+HVFLen, MetadataLen), R40) - //@ decreases - //@ outline( - //@ slices.Reslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, R40) - //@ unfold slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ unfold acc(slices.AbsSlice_Bytes(b[PktIDLen+HVFLen:MetadataLen], 0, HVFLen), R41) - copy(p.LHVF, b[(PktIDLen+HVFLen):MetadataLen] /*@, R41 @*/) - //@ fold acc(slices.AbsSlice_Bytes(b[PktIDLen+HVFLen:MetadataLen], 0, HVFLen), R41) - //@ fold slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ slices.Unslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, R40) - //@ ) - 
//@ slices.CombineAtIndex_Bytes(b, 0, MetadataLen, PktIDLen+HVFLen, R40) p.ScionPath = &scion.Raw{} //@ fold p.ScionPath.Base.NonInitMem() //@ fold p.ScionPath.NonInitMem() - //@ slices.Reslice_Bytes(b, MetadataLen, len(b), R40) + //@ fold acc(slices.AbsSlice_Bytes(b, 0, len(b)), R42) + //@ slices.SplitRange_Bytes(b, MetadataLen, len(b), R42) ret := p.ScionPath.DecodeFromBytes(b[MetadataLen:]) //@ ghost if ret == nil { //@ fold p.Mem(b) //@ } else { //@ fold p.NonInitMem() //@ } - //@ slices.Unslice_Bytes(b, MetadataLen, len(b), R40) - //@ slices.CombineAtIndex_Bytes(b, 0, len(b), MetadataLen, R40) + //@ slices.CombineRange_Bytes(b, MetadataLen, len(b), R42) return ret } @@ -278,17 +245,17 @@ type PktID struct { // DecodeFromBytes deserializes the buffer (raw) into the PktID. // @ requires len(raw) >= PktIDLen // @ preserves acc(i) -// @ preserves acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R41) +// @ preserves acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R42) // @ ensures 0 <= i.Timestamp // @ ensures 0 <= i.Counter // @ decreases func (i *PktID) DecodeFromBytes(raw []byte) { - //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R41) + //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R42) //@ assert forall i int :: { &raw[:4][i] } 0 <= i && i < 4 ==> &raw[:4][i] == &raw[i] i.Timestamp = binary.BigEndian.Uint32(raw[:4]) //@ assert forall i int :: { &raw[4:8][i] } 0 <= i && i < 4 ==> &raw[4:8][i] == &raw[4 + i] i.Counter = binary.BigEndian.Uint32(raw[4:8]) - //@ fold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R41) + //@ fold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R42) } // SerializeTo serializes the PktID into the buffer (b). diff --git a/pkg/slayers/path/epic/epic_spec.gobra b/pkg/slayers/path/epic/epic_spec.gobra index 46666a552..e3b58968e 100644 --- a/pkg/slayers/path/epic/epic_spec.gobra +++ b/pkg/slayers/path/epic/epic_spec.gobra @@ -105,4 +105,11 @@ pure func (p *Path) GetUnderlyingScionPathBuf(buf []byte) []byte { return unfolding acc(p.Mem(buf), _) in buf[MetadataLen:] } +ghost +pure +decreases +func (p *Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { + return true +} + (*Path) implements path.Path \ No newline at end of file diff --git a/pkg/slayers/path/hopfield.go b/pkg/slayers/path/hopfield.go index 89ceaab80..0c3e0787b 100644 --- a/pkg/slayers/path/hopfield.go +++ b/pkg/slayers/path/hopfield.go @@ -79,17 +79,13 @@ type HopField struct { // @ preserves acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R45) // @ ensures h.Mem() // @ ensures err == nil +// @ ensures unfolding h.Mem() in +// @ BytesToIO_HF(raw, 0, 0, HopLen) == h.ToIO_HF() // @ decreases func (h *HopField) DecodeFromBytes(raw []byte) (err error) { if len(raw) < HopLen { return serrors.New("HopField raw too short", "expected", HopLen, "actual", len(raw)) } - //@ preserves acc(h) - //@ preserves acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) - //@ ensures h.ConsIngress >= 0 - //@ ensures h.ConsEgress >= 0 - //@ decreases - //@ outline( //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) h.EgressRouterAlert = raw[0]&0x1 == 0x1 h.IngressRouterAlert = raw[0]&0x2 == 0x2 @@ -98,20 +94,16 @@ func (h *HopField) DecodeFromBytes(raw []byte) (err error) { h.ConsIngress = binary.BigEndian.Uint16(raw[2:4]) //@ assert &raw[4:6][0] == &raw[4] && &raw[4:6][1] == &raw[5] h.ConsEgress = binary.BigEndian.Uint16(raw[4:6]) - //@ fold acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) - //@ ) - //@ preserves acc(&h.Mac) - //@ preserves acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) - //@ decreases - //@ 
outline( - //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) //@ assert forall i int :: { &h.Mac[:][i] } 0 <= i && i < len(h.Mac[:]) ==> //@ &h.Mac[i] == &h.Mac[:][i] //@ assert forall i int :: { &raw[6:6+MacLen][i] } 0 <= i && i < len(raw[6:6+MacLen]) ==> //@ &raw[6:6+MacLen][i] == &raw[i+6] copy(h.Mac[:], raw[6:6+MacLen] /*@ , R47 @*/) + //@ assert forall i int :: {&h.Mac[:][i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == raw[6:6+MacLen][i] + //@ assert forall i int :: {&h.Mac[i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == h.Mac[i] + //@ EqualBytesImplyEqualMac(raw[6:6+MacLen], h.Mac) + //@ assert BytesToIO_HF(raw, 0, 0, HopLen) == h.ToIO_HF() //@ fold acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) - //@ ) //@ fold h.Mem() return nil } diff --git a/pkg/slayers/path/infofield.go b/pkg/slayers/path/infofield.go index b30edb937..80009c77d 100644 --- a/pkg/slayers/path/infofield.go +++ b/pkg/slayers/path/infofield.go @@ -64,6 +64,8 @@ type InfoField struct { // @ preserves acc(inf) // @ preserves acc(slices.AbsSlice_Bytes(raw, 0, InfoLen), R45) // @ ensures err == nil +// @ ensures BytesToIntermediateAbsInfoField(raw, 0, 0, InfoLen) == +// @ inf.ToIntermediateAbsInfoField() // @ decreases func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { if len(raw) < InfoLen { diff --git a/pkg/slayers/path/infofield_spec.gobra b/pkg/slayers/path/infofield_spec.gobra index acc19895b..19ddedd33 100644 --- a/pkg/slayers/path/infofield_spec.gobra +++ b/pkg/slayers/path/infofield_spec.gobra @@ -103,8 +103,10 @@ requires forall i int :: { &raw[i] } middle <= i && i < end ==> acc(&raw[i], _) decreases pure func BytesToIntermediateAbsInfoFieldHelper(raw [] byte, middle int, end int) (IntermediateAbsInfoField) { - return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> &raw[middle+2:middle+4][k] == &raw[middle+2 + k]) in - let _ := Asserting(forall k int :: {&raw[middle+4:middle+8][k]} 0 <= k && k < 4 ==> &raw[middle+4:middle+8][k] == &raw[middle+4 + k]) in + return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> + &raw[middle+2:middle+4][k] == &raw[middle+2 + k]) in + let _ := Asserting(forall k int :: {&raw[middle+4:middle+8][k]} 0 <= k && k < 4 ==> + &raw[middle+4:middle+8][k] == &raw[middle+4 + k]) in IntermediateAbsInfoField(IntermediateAbsInfoField_{ AInfo : io.IO_ainfo(binary.BigEndian.Uint32(raw[middle+4:middle+8])), UInfo : AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[middle+2:middle+4])), diff --git a/pkg/slayers/path/io_msgterm_spec.gobra b/pkg/slayers/path/io_msgterm_spec.gobra index 41e39093d..64d6bb652 100644 --- a/pkg/slayers/path/io_msgterm_spec.gobra +++ b/pkg/slayers/path/io_msgterm_spec.gobra @@ -16,7 +16,10 @@ package path -import "verification/io" +import ( + "verification/io" + . "verification/utils/definitions" +) // At the moment, we assume that all cryptographic operations performed at the code level // imply the desired properties at the IO spec level because we cannot currently prove in @@ -33,14 +36,41 @@ pure func AbsMac(mac [MacLen]byte) (io.IO_msgterm) // The following function converts a slice with at least `MacLen` elements into // an (exclusive) array containing the mac. Note that there are no permissions -// involved for accessing exclusive arrays. This functions is abstract for now -// because Gobra does not allow for array literals in pure functions, even though -// they are no more side-effectful than creating an instance of a struct type. -// This will soon be fixed in Gobra. 
+// involved for accessing exclusive arrays. ghost requires MacLen <= len(mac) requires forall i int :: { &mac[i] } 0 <= i && i < MacLen ==> acc(&mac[i], _) ensures len(res) == MacLen ensures forall i int :: { res[i] } 0 <= i && i < MacLen ==> mac[i] == res[i] decreases -pure func FromSliceToMacArray(mac []byte) (res [MacLen]byte) +pure func FromSliceToMacArray(mac []byte) (res [MacLen]byte) { + return [MacLen]byte{ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5] } +} + +ghost +requires len(mac1) == MacLen +requires forall i int :: { &mac1[i] } 0 <= i && i < MacLen ==> acc(&mac1[i], R50) +requires forall i int :: { &mac1[i] } 0 <= i && i < MacLen ==> mac1[i] == mac2[i] +ensures forall i int :: { &mac1[i] } 0 <= i && i < MacLen ==> acc(&mac1[i], R50) +ensures AbsMac(FromSliceToMacArray(mac1)) == AbsMac(mac2) +decreases +func EqualBytesImplyEqualMac(mac1 []byte, mac2 [MacLen]byte) { + mac1Arr := FromSliceToMacArray(mac1) + assert mac1Arr == mac2 + assert mac1Arr[0] == mac2[0] && + mac1Arr[1] == mac2[1] && + mac1Arr[2] == mac2[2] && + mac1Arr[3] == mac2[3] && + mac1Arr[4] == mac2[4] && + mac1Arr[5] == mac2[5] + assert len(mac1Arr) == len(mac2) + arrayCongruence(mac1Arr, mac2) +} + +// The following obviously holds. However, for the time being, it cannot be proven due to an +// incompleteness in the array encoding (https://github.com/viperproject/gobra/issues/770). +ghost +requires mac1 == mac2 +ensures AbsMac(mac1) == AbsMac(mac2) +decreases +func arrayCongruence(mac1 [MacLen]byte, mac2 [MacLen]byte) \ No newline at end of file diff --git a/pkg/slayers/path/onehop/onehop.go b/pkg/slayers/path/onehop/onehop.go index 200013ace..ea906c5f2 100644 --- a/pkg/slayers/path/onehop/onehop.go +++ b/pkg/slayers/path/onehop/onehop.go @@ -66,7 +66,7 @@ type Path struct { } // @ requires o.NonInitMem() -// @ preserves acc(slices.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(slices.AbsSlice_Bytes(data, 0, len(data)), R42) // @ ensures (len(data) >= PathLen) == (r == nil) // @ ensures r == nil ==> o.Mem(data) // @ ensures r != nil ==> o.NonInitMem() @@ -79,29 +79,23 @@ func (o *Path) DecodeFromBytes(data []byte) (r error) { } offset := 0 //@ unfold o.NonInitMem() - //@ slices.SplitByIndex_Bytes(data, 0, len(data), path.InfoLen, R41) - //@ slices.Reslice_Bytes(data, 0, path.InfoLen, R41) + //@ slices.SplitRange_Bytes(data, 0, path.InfoLen, R42) if err := o.Info.DecodeFromBytes(data[:path.InfoLen]); err != nil { // @ Unreachable() return err } - //@ slices.Unslice_Bytes(data, 0, path.InfoLen, R41) + //@ slices.CombineRange_Bytes(data,0, path.InfoLen, R42) offset += path.InfoLen - //@ slices.SplitByIndex_Bytes(data, offset, len(data), offset+path.HopLen, R41) - //@ slices.Reslice_Bytes(data, offset, offset+path.HopLen, R41) + //@ slices.SplitRange_Bytes(data, offset, offset+path.HopLen, R42) if err := o.FirstHop.DecodeFromBytes(data[offset : offset+path.HopLen]); err != nil { // @ Unreachable() return err } - //@ slices.Unslice_Bytes(data, offset, offset+path.HopLen, R41) - //@ slices.CombineAtIndex_Bytes(data, 0, offset+path.HopLen, offset, R41) + //@ slices.CombineRange_Bytes(data, offset, offset+path.HopLen, R42) offset += path.HopLen - //@ slices.SplitByIndex_Bytes(data, offset, len(data), offset+path.HopLen, R41) - //@ slices.Reslice_Bytes(data, offset, offset+path.HopLen, R41) + //@ slices.SplitRange_Bytes(data, offset, offset+path.HopLen, R42) r = o.SecondHop.DecodeFromBytes(data[offset : offset+path.HopLen]) - //@ slices.Unslice_Bytes(data, offset, offset+path.HopLen, R41) - //@ 
slices.CombineAtIndex_Bytes(data, offset, len(data), offset+path.HopLen, R41) - //@ slices.CombineAtIndex_Bytes(data, 0, len(data), offset, R41) + //@ slices.CombineRange_Bytes(data, offset, offset+path.HopLen, R42) //@ ghost if r == nil { fold o.Mem(data) } else { fold o.NonInitMem() } return r } @@ -120,32 +114,23 @@ func (o *Path) SerializeTo(b []byte /*@, ubuf []byte @*/) (err error) { } offset := 0 //@ unfold acc(o.Mem(ubuf), R1) - //@ slices.SplitByIndex_Bytes(b, 0, len(b), path.InfoLen, writePerm) - //@ slices.Reslice_Bytes(b, 0, path.InfoLen, writePerm) + //@ slices.SplitRange_Bytes(b, 0, offset+path.InfoLen, writePerm) if err := o.Info.SerializeTo(b[:offset+path.InfoLen]); err != nil { - //@ slices.Unslice_Bytes(b, 0, path.InfoLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, len(b), path.InfoLen, writePerm) + //@ slices.CombineRange_Bytes(b, 0, offset+path.InfoLen, writePerm) return err } - //@ slices.Unslice_Bytes(b, 0, path.InfoLen, writePerm) + //@ slices.CombineRange_Bytes(b, 0, offset+path.InfoLen, writePerm) offset += path.InfoLen - //@ slices.SplitByIndex_Bytes(b, offset, len(b), offset+path.HopLen, writePerm) - //@ slices.Reslice_Bytes(b, offset, offset+path.HopLen, writePerm) + //@ slices.SplitRange_Bytes(b, offset, offset+path.HopLen, writePerm) if err := o.FirstHop.SerializeTo(b[offset : offset+path.HopLen]); err != nil { - //@ slices.Unslice_Bytes(b, offset, offset+path.HopLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, offset, len(b), offset+path.HopLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, len(b), offset, writePerm) + //@ slices.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) return err } - //@ slices.Unslice_Bytes(b, offset, offset+path.HopLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, offset+path.HopLen, offset, writePerm) + //@ slices.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) offset += path.HopLen - //@ slices.SplitByIndex_Bytes(b, offset, len(b), offset+path.HopLen, writePerm) - //@ slices.Reslice_Bytes(b, offset, offset+path.HopLen, writePerm) + //@ slices.SplitRange_Bytes(b, offset, offset+path.HopLen, writePerm) err = o.SecondHop.SerializeTo(b[offset : offset+path.HopLen]) - //@ slices.Unslice_Bytes(b, offset, offset+path.HopLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, offset, len(b), offset+path.HopLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, len(b), offset, writePerm) + //@ slices.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) //@ fold acc(o.Mem(ubuf), R1) return err } diff --git a/pkg/slayers/path/onehop/onehop_spec.gobra b/pkg/slayers/path/onehop/onehop_spec.gobra index 9929d3e98..d9bcb54fd 100644 --- a/pkg/slayers/path/onehop/onehop_spec.gobra +++ b/pkg/slayers/path/onehop/onehop_spec.gobra @@ -51,4 +51,11 @@ pure func (o *Path) InferSizeUb(ghost ub []byte) (b bool) { return unfolding acc(o.Mem(ub), _) in o.Len(ub) <= len(ub) } +ghost +pure +decreases +func (p *Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { + return true +} + (*Path) implements path.Path \ No newline at end of file diff --git a/pkg/slayers/path/path.go b/pkg/slayers/path/path.go index c332ce27b..1e5e01df7 100644 --- a/pkg/slayers/path/path.go +++ b/pkg/slayers/path/path.go @@ -81,12 +81,19 @@ type Path interface { // (VerifiedSCION) There are implementations of this interface (e.g., scion.Raw) that // store b and use it as internal data. 
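	// (VerifiedSCION) Implementations that learn no additional facts while
	// decoding may discharge the IsValidResultOfDecoding postcondition below
	// with a trivial pure stub (sketch, mirroring the stubs this patch adds
	// for empty.Path, epic.Path, onehop.Path, and rawPath):
	//
	//	ghost
	//	pure
	//	decreases
	//	func (p *rawPath) IsValidResultOfDecoding(b []byte, err error) (res bool) {
	//		return true
	//	}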
//@ requires NonInitMem() - //@ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R40) + //@ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R42) //@ ensures err == nil ==> Mem(b) //@ ensures err != nil ==> err.ErrorMem() //@ ensures err != nil ==> NonInitMem() + //@ ensures err == nil ==> IsValidResultOfDecoding(b, err) //@ decreases DecodeFromBytes(b []byte) (err error) + //@ ghost + //@ pure + //@ requires Mem(b) + //@ requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), R42) + //@ decreases + //@ IsValidResultOfDecoding(b []byte, err error) (res bool) // Reverse reverses a path such that it can be used in the reversed direction. // XXX(shitz): This method should possibly be moved to a higher-level path manipulation package. //@ requires Mem(underlyingBuf) @@ -219,7 +226,7 @@ func (p *rawPath) SerializeTo(b []byte /*@, ghost underlyingBuf []byte @*/) (e e } // @ requires p.NonInitMem() -// @ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R40) +// @ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R42) // @ ensures p.Mem(b) // @ ensures e == nil // @ decreases diff --git a/pkg/slayers/path/path_spec.gobra b/pkg/slayers/path/path_spec.gobra index f621c1f6d..060edeb82 100644 --- a/pkg/slayers/path/path_spec.gobra +++ b/pkg/slayers/path/path_spec.gobra @@ -37,6 +37,13 @@ func (p *rawPath) DowngradePerm(ghost buf []byte) { fold p.NonInitMem() } +ghost +pure +decreases +func (p *rawPath) IsValidResultOfDecoding(b []byte, err error) (res bool) { + return true +} + (*rawPath) implements Path /** End of rawPath spec **/ diff --git a/pkg/slayers/path/scion/base.go b/pkg/slayers/path/scion/base.go index 003a16d38..7ab5bb869 100644 --- a/pkg/slayers/path/scion/base.go +++ b/pkg/slayers/path/scion/base.go @@ -85,6 +85,8 @@ type Base struct { // @ ensures r == nil ==> // @ s.Mem() && s.DecodeFromBytesSpec(data) && s.InfsMatchHfs() // @ ensures len(data) < MetaLen ==> r != nil +// posts for IO: +// @ ensures r == nil ==> s.GetBase().EqAbsHeader(data) // @ decreases func (s *Base) DecodeFromBytes(data []byte) (r error) { // PathMeta takes care of bounds check. @@ -140,7 +142,6 @@ func (s *Base) DecodeFromBytes(data []byte) (r error) { //@ assume int(s.PathMeta.SegLen[i]) >= 0 s.NumHops += int(s.PathMeta.SegLen[i]) } - // We must check the validity of NumHops. It is possible to fit more than 64 hops in // the length of a scion header. Yet a path of more than 64 hops cannot be followed to // the end because CurrHF is only 6 bits long. @@ -148,6 +149,8 @@ func (s *Base) DecodeFromBytes(data []byte) (r error) { //@ defer fold s.NonInitMem() return serrors.New("NumHops too large", "NumHops", s.NumHops, "Maximum", MaxHops) } + //@ assert s.PathMeta.EqAbsHeader(data) + //@ assert s.EqAbsHeader(data) //@ fold s.Mem() return nil } diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go index 979289781..0e98a614c 100644 --- a/pkg/slayers/path/scion/decoded.go +++ b/pkg/slayers/path/scion/decoded.go @@ -37,7 +37,7 @@ type Decoded struct { // DecodeFromBytes fully decodes the SCION path into the corresponding fields. 
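// The minimal buffer size follows directly from the decoded base header
// (sketch; the bound matches the loop invariants below):
//
//	minLen := MetaLen + s.NumINF*path.InfoLen + s.NumHops*path.HopLen
//	// decoding succeeds only if len(data) >= minLen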
// @ requires s.NonInitMem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R42) // @ ensures r == nil ==> ( // @ s.Mem(data) && // @ let lenD := len(data) in @@ -68,7 +68,7 @@ func (s *Decoded) DecodeFromBytes(data []byte) (r error) { offset := MetaLen s.InfoFields = make([]path.InfoField, ( /*@ unfolding s.Base.Mem() in @*/ s.NumINF)) //@ assert len(data) >= MetaLen + s.Base.GetNumINF() * path.InfoLen + s.Base.GetNumHops() * path.HopLen - //@ sl.SplitByIndex_Bytes(data, 0, len(data), offset, R41) + //@ sl.SplitByIndex_Bytes(data, 0, len(data), offset, R43) //@ invariant acc(&s.InfoFields) //@ invariant acc(s.Base.Mem(), R1) @@ -77,20 +77,20 @@ func (s *Decoded) DecodeFromBytes(data []byte) (r error) { //@ invariant len(data) >= MetaLen + s.Base.GetNumINF() * path.InfoLen + s.Base.GetNumHops() * path.HopLen //@ invariant offset == MetaLen + i * path.InfoLen //@ invariant forall j int :: { &s.InfoFields[j] } 0 <= j && j < s.Base.GetNumINF() ==> acc(&s.InfoFields[j]) - //@ invariant acc(sl.AbsSlice_Bytes(data, 0, offset), R41) - //@ invariant acc(sl.AbsSlice_Bytes(data, offset, len(data)), R41) + //@ invariant acc(sl.AbsSlice_Bytes(data, 0, offset), R43) + //@ invariant acc(sl.AbsSlice_Bytes(data, offset, len(data)), R43) //@ decreases s.Base.GetNumINF() - i for i := 0; i < /*@ unfolding acc(s.Base.Mem(), _) in @*/ s.NumINF; i++ { - //@ sl.SplitByIndex_Bytes(data, offset, len(data), offset + path.InfoLen, R41) - //@ sl.Reslice_Bytes(data, offset, offset + path.InfoLen, R41) + //@ sl.SplitByIndex_Bytes(data, offset, len(data), offset + path.InfoLen, R43) + //@ sl.Reslice_Bytes(data, offset, offset + path.InfoLen, R43) if err := s.InfoFields[i].DecodeFromBytes(data[offset : offset+path.InfoLen]); err != nil { // (VerifiedSCION) infofield.DecodeFromBytes guarantees that err == nil. // Thus, this branch is not reachable. 
return err
 		}
 		//@ assert len(data[offset:offset+path.InfoLen]) == path.InfoLen
-		//@ sl.Unslice_Bytes(data, offset, offset + path.InfoLen, R41)
-		//@ sl.CombineAtIndex_Bytes(data, 0, offset + path.InfoLen, offset, R41)
+		//@ sl.Unslice_Bytes(data, offset, offset + path.InfoLen, R43)
+		//@ sl.CombineAtIndex_Bytes(data, 0, offset + path.InfoLen, offset, R43)
 		offset += path.InfoLen
 	}
 	s.HopFields = make([]path.HopField, ( /*@ unfolding s.Base.Mem() in @*/ s.NumHops))
@@ -102,23 +102,23 @@ func (s *Decoded) DecodeFromBytes(data []byte) (r error) {
 	//@ invariant forall j int :: { &s.HopFields[j] } 0 <= j && j < i ==> s.HopFields[j].Mem()
 	//@ invariant len(data) >= MetaLen + s.Base.GetNumINF() * path.InfoLen + s.Base.GetNumHops() * path.HopLen
 	//@ invariant offset == MetaLen + s.Base.GetNumINF() * path.InfoLen + i * path.HopLen
-	//@ invariant acc(sl.AbsSlice_Bytes(data, 0, offset), R41)
-	//@ invariant acc(sl.AbsSlice_Bytes(data, offset, len(data)), R41)
+	//@ invariant acc(sl.AbsSlice_Bytes(data, 0, offset), R43)
+	//@ invariant acc(sl.AbsSlice_Bytes(data, offset, len(data)), R43)
 	//@ decreases s.Base.GetNumHops() - i
 	for i := 0; i < /*@ unfolding acc(s.Base.Mem(), R2) in @*/ s.NumHops; i++ {
-		//@ sl.SplitByIndex_Bytes(data, offset, len(data), offset + path.HopLen, R41)
-		//@ sl.Reslice_Bytes(data, offset, offset + path.HopLen, R41)
+		//@ sl.SplitByIndex_Bytes(data, offset, len(data), offset + path.HopLen, R43)
+		//@ sl.Reslice_Bytes(data, offset, offset + path.HopLen, R43)
 		if err := s.HopFields[i].DecodeFromBytes(data[offset : offset+path.HopLen]); err != nil {
 			// (VerifiedSCION) hopfield.DecodeFromBytes guarantees that err == nil.
 			// Thus, this branch should not be reachable.
 			return err
 		}
 		//@ assert len(data[offset:offset+path.HopLen]) == path.HopLen
-		//@ sl.Unslice_Bytes(data, offset, offset + path.HopLen, R41)
-		//@ sl.CombineAtIndex_Bytes(data, 0, offset + path.HopLen, offset, R41)
+		//@ sl.Unslice_Bytes(data, offset, offset + path.HopLen, R43)
+		//@ sl.CombineAtIndex_Bytes(data, 0, offset + path.HopLen, offset, R43)
 		offset += path.HopLen
 	}
-	//@ sl.CombineAtIndex_Bytes(data, 0, len(data), offset, R41)
+	//@ sl.CombineAtIndex_Bytes(data, 0, len(data), offset, R43)
 	//@ fold s.Mem(data)
 	return nil
 }
diff --git a/pkg/slayers/path/scion/decoded_spec.gobra b/pkg/slayers/path/scion/decoded_spec.gobra
index fda3419f3..c69d1355a 100644
--- a/pkg/slayers/path/scion/decoded_spec.gobra
+++ b/pkg/slayers/path/scion/decoded_spec.gobra
@@ -22,6 +22,13 @@ import (
 	"github.com/scionproto/scion/verification/utils/slices"
 )
 
+ghost
+pure
+decreases
+func (p *Decoded) IsValidResultOfDecoding(b []byte, err error) (res bool) {
+	return true
+}
+
 (*Decoded) implements path.Path
 
 /**** Predicates ****/
diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go
index a3ba6a3fb..afd4fb17c 100644
--- a/pkg/slayers/path/scion/raw.go
+++ b/pkg/slayers/path/scion/raw.go
@@ -35,9 +35,12 @@ type Raw struct {

// DecodeFromBytes only decodes the PathMetaHeader. Otherwise, nothing is decoded and the path is
// simply kept as raw bytes.
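// Individual fields can then be decoded on demand via the accessors below
// (sketch; the index 0 and the buffer name data are illustrative):
//
//	s := &Raw{}
//	// ghost: establish s.NonInitMem() first
//	if err := s.DecodeFromBytes(data); err == nil {
//		info, _ := s.GetInfoField(0 /*@, data @*/)
//		hop, _ := s.GetHopField(0 /*@, data @*/)
//		_, _ = info, hop
//	}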
// @ requires s.NonInitMem()
-// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40)
+// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R42)
 // @ ensures res == nil ==> s.Mem(data)
 // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem())
+// posts for IO:
+// @ ensures res == nil ==> s.EqAbsHeader(data) &&
+// @ 	s.InfsMatchHfs(data) && s.SegsInBounds(data)
 // @ decreases
 func (s *Raw) DecodeFromBytes(data []byte) (res error) {
 	//@ unfold s.NonInitMem()
@@ -325,40 +328,49 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) {
 }
 
 // GetInfoField returns the InfoField at a given index.
-// @ requires acc(s.Mem(ubuf), R10)
 // @ requires 0 <= idx
 // @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R10)
-// @ ensures acc(s.Mem(ubuf), R10)
-// @ ensures (idx < old(s.GetNumINF(ubuf))) == (err == nil)
+// @ preserves acc(s.Mem(ubuf), R10)
+// @ ensures (idx < s.GetNumINF(ubuf)) == (err == nil)
+// @ ensures err == nil ==> s.CorrectlyDecodedInfWithIdx(ubuf, idx, ifield)
 // @ ensures err != nil ==> err.ErrorMem()
 // @ decreases
 func (s *Raw) GetInfoField(idx int /*@, ghost ubuf []byte @*/) (ifield path.InfoField, err error) {
-	//@ unfold acc(s.Mem(ubuf), R10)
-	//@ unfold acc(s.Base.Mem(), R11)
+	//@ unfold acc(s.Mem(ubuf), R11)
+	//@ unfold acc(s.Base.Mem(), R12)
 	if idx >= s.NumINF {
 		e := serrors.New("InfoField index out of bounds", "max", s.NumINF-1, "actual", idx)
-		//@ fold acc(s.Base.Mem(), R11)
-		//@ fold acc(s.Mem(ubuf), R10)
+		//@ fold acc(s.Base.Mem(), R12)
+		//@ fold acc(s.Mem(ubuf), R11)
 		return path.InfoField{}, e
 	}
-	//@ fold acc(s.Base.Mem(), R11)
-	//@ fold acc(s.Mem(ubuf), R10)
+	//@ fold acc(s.Base.Mem(), R12)
 	infOffset := MetaLen + idx*path.InfoLen
 	info /*@@@*/ := path.InfoField{}
-	//@ s.RawRangePerm(ubuf, infOffset, infOffset+path.InfoLen, R10)
+	//@ sl.SplitRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R20)
 	if err := info.DecodeFromBytes(s.Raw[infOffset : infOffset+path.InfoLen]); err != nil {
 		//@ Unreachable()
 		return path.InfoField{}, err
 	}
-	//@ s.UndoRawRangePerm(ubuf, infOffset, infOffset+path.InfoLen, R10)
+	//@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R21)
+	//@ unfold acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56)
+	//@ unfold acc(sl.AbsSlice_Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56)
+	//@ assert info.ToIntermediateAbsInfoField() ==
+	//@ 	path.BytesToIntermediateAbsInfoField(ubuf, 0, infOffset, len(ubuf))
+	//@ fold acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56)
+	//@ fold acc(sl.AbsSlice_Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56)
+	//@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R21)
+	//@ fold acc(s.Mem(ubuf), R11)
+	//@ assert reveal s.CorrectlyDecodedInfWithIdx(ubuf, idx, info)
 	return info, nil
 }
 
 // GetCurrentInfoField is a convenience method that returns the current info field pointed to by the
 // CurrINF index in the path meta header.
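// It is equivalent to calling GetInfoField with the CurrINF index (sketch):
//
//	info, err := s.GetCurrentInfoField( /*@ ubuf @*/ )
//	// err == nil exactly when the index is in range, i.e. s.ValidCurrINF(ubuf)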
// @ preserves acc(s.Mem(ubuf), R8) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R2) -// @ ensures (r == nil) == (s.GetCurrINF(ubuf) < s.GetNumINF(ubuf)) +// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R9) +// @ ensures (r == nil) == s.ValidCurrINF(ubuf) +// @ ensures r == nil ==> s.CorrectlyDecodedInf(ubuf, res) // @ ensures r != nil ==> r.ErrorMem() // @ decreases func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoField, r error) { @@ -370,6 +382,8 @@ func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoFie //@ assume 0 <= idx //@ fold acc(s.Base.Mem(), R10) //@ fold acc(s.Mem(ubuf), R9) + //@ assert forall res path.InfoField :: {s.CorrectlyDecodedInf(ubuf, res)} s.ValidCurrINF(ubuf) ==> + //@ reveal s.CorrectlyDecodedInf(ubuf, res) == reveal s.CorrectlyDecodedInfWithIdx(ubuf, idx, res) return s.GetInfoField(idx /*@, ubuf @*/) } @@ -436,39 +450,49 @@ func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @ // GetHopField returns the HopField at a given index. // @ requires 0 <= idx -// @ preserves acc(s.Mem(ubuf), R10) // @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R10) -// @ ensures (idx < old(s.GetNumHops(ubuf))) == (r == nil) +// @ preserves acc(s.Mem(ubuf), R10) +// @ ensures (idx < s.GetNumHops(ubuf)) == (r == nil) +// @ ensures r == nil ==> s.CorrectlyDecodedHfWithIdx(ubuf, idx, res) // @ ensures r != nil ==> r.ErrorMem() // @ decreases func (s *Raw) GetHopField(idx int /*@, ghost ubuf []byte @*/) (res path.HopField, r error) { - //@ unfold acc(s.Mem(ubuf), R10) - //@ unfold acc(s.Base.Mem(), R11) + //@ unfold acc(s.Mem(ubuf), R11) + //@ unfold acc(s.Base.Mem(), R12) if idx >= s.NumHops { err := serrors.New("HopField index out of bounds", "max", s.NumHops-1, "actual", idx) - //@ fold acc(s.Base.Mem(), R11) - //@ fold acc(s.Mem(ubuf), R10) + //@ fold acc(s.Base.Mem(), R12) + //@ fold acc(s.Mem(ubuf), R11) return path.HopField{}, err } hopOffset := MetaLen + s.NumINF*path.InfoLen + idx*path.HopLen - //@ fold acc(s.Base.Mem(), R11) - //@ fold acc(s.Mem(ubuf), R10) + //@ fold acc(s.Base.Mem(), R12) hop /*@@@*/ := path.HopField{} - //@ s.RawRangePerm(ubuf, hopOffset, hopOffset+path.HopLen, R10) + //@ sl.SplitRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, R20) if err := hop.DecodeFromBytes(s.Raw[hopOffset : hopOffset+path.HopLen]); err != nil { //@ Unreachable() return path.HopField{}, err } - //@ s.UndoRawRangePerm(ubuf, hopOffset, hopOffset+path.HopLen, R10) //@ unfold hop.Mem() + //@ sl.CombineRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, R21) + //@ unfold acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) + //@ unfold acc(sl.AbsSlice_Bytes(ubuf[hopOffset : hopOffset+path.HopLen], 0, path.HopLen), R56) + //@ assert hop.ToIO_HF() == + //@ path.BytesToIO_HF(ubuf, 0, hopOffset, len(ubuf)) + //@ fold acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) + //@ fold acc(sl.AbsSlice_Bytes(ubuf[hopOffset : hopOffset+path.HopLen], 0, path.HopLen), R56) + //@ sl.CombineRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, R21) + //@ fold acc(s.Mem(ubuf), R11) + //@ assert reveal s.CorrectlyDecodedHfWithIdx(ubuf, idx, hop) return hop, nil } // GetCurrentHopField is a convenience method that returns the current hop field pointed to by the // CurrHF index in the path meta header. 
// @ preserves acc(s.Mem(ubuf), R8) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R2) -// @ ensures (r == nil) == (s.GetCurrHF(ubuf) < s.GetNumHops(ubuf)) +// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R9) +// @ ensures (r == nil) == s.ValidCurrHF(ubuf) +// @ ensures r == nil ==> s.CorrectlyDecodedHf(ubuf, res) // @ ensures r != nil ==> r.ErrorMem() // @ decreases func (s *Raw) GetCurrentHopField( /*@ ghost ubuf []byte @*/ ) (res path.HopField, r error) { @@ -480,6 +504,8 @@ func (s *Raw) GetCurrentHopField( /*@ ghost ubuf []byte @*/ ) (res path.HopField //@ assume 0 <= idx //@ fold acc(s.Base.Mem(), R10) //@ fold acc(s.Mem(ubuf), R9) + //@ assert forall res path.HopField :: {s.CorrectlyDecodedHf(ubuf, res)} s.ValidCurrHF(ubuf) ==> + //@ reveal s.CorrectlyDecodedHf(ubuf, res) == reveal s.CorrectlyDecodedHfWithIdx(ubuf, idx, res) return s.GetHopField(idx /*@, ubuf @*/) } diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index 36018256a..a2936b719 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -41,6 +41,16 @@ pred (s *Raw) Mem(buf []byte) { (*Raw) implements path.Path +ghost +pure +requires acc(s.Mem(buf), _) +requires acc(sl.AbsSlice_Bytes(buf, 0, len(buf)), R42) +decreases +func (s *Raw) IsValidResultOfDecoding(buf []byte, err error) (res bool) { + return s.EqAbsHeader(buf) && + s.InfsMatchHfs(buf) && s.SegsInBounds(buf) +} + /**** Stubs ****/ /** * This method is not part of the original SCION codebase. @@ -125,6 +135,22 @@ pure func (s *Raw) ValidCurrIdxs(ghost ub []byte) bool { s.Base.ValidCurrIdxs() } +ghost +requires acc(s.Mem(ub), _) +decreases +pure func (s *Raw) InfsMatchHfs(ghost ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + s.Base.InfsMatchHfs() +} + +ghost +requires acc(s.Mem(ub), _) +decreases +pure func (s *Raw) CurrInfMatchesCurrHF(ghost ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + s.Base.GetBase().CurrInfMatchesCurrHF() +} + ghost requires acc(s.Mem(ub), _) requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) @@ -145,10 +171,10 @@ pure func (s *Raw) GetIsXoverSpec(ub []byte) bool { ghost requires acc(s.Mem(ub), _) decreases -pure func (s *Raw) InBounds(ub []byte) bool { +pure func (s *Raw) SegsInBounds(ub []byte) bool { return unfolding acc(s.Mem(ub), _) in unfolding acc(s.Base.Mem(), _) in - s.PathMeta.InBounds() + s.PathMeta.SegsInBounds() } /**** End of Stubs ****/ @@ -326,7 +352,7 @@ func (s *Raw) RawBufferNonInitMem() []byte { ghost decreases pure func NumInfoFields(seg1Len int, seg2Len int, seg3Len int) int { - return seg3Len > 0 ? 3 : (seg2Len > 0 ? 2 : 1) + return seg3Len > 0 ? 3 : (seg2Len > 0 ? 2 : (seg1Len > 0 ? 
1 : 0)) } ghost @@ -616,6 +642,36 @@ func ValidPktMetaHdrSublice(raw []byte, idx int) { fold acc(sl.AbsSlice_Bytes(raw[:idx], 0, idx), R56) } +ghost +requires acc(s.Mem(ub), R54) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +requires s.InfsMatchHfs(ub) +requires s.ValidCurrINF(ub) +requires s.ValidCurrHF(ub) +requires s.SegsInBounds(ub) +requires s.CurrInfMatchesCurrHF(ub) +requires s.EqAbsHeader(ub) +ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +ensures acc(s.Mem(ub), R54) +ensures validPktMetaHdr(ub) +ensures s.EqAbsHeader(ub) +decreases +func (s *Raw) EstablishValidPktMetaHdr(ghost ub []byte) { + unfold acc(s.Mem(ub), R55) + unfold acc(s.Base.Mem(), R56) + assert MetaLen <= len(ub) + assert s.Base.GetBase() == RawBytesToBase(ub) + seg1 := int(s.Base.PathMeta.SegLen[0]) + seg2 := int(s.Base.PathMeta.SegLen[1]) + seg3 := int(s.Base.PathMeta.SegLen[2]) + assert 0 < seg1 + assert s.ValidCurrIdxs(ub) + assert pktLen(seg1, seg2, seg3, MetaLen) <= len(ub) + assert reveal validPktMetaHdr(ub) + fold acc(s.Base.Mem(), R56) + fold acc(s.Mem(ub), R55) +} + ghost decreases pure func AbsSetInfoField(oldPkt io.IO_pkt2, info path.IntermediateAbsInfoField) (newPkt io.IO_pkt2) { @@ -678,6 +734,65 @@ pure func (s *Raw) IsLastHopSpec(ub []byte) bool { int(s.PathMeta.CurrHF) == (s.NumHops - 1) } +ghost +opaque +requires acc(s.Mem(ub), _) +requires 0 <= idx && idx < s.GetNumINF(ub) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s *Raw) CorrectlyDecodedInfWithIdx(ub []byte, idx int, info path.InfoField) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + let infOffset := MetaLen + idx*path.InfoLen in + infOffset+path.InfoLen <= len(ub) && + info.ToIntermediateAbsInfoField() == + path.BytesToIntermediateAbsInfoField(ub, 0, infOffset, len(ub)) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires s.ValidCurrINF(ub) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s *Raw) CorrectlyDecodedInf(ub []byte, info path.InfoField) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + let infOffset := MetaLen + int(s.Base.PathMeta.CurrINF)*path.InfoLen in + infOffset+path.InfoLen <= len(ub) && + info.ToIntermediateAbsInfoField() == + path.BytesToIntermediateAbsInfoField(ub, 0, infOffset, len(ub)) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires 0 <= idx && idx < s.GetNumHops(ub) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s *Raw) CorrectlyDecodedHfWithIdx(ub []byte, idx int, hop path.HopField) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + let hopOffset := MetaLen + int(s.NumINF)*path.InfoLen + idx*path.HopLen in + hopOffset+path.HopLen <= len(ub) && + hop.ToIO_HF() == path.BytesToIO_HF(ub, 0, hopOffset, len(ub)) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires s.ValidCurrHF(ub) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s *Raw) CorrectlyDecodedHf(ub []byte, hop path.HopField) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + let hopOffset := MetaLen + int(s.NumINF)*path.InfoLen + + int(s.Base.PathMeta.CurrHF)*path.HopLen in + hopOffset+path.HopLen <= len(ub) && + hop.ToIO_HF() == path.BytesToIO_HF(ub, 0, hopOffset, len(ub)) +} + ghost preserves acc(s.Mem(ubuf), R55) preserves s.IsLastHopSpec(ubuf) @@ -731,6 +846,68 @@ func (s *Raw) XoverLemma(ubuf []byte) { assert pkt.LeftSeg != none[io.IO_seg2] } +ghost +opaque +requires 
len(pkt.CurrSeg.Future) > 0 +decreases +pure func (s *Raw) EqAbsHopField(pkt io.IO_pkt2, hop io.IO_HF) bool { + return let currHF := pkt.CurrSeg.Future[0] in + hop.InIF2 == currHF.InIF2 && + hop.EgIF2 == currHF.EgIF2 && + hop.HVF == currHF.HVF +} + +ghost +opaque +decreases +pure func (s *Raw) EqAbsInfoField(pkt io.IO_pkt2, info path.IntermediateAbsInfoField) bool { + return let currseg := pkt.CurrSeg in + info.AInfo == currseg.AInfo && + info.UInfo == currseg.UInfo && + info.ConsDir == currseg.ConsDir && + info.Peer == currseg.Peer +} + +ghost +preserves acc(s.Mem(ubuf), R53) +preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R53) +preserves validPktMetaHdr(ubuf) +preserves s.EqAbsHeader(ubuf) +preserves len(s.absPkt(ubuf).CurrSeg.Future) > 0 +preserves s.ValidCurrINF(ubuf) +preserves s.ValidCurrHF(ubuf) +preserves s.CorrectlyDecodedInf(ubuf, info) +preserves s.CorrectlyDecodedHf(ubuf, hop) +ensures s.EqAbsInfoField(s.absPkt(ubuf), info.ToIntermediateAbsInfoField()) +ensures s.EqAbsHopField(s.absPkt(ubuf), hop.ToIO_HF()) +decreases +func (s *Raw) DecodingLemma(ubuf []byte, info path.InfoField, hop path.HopField) { + reveal validPktMetaHdr(ubuf) + metaHdr := RawBytesToMetaHdr(ubuf) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) + offset := HopFieldOffset(numINF, 0, MetaLen) + reveal s.CorrectlyDecodedInf(ubuf, info) + reveal s.CorrectlyDecodedHf(ubuf, hop) + pkt := reveal s.absPkt(ubuf) + currseg := reveal CurrSeg(ubuf, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) + hopFields := hopFields(ubuf, offset + path.HopLen * prevSegLen, 0, segLen) + hopFieldsBytePositionsLemma(ubuf, offset + path.HopLen * prevSegLen, 0, segLen, R54) + reveal hopFieldsBytePositions(ubuf, offset + path.HopLen * prevSegLen, 0, segLen, hopFields) + assert currseg.Future[0] == hopFields[currHfIdx-prevSegLen] + assert hopFields[currHfIdx-prevSegLen] == + path.BytesToIO_HF(ubuf, 0, offset + path.HopLen * currHfIdx, len(ubuf)) + assert currseg.Future[0] == path.BytesToIO_HF(ubuf, 0, offset + path.HopLen * currHfIdx, len(ubuf)) + assert reveal s.EqAbsInfoField(s.absPkt(ubuf), info.ToIntermediateAbsInfoField()) + assert reveal s.EqAbsHopField(s.absPkt(ubuf), hop.ToIO_HF()) +} + ghost requires path.InfoFieldOffset(currInfIdx, 0) + path.InfoLen <= offset requires 0 < segLen @@ -879,4 +1056,41 @@ func XoverRightSeg(raw []byte, currInfIdx int, currHfIdx int, seg1Len int, seg2L assert absIncPathSeg(currseg) == nextseg assert nextseg == get(rightseg) assert absIncPathSeg(currseg) == get(rightseg) +} + +ghost +opaque +requires 0 <= offset +requires 0 <= currHFIdx && currHFIdx <= segLen +requires len(hops) == segLen - currHFIdx +requires offset + path.HopLen * segLen <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func hopFieldsBytePositions(raw []byte, offset int, currHFIdx int, segLen int, hops seq[io.IO_HF]) bool { + return forall i int :: { hops[i] } 0 <= i && i < len(hops) ==> + hops[i] == path.BytesToIO_HF(raw, 0, offset + path.HopLen * (currHFIdx + i), len(raw)) +} + +ghost +requires R55 < p +requires 0 <= offset +requires 0 <= currHFIdx && currHFIdx <= segLen +requires offset + path.HopLen * segLen <= len(raw) +preserves 
acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +ensures hopFieldsBytePositions(raw, offset, currHFIdx, segLen, hopFields(raw, offset, currHFIdx, segLen)) +decreases segLen - currHFIdx +func hopFieldsBytePositionsLemma( + raw []byte, + offset int, + currHFIdx int, + segLen int, + p perm) { + newP := (p + R55)/2 + hopfields := hopFields(raw, offset, currHFIdx, segLen) + if (currHFIdx != segLen) { + hopFieldsBytePositionsLemma(raw, offset, currHFIdx + 1, segLen, newP) + hopfieldsInc := hopFields(raw, offset, currHFIdx + 1, segLen) + assert reveal hopFieldsBytePositions(raw, offset, currHFIdx + 1, segLen, hopfieldsInc) + } + assert reveal hopFieldsBytePositions(raw, offset, currHFIdx, segLen, hopfields) } \ No newline at end of file diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index cd8cd906c..fb06278f6 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -321,9 +321,7 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // @ preserves df != nil && df.Mem() // @ ensures res == nil ==> s.Mem(data) // @ ensures res == nil && typeOf(s.GetPath(data)) == *scion.Raw ==> -// @ ValidPktMetaHdr(data) -// @ ensures res == nil && typeOf(s.GetPath(data)) == *scion.Raw ==> -// @ s.EqAbsHeader(data) +// @ s.EqAbsHeader(data) && s.ValidScionInitSpec(data) // @ ensures res == nil ==> s.EqPathType(data) // @ ensures res != nil ==> s.NonInitMem() && res.ErrorMem() // @ decreases @@ -352,6 +350,11 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ preserves CmnHdrLen <= len(data) && acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) // @ ensures s.DstAddrType.Has3Bits() && s.SrcAddrType.Has3Bits() // @ ensures 0 <= s.PathType && s.PathType < 256 + // @ ensures path.Type(GetPathType(data)) == s.PathType + // @ ensures L4ProtocolType(GetNextHdr(data)) == s.NextHdr + // @ ensures GetLength(data) == int(s.HdrLen * LineLen) + // @ ensures GetAddressOffset(data) == + // @ CmnHdrLen + 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() // @ decreases // @ outline( // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) @@ -409,10 +412,10 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ fold s.NonInitMem() return err } - // @ sl.SplitRange_Bytes(data, offset, offset+pathLen, R40) + // @ sl.SplitRange_Bytes(data, offset, offset+pathLen, R41) err = s.Path.DecodeFromBytes(data[offset : offset+pathLen]) if err != nil { - // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R40) + // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R41) // @ unfold s.HeaderMem(data[CmnHdrLen:]) // @ s.PathPoolMemExchange(s.PathType, s.Path) // @ fold s.NonInitMem() @@ -425,13 +428,21 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ } s.Contents = data[:hdrBytes] s.Payload = data[hdrBytes:] - - // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R40) - // @ fold s.Mem(data) - - // @ TemporaryAssumeForIO(typeOf(s.GetPath(data)) == *scion.Raw ==> ValidPktMetaHdr(data)) - // @ TemporaryAssumeForIO(typeOf(s.GetPath(data)) == *scion.Raw ==> s.EqAbsHeader(data)) - // @ TemporaryAssumeForIO(s.EqPathType(data)) + // @ fold acc(s.Mem(data), R54) + // @ ghost if(typeOf(s.GetPath(data)) == (*scion.Raw)) { + // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R56) + // @ unfold acc(sl.AbsSlice_Bytes(data[offset : offset+pathLen], 0, len(data[offset : offset+pathLen])), R56) + // @ unfold acc(s.Path.(*scion.Raw).Mem(data[offset : offset+pathLen]), R55) + // @ 
assert reveal s.EqAbsHeader(data) + // @ assert reveal s.ValidScionInitSpec(data) + // @ fold acc(s.Path.Mem(data[offset : offset+pathLen]), R55) + // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R56) + // @ fold acc(sl.AbsSlice_Bytes(data[offset : offset+pathLen], 0, len(data[offset : offset+pathLen])), R56) + // @ } + // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R41) + // @ assert typeOf(s.GetPath(data)) == *scion.Raw ==> s.EqAbsHeader(data) && s.ValidScionInitSpec(data) + // @ assert reveal s.EqPathType(data) + // @ fold acc(s.Mem(data), 1-R54) return nil } diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index d4125611c..7834eb78a 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -413,10 +413,10 @@ func (s *SCION) EqAbsHeader(ub []byte) bool { let low := CmnHdrLen+s.AddrHdrLenSpecInternal() in let high := s.HdrLen*LineLen in GetAddressOffset(ub) == low && - GetLength(ub) == int(high) && + GetLength(ub) == int(high) && // Might be worth introducing EqAbsHeader as an interface method on Path // to avoid doing these casts, especially when we add support for EPIC. - typeOf(s.Path) == (*scion.Raw) && + typeOf(s.Path) == (*scion.Raw) && unfolding acc(s.Path.Mem(ub[low:high]), _) in unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in let _ := Asserting(forall k int :: {&ub[low:high][k]} 0 <= k && k < high ==> @@ -424,13 +424,29 @@ func (s *SCION) EqAbsHeader(ub []byte) bool { let _ := Asserting(forall k int :: {&ub[low:high][:scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> &ub[low:high][:scion.MetaLen][k] == &ub[low:high][k]) in let metaHdr := scion.DecodedFrom(binary.BigEndian.Uint32(ub[low:high][:scion.MetaLen])) in - let seg1 := int(metaHdr.SegLen[0]) in - let seg2 := int(metaHdr.SegLen[1]) in - let seg3 := int(metaHdr.SegLen[2]) in + let seg1 := int(metaHdr.SegLen[0]) in + let seg2 := int(metaHdr.SegLen[1]) in + let seg3 := int(metaHdr.SegLen[2]) in s.Path.(*scion.Raw).Base.GetBase() == scion.Base{metaHdr, scion.NumInfoFields(seg1, seg2, seg3), seg1 + seg2 + seg3} } +// Describes a SCION packet that was successfully decoded by `DecodeFromBytes`. +ghost +opaque +pure +requires acc(s.Mem(ub), _) +decreases +func (s *SCION) ValidScionInitSpec(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + let low := CmnHdrLen+s.AddrHdrLenSpecInternal() in + let high := s.HdrLen*LineLen in + typeOf(s.Path) == (*scion.Raw) && + unfolding acc(s.Path.Mem(ub[low:high]), _) in + s.Path.(*scion.Raw).Base.InfsMatchHfs() && + s.Path.(*scion.Raw).Base.GetMetaHdr().SegsInBounds() +} + // Checks if the common path header is valid in the serialized scion packet. 
ghost
opaque

diff --git a/router/dataplane.go b/router/dataplane.go
index 039bfeb4c..1e41785eb 100644
--- a/router/dataplane.go
+++ b/router/dataplane.go
@@ -1743,8 +1743,8 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) {
 // contracts for IO-spec
 // @ requires p.d.DpAgreesWithSpec(dp)
 // @ requires dp.Valid()
-// @ requires (typeOf(p.scionLayer.GetPath(ub)) == *scion.Raw) ==> slayers.ValidPktMetaHdr(ub)
-// @ requires (typeOf(p.scionLayer.GetPath(ub)) == *scion.Raw) ==> p.scionLayer.EqAbsHeader(ub)
+// @ requires (typeOf(p.scionLayer.GetPath(ub)) == *scion.Raw) ==>
+// @ 	p.scionLayer.EqAbsHeader(ub) && p.scionLayer.ValidScionInitSpec(ub)
 // @ requires p.scionLayer.EqPathType(ub)
 // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>;
 // @ requires let absPkt := absIO_val(p.rawPkt, p.ingressID) in
@@ -1907,8 +1907,8 @@ func (p *scionPacketProcessor) packSCMP(
 // @ requires acc(&p.path, R20)
 // @ requires p.path === p.scionLayer.GetPath(ub)
 // @ requires acc(&p.hopField) && acc(&p.infoField)
-// @ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1)
-// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1)
+// @ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1)
+// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1)
 // @ ensures acc(&p.d, R50)
 // @ ensures acc(p.scionLayer.Mem(ub), R6)
 // @ ensures acc(&p.path, R20)
@@ -1927,8 +1927,7 @@ func (p *scionPacketProcessor) packSCMP(
 // @ 	p.path.GetCurrINF(ubPath) < p.path.GetNumINF(ubPath))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
-// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ requires len(absPkt(ub).CurrSeg.Future) > 0
+// @ requires p.scionLayer.EqAbsHeader(ub) && p.scionLayer.ValidScionInitSpec(ub)
 // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
 // @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0
 // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub))
@@ -1945,6 +1944,9 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce
 	// @ sl.SplitRange_Bytes(ub, startP, endP, R2)
 	// @ ghost defer sl.CombineRange_Bytes(ub, startP, endP, R2)
 	p.hopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ )
+	// (VerifiedSCION) TODO: This is directly the postcondition of the call above and
+	// should be true but due to an incompleteness we have to assume it for now
+	// @ TemporaryAssumeForIO(err == nil ==> p.path.CorrectlyDecodedHf(ubPath, p.hopField))
 	// @ fold p.d.validResult(processResult{}, false)
 	if err != nil {
 		// TODO(lukedirtwalker) parameter problem invalid path?
@@ -1955,10 +1957,20 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce
 		// TODO(lukedirtwalker) parameter problem invalid path?
 		return processResult{}, err
 	}
-	// @ TemporaryAssumeForIO(slayers.ValidPktMetaHdr(ub))
+	// (VerifiedSCION) This assumption cannot be proven at this point.
+	// A check might be missing.
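+	// Intuitively, CurrInfMatchesCurrHF should state that CurrINF is the index
+	// of the segment that contains CurrHF; DecodeFromBytes does not currently
+	// establish this, hence the assumption below.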
+ // @ TemporaryAssumeForIO(p.path.CurrInfMatchesCurrHF(ubPath)) + // @ p.EstablishEqAbsHeader(ub, startP, endP) + // @ p.path.EstablishValidPktMetaHdr(ubPath) + // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ absPktFutureLemma(ub) - // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) - // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub))) + // @ p.path.DecodingLemma(ubPath, p.infoField, p.hopField) + // @ assert reveal p.path.EqAbsInfoField(p.path.absPkt(ubPath), + // @ p.infoField.ToIntermediateAbsInfoField()) + // @ assert reveal p.path.EqAbsHopField(p.path.absPkt(ubPath), + // @ p.hopField.ToIO_HF()) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) + // @ assert reveal p.EqAbsInfoField(absPkt(ub)) return processResult{}, nil } @@ -3153,7 +3165,7 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // contracts for IO-spec // @ requires p.d.DpAgreesWithSpec(dp) // @ requires dp.Valid() -// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) && p.scionLayer.EqPathType(ub) +// @ requires p.scionLayer.EqAbsHeader(ub) && p.scionLayer.EqPathType(ub) && p.scionLayer.ValidScionInitSpec(ub) // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; // @ requires let absPkt := absIO_val(ub, p.ingressID) in // @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) @@ -3167,6 +3179,10 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ decreases 0 if sync.IgnoreBlockingForTermination() // @ #backend[stateConsolidationMode(6)] func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { + if r, err := p.parsePath( /*@ ub @*/ ); err != nil { + // @ p.scionLayer.DowngradePerm(ub) + return r, err /*@, false, absReturnErr(r) @*/ + } // @ ghost var oldPkt io.IO_pkt2 // @ ghost if(slayers.IsSupportedPkt(ub)) { // @ absIO_valLemma(ub, p.ingressID) @@ -3176,10 +3192,6 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ oldPkt = absPkt(ub) // @ } // @ nextPkt := oldPkt - if r, err := p.parsePath( /*@ ub @*/ ); err != nil { - // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false, absReturnErr(r) @*/ - } if r, err := p.validateHopExpiry(); err != nil { // @ p.scionLayer.DowngradePerm(ub) return r, err /*@, false, absReturnErr(r) @*/ @@ -4025,9 +4037,7 @@ func (p *scionPacketProcessor) prepareSCMP( // @ ensures reterr == nil ==> retl != nil // @ ensures reterr == nil ==> base.Mem(data) // @ ensures reterr == nil && typeOf(base.GetPath(data)) == *scion.Raw ==> -// @ slayers.ValidPktMetaHdr(data) -// @ ensures reterr == nil && typeOf(base.GetPath(data)) == *scion.Raw ==> -// @ base.EqAbsHeader(data) +// @ base.EqAbsHeader(data) && base.ValidScionInitSpec(data) // @ ensures reterr == nil ==> base.EqPathType(data) // @ ensures forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> // @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data))) @@ -4063,9 +4073,7 @@ func decodeLayers(data []byte, base *slayers.SCION, // @ invariant acc(sl.AbsSlice_Bytes(oldData, 0, len(oldData)), R39) // @ invariant base.Mem(oldData) // @ invariant typeOf(base.GetPath(oldData)) == *scion.Raw ==> - // @ 
slayers.ValidPktMetaHdr(oldData) - // @ invariant typeOf(base.GetPath(oldData)) == *scion.Raw ==> - // @ base.EqAbsHeader(oldData) + // @ base.EqAbsHeader(oldData) && base.ValidScionInitSpec(oldData) // @ invariant base.EqPathType(oldData) // @ invariant 0 < len(opts) ==> 0 <= i0 && i0 <= len(opts) // @ invariant forall i int :: {&opts[i]} 0 <= i && i < len(opts) ==> acc(&opts[i], R10) diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index 53c7167f6..d1878e906 100644 --- a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -205,6 +205,43 @@ func (p *scionPacketProcessor) IngressIDNotZeroLemma(pkt io.IO_pkt2, egressID ui reveal p.NoBouncingPkt(pkt) } +ghost +requires 0 <= start && start <= end && end <= len(ub) +requires acc(p.scionLayer.Mem(ub), R55) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) +requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) +requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +requires p.path === p.scionLayer.GetPath(ub) +requires start == p.scionLayer.PathStartIdx(ub) +requires end == p.scionLayer.PathEndIdx(ub) +requires p.scionLayer.EqAbsHeader(ub) +requires p.scionLayer.ValidScionInitSpec(ub) +ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) +ensures acc(p.scionLayer.Mem(ub), R55) +ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) +ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +ensures start == p.scionLayer.PathStartIdx(ub) +ensures end == p.scionLayer.PathEndIdx(ub) +ensures p.path.EqAbsHeader(ub[start:end]) +ensures p.path.InfsMatchHfs(ub[start:end]) +ensures p.path.SegsInBounds(ub[start:end]) +ensures p.scionLayer.ValidHeaderOffset(ub, len(ub)) +decreases +func (p* scionPacketProcessor) EstablishEqAbsHeader(ub []byte, start int, end int) { + unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) + unfold acc(p.scionLayer.Mem(ub), R56) + unfold acc(p.path.Mem(ub[start:end]), R56) + reveal p.scionLayer.EqAbsHeader(ub) + reveal p.scionLayer.ValidScionInitSpec(ub) + assert reveal p.scionLayer.ValidHeaderOffset(ub, len(ub)) + assert p.path.EqAbsHeader(ub[start:end]) + fold acc(p.path.Mem(ub[start:end]), R56) + fold acc(p.scionLayer.Mem(ub), R56) + fold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) + fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +} + ghost requires 0 <= start && start <= end && end <= len(ub) requires acc(p.scionLayer.Mem(ub), R55) From 2c5998716453569dad59a8c7162616770dc962f1 Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Tue, 21 May 2024 20:02:20 +0200 Subject: [PATCH 38/57] Drop assumption in parsePath (#348) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * drop assumption in parsePath * Update router/dataplane.go Co-authored-by: João Pereira * rename arrayCongruence() to AbsMacArrayCongruence() --------- Co-authored-by: João Pereira --- pkg/slayers/path/io_msgterm_spec.gobra | 4 ++-- router/dataplane.go | 13 +++++++++---- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/pkg/slayers/path/io_msgterm_spec.gobra b/pkg/slayers/path/io_msgterm_spec.gobra index 64d6bb652..601db4004 100644 --- a/pkg/slayers/path/io_msgterm_spec.gobra +++ b/pkg/slayers/path/io_msgterm_spec.gobra @@ -64,7 +64,7 @@ func EqualBytesImplyEqualMac(mac1 []byte, mac2 [MacLen]byte) { mac1Arr[4] == mac2[4] && mac1Arr[5] == mac2[5] assert len(mac1Arr) == 
len(mac2) - arrayCongruence(mac1Arr, mac2) + AbsMacArrayCongruence(mac1Arr, mac2) } // The following obviously holds. However, for the time being, it cannot be proven due to an @@ -73,4 +73,4 @@ ghost requires mac1 == mac2 ensures AbsMac(mac1) == AbsMac(mac2) decreases -func arrayCongruence(mac1 [MacLen]byte, mac2 [MacLen]byte) \ No newline at end of file +func AbsMacArrayCongruence(mac1 [MacLen]byte, mac2 [MacLen]byte) \ No newline at end of file diff --git a/router/dataplane.go b/router/dataplane.go index 1e41785eb..d83723848 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1943,10 +1943,15 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // @ ghost ubPath := ub[startP:endP] // @ sl.SplitRange_Bytes(ub, startP, endP, R2) // @ ghost defer sl.CombineRange_Bytes(ub, startP, endP, R2) - p.hopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ) - // (VerifiedSCION) TODO: This is directly the postcondition of the call above and - // should be true but due to an incompleteness we have to assume it for now - // @ TemporaryAssumeForIO(err == nil ==> p.path.CorrectlyDecodedHf(ubPath, p.hopField)) + // (VerifiedSCION) Due to an incompleteness (https://github.com/viperproject/gobra/issues/770), + // we introduce a temporary variable to be able to call `path.AbsMacArrayCongruence()`. + var tmpHopField path.HopField + tmpHopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ) + p.hopField = tmpHopField + // @ path.AbsMacArrayCongruence(p.hopField.Mac, tmpHopField.Mac) + // @ assert p.hopField.ToIO_HF() == tmpHopField.ToIO_HF() + // @ assert err == nil ==> reveal p.path.CorrectlyDecodedHf(ubPath, tmpHopField) + // @ assert err == nil ==> reveal p.path.CorrectlyDecodedHf(ubPath, p.hopField) // @ fold p.d.validResult(processResult{}, false) if err != nil { // TODO(lukedirtwalker) parameter problem invalid path? 
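The workaround above amounts to routing the result of the call through a named temporary so that the congruence lemma can be invoked on both names. A minimal, runnable Go sketch of the pattern follows; hopField, absMac, and getCurrentHopField are illustrative stand-ins (absMac plays the role of the abstract, uninterpreted AbsMac of the IO specification), not the project's API.

package main

import "fmt"

const macLen = 6

type hopField struct{ Mac [macLen]byte }

// absMac stands in for the abstract MAC function of the IO specification.
func absMac(mac [macLen]byte) string { return fmt.Sprintf("%x", mac) }

func getCurrentHopField() (hopField, error) {
	return hopField{Mac: [macLen]byte{1, 2, 3, 4, 5, 6}}, nil
}

func main() {
	var p struct{ hopField hopField }
	// As in the patch: assign via a temporary, then relate the two names.
	tmpHopField, err := getCurrentHopField()
	p.hopField = tmpHopField
	// Runtime counterpart of path.AbsMacArrayCongruence(p.hopField.Mac, tmpHopField.Mac):
	// trivially true here, but the verifier needs the explicit lemma because
	// AbsMac is abstract at the SMT level.
	fmt.Println(err == nil, absMac(p.hopField.Mac) == absMac(tmpHopField.Mac))
}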
From 9dd6b9e94ba69d21e89c12f3beedc4e418b262e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Sat, 25 May 2024 10:44:00 +0200 Subject: [PATCH 39/57] Use Gobra's built-in ghost fields (#337) * add ghost fields * fix type errors so far * backup * clean-up * convert more adt types to structs * make read not trusted * Update router/dataplane_concurrency_model.gobra * cleanup * change equality * rever changes to === * clean-up --- pkg/slayers/path/hopfield_spec.gobra | 20 +-- pkg/slayers/path/scion/raw.go | 48 +++---- pkg/slayers/path/scion/raw_spec.gobra | 45 +++--- pkg/slayers/path/scion/widen-lemma.gobra | 2 +- pkg/slayers/scion.go | 12 +- router/dataplane.go | 134 +++++++++++------- router/dataplane_concurrency_model.gobra | 18 +-- router/dataplane_spec.gobra | 2 +- router/dataplane_spec_test.gobra | 6 +- router/io-spec-abstract-transitions.gobra | 63 ++++---- router/io-spec-atomic-events.gobra | 42 +++--- router/io-spec-lemmas.gobra | 4 +- router/io-spec.gobra | 6 +- router/widen-lemma.gobra | 2 +- .../dependencies/errors/errors_spec.gobra | 1 - .../x/net/internal/socket/socket.gobra | 5 +- .../dependencies/syscall/syscall_unix.gobra | 1 - verification/io/hopfields.gobra | 2 +- verification/io/io-spec.gobra | 20 +-- verification/io/other_defs.gobra | 4 +- verification/io/packets.gobra | 3 +- verification/io/router.gobra | 6 +- verification/io/router_events.gobra | 2 +- verification/io/segments.gobra | 3 +- verification/io/xover.gobra | 12 +- .../utils/ghost_sync/ghost-mutex.gobra | 60 ++++++++ 26 files changed, 307 insertions(+), 216 deletions(-) create mode 100644 verification/utils/ghost_sync/ghost-mutex.gobra diff --git a/pkg/slayers/path/hopfield_spec.gobra b/pkg/slayers/path/hopfield_spec.gobra index 311a60a66..543ea1185 100644 --- a/pkg/slayers/path/hopfield_spec.gobra +++ b/pkg/slayers/path/hopfield_spec.gobra @@ -54,19 +54,19 @@ pure func BytesToIO_HF(raw [] byte, start int, middle int, end int) (io.IO_HF) { let egif2 := binary.BigEndian.Uint16(raw[middle+4:middle+6]) in let op_inif2 := ifsToIO_ifs(inif2) in let op_egif2 := ifsToIO_ifs(egif2) in - io.IO_HF(io.IO_HF_{ - InIF2 : op_inif2, - EgIF2 : op_egif2, - HVF : AbsMac(FromSliceToMacArray(raw[middle+6:middle+6+MacLen])), - }) + io.IO_HF_ { + InIF2: op_inif2, + EgIF2: op_egif2, + HVF: AbsMac(FromSliceToMacArray(raw[middle+6:middle+6+MacLen])), + } } ghost decreases pure func (h HopField) ToIO_HF() (io.IO_HF) { - return io.IO_HF(io.IO_HF_{ - InIF2 : ifsToIO_ifs(h.ConsIngress), - EgIF2 : ifsToIO_ifs(h.ConsEgress), - HVF : AbsMac(h.Mac), - }) + return io.IO_HF_ { + InIF2: ifsToIO_ifs(h.ConsIngress), + EgIF2: ifsToIO_ifs(h.ConsEgress), + HVF: AbsMac(h.Mac), + } } diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index afd4fb17c..48998c4aa 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -227,19 +227,19 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // @ requires validPktMetaHdr(ubuf) // @ requires len(s.absPkt(ubuf).CurrSeg.Future) > 0 // @ requires s.GetIsXoverSpec(ubuf) ==> -// @ s.absPkt(ubuf).LeftSeg != none[io.IO_seg3] +// @ s.absPkt(ubuf).LeftSeg != none[io.IO_seg3] // @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) // @ ensures old(unfolding s.Mem(ubuf) in unfolding -// @ s.Base.Mem() in (s.NumINF <= 0 || int(s.PathMeta.CurrHF) >= s.NumHops-1)) ==> r != nil +// @ s.Base.Mem() in (s.NumINF <= 0 || int(s.PathMeta.CurrHF) >= s.NumHops-1)) ==> r != nil // @ ensures r == nil ==> s.Mem(ubuf) // @ ensures r != nil ==> 
s.NonInitMem() // @ ensures r != nil ==> r.ErrorMem() // post for IO: // @ ensures r == nil ==> s.EqAbsHeader(ubuf) && validPktMetaHdr(ubuf) // @ ensures r == nil && old(s.GetIsXoverSpec(ubuf)) ==> -// @ s.absPkt(ubuf) == AbsXover(old(s.absPkt(ubuf))) +// @ s.absPkt(ubuf) == AbsXover(old(s.absPkt(ubuf))) // @ ensures r == nil && !old(s.GetIsXoverSpec(ubuf)) ==> -// @ s.absPkt(ubuf) == AbsIncPath(old(s.absPkt(ubuf))) +// @ s.absPkt(ubuf) == AbsIncPath(old(s.absPkt(ubuf))) // @ decreases func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ unfold s.Mem(ubuf) @@ -267,7 +267,7 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ oldoffsetWithHops := oldOffset + path.HopLen * oldPrevSegLen //@ oldHfIdxSeg := oldCurrHfIdx-oldPrevSegLen //@ WidenCurrSeg(ubuf, oldoffsetWithHops + MetaLen, oldCurrInfIdx, oldHfIdxSeg, - //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) + //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) @@ -295,28 +295,28 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ assert currHfIdx == oldCurrHfIdx + 1 //@ ghost if(currInfIdx == oldCurrInfIdx) { - //@ IncCurrSeg(tail, oldoffsetWithHops, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) - //@ WidenCurrSeg(ubuf, oldoffsetWithHops + MetaLen, oldCurrInfIdx, oldHfIdxSeg + 1, - //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) - //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ assert reveal s.absPkt(ubuf) == AbsIncPath(oldAbsPkt) + //@ IncCurrSeg(tail, oldoffsetWithHops, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) + //@ WidenCurrSeg(ubuf, oldoffsetWithHops + MetaLen, oldCurrInfIdx, oldHfIdxSeg + 1, + //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ assert reveal s.absPkt(ubuf) == AbsIncPath(oldAbsPkt) //@ } else { - //@ segLen := LengthOfCurrSeg(currHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ prevSegLen := LengthOfPrevSeg(currHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ segLen := LengthOfCurrSeg(currHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ prevSegLen := LengthOfPrevSeg(currHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) //@ offsetWithHops := oldOffset + path.HopLen * prevSegLen + MetaLen //@ hfIdxSeg := currHfIdx-prevSegLen - //@ XoverSegNotNone(tail, oldCurrInfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ XoverCurrSeg(tail, oldCurrInfIdx + 1, oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ XoverLeftSeg(tail, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ XoverMidSeg(tail, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ XoverRightSeg(tail, oldCurrInfIdx, oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ WidenCurrSeg(ubuf, offsetWithHops, currInfIdx, 
hfIdxSeg, segLen, MetaLen, MetaLen, len(ubuf)) - //@ WidenLeftSeg(ubuf, currInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ WidenMidSeg(ubuf, currInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ WidenRightSeg(ubuf, currInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ assert reveal s.absPkt(ubuf) == AbsXover(oldAbsPkt) + //@ XoverSegNotNone(tail, oldCurrInfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ XoverCurrSeg(tail, oldCurrInfIdx + 1, oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ XoverLeftSeg(tail, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ XoverMidSeg(tail, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ XoverRightSeg(tail, oldCurrInfIdx, oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ WidenCurrSeg(ubuf, offsetWithHops, currInfIdx, hfIdxSeg, segLen, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, currInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, currInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, currInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ assert reveal s.absPkt(ubuf) == AbsXover(oldAbsPkt) //@ } //@ fold acc(sl.AbsSlice_Bytes(tail, 0, len(tail)), R50) diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index a2936b719..1c5822a91 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -447,7 +447,7 @@ pure func segment(raw []byte, peer bool, segLen int) (res io.IO_seg2) { return let hopfields := hopFields(raw, offset, 0, segLen) in - io.IO_seg2(io.IO_seg3_{ + io.IO_seg3_ { AInfo :ainfo, UInfo : uinfo, ConsDir : consDir, @@ -455,7 +455,7 @@ pure func segment(raw []byte, Past : segPast(hopfields, currHfIdx - 1), Future : segFuture(hopfields, currHfIdx), History : segHistory(hopfields, currHfIdx - 1), - }) + } } ghost @@ -573,12 +573,12 @@ pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { let prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in let numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) in let offset := HopFieldOffset(numINF, 0, MetaLen) in - io.IO_pkt2(io.IO_Packet2{ + io.IO_Packet2 { CurrSeg : CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen), LeftSeg : LeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len , seg3Len, MetaLen), MidSeg : MidSeg(raw, currInfIdx + 2, seg1Len, seg2Len , seg3Len, MetaLen), RightSeg : RightSeg(raw, currInfIdx - 1, seg1Len, seg2Len , seg3Len, MetaLen), - }) + } } // In the future, this should probably use AbsMetaHdr as @@ -675,15 +675,15 @@ func (s *Raw) EstablishValidPktMetaHdr(ghost ub []byte) { ghost decreases pure func AbsSetInfoField(oldPkt io.IO_pkt2, info path.IntermediateAbsInfoField) (newPkt io.IO_pkt2) { - return let newCurrSeg := io.IO_seg3(io.IO_seg3_{ + return let newCurrSeg := io.IO_seg3_ { info.AInfo, info.UInfo, info.ConsDir, info.Peer, oldPkt.CurrSeg.Past, oldPkt.CurrSeg.Future, - oldPkt.CurrSeg.History}) in - io.IO_pkt2(io.IO_Packet2{newCurrSeg, oldPkt.LeftSeg, oldPkt.MidSeg, oldPkt.RightSeg}) + oldPkt.CurrSeg.History} in + io.IO_Packet2{newCurrSeg, oldPkt.LeftSeg, oldPkt.MidSeg, oldPkt.RightSeg} } ghost @@ -691,24 +691,24 @@ requires oldPkt.LeftSeg != none[io.IO_seg2] requires len(oldPkt.CurrSeg.Future) > 0 decreases pure func AbsXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { - 
return io.IO_pkt2( - io.IO_Packet2{ - get(oldPkt.LeftSeg), - oldPkt.MidSeg, - oldPkt.RightSeg, - some(absIncPathSeg(oldPkt.CurrSeg))}) + return io.IO_Packet2 { + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(absIncPathSeg(oldPkt.CurrSeg)), + } } ghost requires len(oldPkt.CurrSeg.Future) > 0 decreases pure func AbsIncPath(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { - return io.IO_pkt2( - io.IO_Packet2{ - absIncPathSeg(oldPkt.CurrSeg), - oldPkt.LeftSeg, - oldPkt.MidSeg, - oldPkt.RightSeg}) + return io.IO_Packet2 { + absIncPathSeg(oldPkt.CurrSeg), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg, + } } ghost @@ -722,7 +722,8 @@ pure func absIncPathSeg(currseg io.IO_seg3) io.IO_seg3 { Peer: currseg.Peer, Past: seq[io.IO_HF]{currseg.Future[0]} ++ currseg.Past, Future: currseg.Future[1:], - History: seq[io.IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History} + History: seq[io.IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History, + } } ghost @@ -852,9 +853,7 @@ requires len(pkt.CurrSeg.Future) > 0 decreases pure func (s *Raw) EqAbsHopField(pkt io.IO_pkt2, hop io.IO_HF) bool { return let currHF := pkt.CurrSeg.Future[0] in - hop.InIF2 == currHF.InIF2 && - hop.EgIF2 == currHF.EgIF2 && - hop.HVF == currHF.HVF + hop == currHF } ghost diff --git a/pkg/slayers/path/scion/widen-lemma.gobra b/pkg/slayers/path/scion/widen-lemma.gobra index 61ba9d1ea..1d14e8013 100644 --- a/pkg/slayers/path/scion/widen-lemma.gobra +++ b/pkg/slayers/path/scion/widen-lemma.gobra @@ -110,7 +110,7 @@ func widenBytesToIO_HF(raw []byte, middle int, start int, length int) { unfold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R55) hfBytes1 := path.BytesToIO_HF(raw, 0, middle, len(raw)) hfBytes2 := path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) - assert hfBytes1 === hfBytes2 + assert hfBytes1 == hfBytes2 fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) fold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R55) } diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index fb06278f6..6abb98b89 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -233,7 +233,7 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // @ unfold acc(s.Mem(ubuf), R0) // @ defer fold acc(s.Mem(ubuf), R0) // @ sl.SplitRange_Bytes(ubuf, int(CmnHdrLen+s.AddrHdrLen(nil, true)), int(s.HdrLen*LineLen), writePerm) - // @ ghost defer sl.CombineRange_Bytes(ubuf, int(CmnHdrLen+s.AddrHdrLen(nil, true)), int(s.HdrLen*LineLen), writePerm) + // @ ghost defer sl.CombineRange_Bytes(ubuf, int(CmnHdrLen+s.AddrHdrLenSpecInternal()), int(s.HdrLen*LineLen), writePerm) scnLen := CmnHdrLen + s.AddrHdrLen( /*@ nil, true @*/ ) + s.Path.Len( /*@ ubuf[CmnHdrLen+s.AddrHdrLen(nil, true) : s.HdrLen*LineLen] @*/ ) if scnLen > MaxHdrLen { return serrors.New("header length exceeds maximum", @@ -284,8 +284,8 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // @ sl.CombineRange_Bytes(buf, 10, 12, writePerm) // @ ghost sPath := s.Path - // @ ghost pathSlice := ubuf[CmnHdrLen+s.AddrHdrLen(nil, true) : s.HdrLen*LineLen] - // @ sl.CombineRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLen(nil, true), int(s.HdrLen*LineLen), R10) + // @ ghost pathSlice := ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen] + // @ sl.CombineRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLenSpecInternal(), int(s.HdrLen*LineLen), R10) // Serialize address header. 
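 	// The address header consists of DstIA and SrcIA (addr.IABytes each),
 	// followed by the variable-length destination and source host addresses,
 	// i.e., 2*addr.IABytes + DstAddrType.Length() + SrcAddrType.Length() bytes
 	// in total, matching AddrHdrLenSpecInternal above.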
// @ sl.SplitRange_Bytes(buf, CmnHdrLen, len(buf), writePerm) @@ -293,7 +293,7 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO if err := s.SerializeAddrHdr(buf[CmnHdrLen:] /*@ , ubuf[CmnHdrLen:] @*/); err != nil { // @ sl.CombineRange_Bytes(buf, CmnHdrLen, len(buf), writePerm) // @ sl.CombineRange_Bytes(ubuf, CmnHdrLen, len(ubuf), R10) - // @ sl.SplitRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLen(nil, true), int(s.HdrLen*LineLen), R10) + // @ sl.SplitRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLenSpecInternal(), int(s.HdrLen*LineLen), R10) // @ sl.CombineRange_Bytes(uSerBufN, 0, scnLen, writePerm) // @ b.RestoreMem(uSerBufN) return err @@ -302,7 +302,7 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // @ sl.CombineRange_Bytes(buf, CmnHdrLen, len(buf), writePerm) // @ sl.CombineRange_Bytes(ubuf, CmnHdrLen, len(ubuf), R10) - // @ sl.SplitRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLen(nil, true), int(s.HdrLen*LineLen), R10) + // @ sl.SplitRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLenSpecInternal(), int(s.HdrLen*LineLen), R10) // Serialize path header. // @ sl.SplitRange_Bytes(buf, offset, len(buf), writePerm) tmp := s.Path.SerializeTo(buf[offset:] /*@, pathSlice @*/) @@ -926,7 +926,7 @@ func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err er // @ acc(&s.RawSrcAddr) && acc(&s.RawDstAddr)) // @ decreases func (s *SCION) DecodeAddrHdr(data []byte) (res error) { - // @ ghost l := s.AddrHdrLen(nil, true) + // @ ghost l := s.AddrHdrLenSpecInternal() if len(data) < s.AddrHdrLen( /*@ nil, true @*/ ) { return serrors.New("provided buffer is too small", "expected", s.AddrHdrLen( /*@ nil, true @*/ ), "actual", len(data)) diff --git a/router/dataplane.go b/router/dataplane.go index d83723848..cefcb0cc0 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -67,6 +67,7 @@ import ( "github.com/scionproto/scion/router/control" // @ . "github.com/scionproto/scion/verification/utils/definitions" // @ fl "github.com/scionproto/scion/verification/utils/floats" + // @ gsync "github.com/scionproto/scion/verification/utils/ghost_sync" // @ sl "github.com/scionproto/scion/verification/utils/slices" // @ "github.com/scionproto/scion/verification/utils/seqs" // @ socketspec "golang.org/x/net/internal/socket/" @@ -140,7 +141,6 @@ type BatchConn interface { // @ ensures err == nil ==> // @ forall i int :: { &msgs[i] } 0 <= i && i < n ==> // @ MsgToAbsVal(&msgs[i], ingressID) == old(MultiReadBioIO_val(place, n)[i]) - // TODO (VerifiedSCION): uint16 or option[io.IO_ifs] for ingress ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place @*/) (n int, err error) // @ requires acc(addr.Mem(), _) // @ requires acc(Mem(), _) @@ -153,14 +153,15 @@ type BatchConn interface { // performance reasons. 
// @ requires len(msgs) == 1 // @ requires acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() + // preconditions for IO-spec: + // @ requires MsgToAbsVal(&msgs[0], egressID) == ioAbsPkts + // @ requires io.token(place) && io.CBioIO_bio3s_send(place, ioAbsPkts) // @ ensures acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() // @ ensures err == nil ==> 0 <= n && n <= len(msgs) // @ ensures err != nil ==> err.ErrorMem() - // contracts for IO-spec - // @ requires MsgToAbsVal(&msgs[0], egressID) == ioAbsPkts - // @ requires io.token(place) && io.CBioIO_bio3s_send(place, ioAbsPkts) - // (VerifiedSCION) the permission to the protocol must always be returned, otherwise the router could not continue - // after failing to send a packet. + // postconditions for IO-spec: + // (VerifiedSCION) the permission to the protocol must always be returned, + // otherwise the router cannot continue after failing to send a packet. // @ ensures io.token(old(io.dp3s_iospec_bio3s_send_T(place, ioAbsPkts))) WriteBatch(msgs underlayconn.Messages, flags int /*@, ghost egressID uint16, ghost place io.Place, ghost ioAbsPkts io.IO_val @*/) (n int, err error) // @ requires Mem() @@ -177,10 +178,9 @@ type BatchConn interface { // Currently, only the following features are supported: // - initializing connections; MUST be done prior to calling Run type DataPlane struct { - // (VerifiedSCION) this is morally ghost - // It is stored in the dataplane in order to retain - // knowledge that macFactory will not fail - // @ key *[]byte + // (VerifiedSCION) This is stored in the dataplane in order to retain + // knowledge that macFactory will not fail. + // @ ghost key *[]byte external map[uint16]BatchConn linkTypes map[uint16]topology.LinkType neighborIAs map[uint16]addr.IA @@ -238,7 +238,7 @@ func (e scmpError) Error() string { // @ requires d.LocalIA().IsZero() // @ requires !ia.IsZero() // @ preserves d.mtx.LockP() -// @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ preserves d.mtx.LockInv() == MutexInvariant! // @ ensures acc(d.Mem(), OutMutexPerm) // @ ensures !d.IsRunning() // @ ensures e == nil @@ -276,7 +276,7 @@ func (d *DataPlane) SetIA(ia addr.IA) (e error) { // @ requires len(key) > 0 // @ requires sl.AbsSlice_Bytes(key, 0, len(key)) // @ preserves d.mtx.LockP() -// @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ preserves d.mtx.LockInv() == MutexInvariant! // @ ensures acc(d.Mem(), OutMutexPerm) // @ ensures !d.IsRunning() // @ ensures res == nil ==> d.KeyIsSet() @@ -337,7 +337,7 @@ func (d *DataPlane) SetKey(key []byte) (res error) { // @ requires conn != nil && conn.Mem() // @ requires ip.Mem() // @ preserves d.mtx.LockP() -// @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ preserves d.mtx.LockInv() == MutexInvariant! // @ ensures acc(d.Mem(), OutMutexPerm) // @ ensures !d.IsRunning() // @ decreases 0 if sync.IgnoreBlockingForTermination() @@ -376,7 +376,7 @@ func (d *DataPlane) AddInternalInterface(conn BatchConn, ip net.IP) error { // @ preserves acc(d.Mem(), OutMutexPerm) // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() -// @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ preserves d.mtx.LockInv() == MutexInvariant! 
// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddExternalInterface(ifID uint16, conn BatchConn) error { d.mtx.Lock() @@ -421,7 +421,7 @@ func (d *DataPlane) AddExternalInterface(ifID uint16, conn BatchConn) error { // @ preserves acc(d.Mem(), OutMutexPerm) // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() -// @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ preserves d.mtx.LockInv() == MutexInvariant! // @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddNeighborIA(ifID uint16, remote addr.IA) error { d.mtx.Lock() @@ -577,7 +577,7 @@ func (d *DataPlane) addBFDController(ifID uint16, s *bfdSend, cfg control.BFD, // @ preserves acc(d.Mem(), OutMutexPerm) // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() -// @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ preserves d.mtx.LockInv() == MutexInvariant! // @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddSvc(svc addr.HostSVC, a *net.UDPAddr) error { d.mtx.Lock() @@ -661,7 +661,7 @@ func (d *DataPlane) DelSvc(svc addr.HostSVC, a *net.UDPAddr) error { // @ preserves acc(d.Mem(), OutMutexPerm) // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() -// @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ preserves d.mtx.LockInv() == MutexInvariant! // @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddNextHop(ifID uint16, a *net.UDPAddr) error { d.mtx.Lock() @@ -743,7 +743,7 @@ func (d *DataPlane) AddNextHopBFD(ifID uint16, src, dst *net.UDPAddr, cfg contro // @ requires d.PreWellConfigured() // (VerifiedSCION) here, the spec still uses a private field. // @ requires d.mtx.LockP() -// @ requires d.mtx.LockInv() == MutexInvariant!; +// @ requires d.mtx.LockInv() == MutexInvariant! 
// @ requires ctx != nil && ctx.Mem() // contracts for IO-spec // @ requires dp.Valid() @@ -810,9 +810,10 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ requires dp.Valid() // @ requires let d := *dPtr in // @ d.DpAgreesWithSpec(dp) - // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; + // @ requires acc(ioLock.LockP(), _) + // @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> // @ #backend[moreJoins()] - func /*@ rc @*/ (ingressID uint16, rd BatchConn, dPtr **DataPlane /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { + func /*@ rc @*/ (ingressID uint16, rd BatchConn, dPtr **DataPlane /*@, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { d := *dPtr msgs := conn.NewReadMessages(inputBatchCnt) // @ requires forall i int :: { &msgs[i] } 0 <= i && i < len(msgs) ==> @@ -876,7 +877,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant acc(rd.Mem(), _) // @ invariant processor.sInit() && processor.sInitD() === d // @ invariant processor.getIngressID() == ingressID - // @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + // @ invariant acc(ioLock.LockP(), _) + // @ invariant ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> // @ invariant d.DpAgreesWithSpec(dp) && dp.Valid() for d.running { // @ ghost ioIngressID := path.ifsToIO_ifs(ingressID) @@ -955,7 +957,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant pkts <= len(ioValSeq) // @ invariant d.DpAgreesWithSpec(dp) && dp.Valid() // @ invariant ioIngressID == path.ifsToIO_ifs(ingressID) - // @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; + // @ invariant acc(ioLock.LockP(), _) + // @ invariant ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> // @ invariant forall i int :: { &msgs[i] } i0 <= i && i < pkts ==> // @ MsgToAbsVal(&msgs[i], ingressID) == ioValSeq[i] // @ invariant MultiElemWitnessWithIndex(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0) @@ -1076,8 +1079,10 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta writeMsgs[0].Addr = result.OutAddr } // @ sl.NilAcc_Bytes() - // @ assert absIO_val(result.OutPkt, result.EgressID) == absIO_val(writeMsgs[0].Buffers[0], result.EgressID) - // @ assert result.OutPkt != nil ==> newAbsPkt == absIO_val(writeMsgs[0].Buffers[0], result.EgressID) + // @ assert absIO_val(result.OutPkt, result.EgressID) == + // @ absIO_val(writeMsgs[0].Buffers[0], result.EgressID) + // @ assert result.OutPkt != nil ==> newAbsPkt == + // @ absIO_val(writeMsgs[0].Buffers[0], result.EgressID) // @ fold acc(writeMsgs[0].Mem(), R50) // @ ghost ioLock.Lock() @@ -1199,7 +1204,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant d.getMacFactory() != nil // @ invariant dp.Valid() // @ invariant d.DpAgreesWithSpec(dp) - // @ invariant acc(ioLockRun.LockP(), _) && ioLockRun.LockInv() == SharedInv!< dp, ioSharedArgRun !>; + // @ invariant acc(ioLockRun.LockP(), _) + // @ invariant ioLockRun.LockInv() == SharedInv!< dp, ioSharedArgRun !> // @ decreases len(externals) - len(visited) for ifID, v := range externals /*@ with visited @*/ { cl := @@ -1215,8 +1221,9 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // contracts for IO-spec // @ requires dp.Valid() 
// @ requires d.DpAgreesWithSpec(dp) - // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; - func /*@ closure2 @*/ (i uint16, c BatchConn /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { + // @ requires acc(ioLock.LockP(), _) + // @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + func /*@ closure2 @*/ (i uint16, c BatchConn /*@, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { defer log.HandlePanic() read(i, c, &d /*@, ioLock, ioSharedArg, dp @*/) //@ as rc } @@ -1239,8 +1246,9 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // contracts for IO-spec // @ requires dp.Valid() // @ requires d.DpAgreesWithSpec(dp) - // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; - func /*@ closure3 @*/ (c BatchConn /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { + // @ requires acc(ioLock.LockP(), _) + // @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + func /*@ closure3 @*/ (c BatchConn /*@, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { defer log.HandlePanic() read(0, c, &d /*@, ioLock, ioSharedArg, dp @*/) //@ as rc } @@ -1442,7 +1450,8 @@ func (p *scionPacketProcessor) reset() (err error) { // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec // @ requires dp.Valid() -// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +// @ requires acc(ioLock.LockP(), _) +// @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> // @ requires let absPkt := absIO_val(rawPkt, p.getIngressID()) in // @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.getIngressID()), absPkt.IO_val_Pkt2_2) // @ ensures respr.OutPkt != nil ==> @@ -1454,7 +1463,7 @@ func (p *scionPacketProcessor) reset() (err error) { // @ decreases 0 if sync.IgnoreBlockingForTermination() // @ #backend[moreJoins(1)] func (p *scionPacketProcessor) processPkt(rawPkt []byte, - srcAddr *net.UDPAddr /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { + srcAddr *net.UDPAddr /*@, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { if err := p.reset(); err != nil { // @ fold p.sInitD().validResult(processResult{}, false) @@ -1746,7 +1755,8 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ requires (typeOf(p.scionLayer.GetPath(ub)) == *scion.Raw) ==> // @ p.scionLayer.EqAbsHeader(ub) && p.scionLayer.ValidScionInitSpec(ub) // @ requires p.scionLayer.EqPathType(ub) -// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +// @ requires acc(ioLock.LockP(), _) +// @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> // @ requires let absPkt := absIO_val(p.rawPkt, p.ingressID) in // @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) // @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> @@ -1757,7 +1767,7 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ newAbsPkt.isIO_val_Unsupported // @ ensures 
(respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { +func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { var ok bool // @ unfold acc(p.scionLayer.Mem(ub), R20) @@ -2026,7 +2036,8 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr // @ ensures acc(&p.hopField, R20) // @ ensures acc(&p.ingressID, R21) // @ ensures p.d.validResult(respr, false) -// @ ensures respr.OutPkt != nil ==> reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ ensures respr.OutPkt != nil ==> +// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // @ ensures reserr == nil && p.infoField.ConsDir ==> ( // @ p.ingressID == 0 || p.hopField.ConsIngress == p.ingressID) @@ -2036,7 +2047,8 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr // @ requires len(oldPkt.CurrSeg.Future) > 0 // @ requires p.EqAbsHopField(oldPkt) // @ requires p.EqAbsInfoField(oldPkt) -// @ ensures reserr == nil ==> AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ ensures reserr == nil ==> +// @ AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) // @ ensures reserr != nil && respr.OutPkt != nil ==> // @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases @@ -2256,9 +2268,12 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ // @ requires !p.segmentChange ==> AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) // @ requires p.segmentChange ==> AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID)) // @ ensures reserr == nil ==> p.NoBouncingPkt(oldPkt) -// @ ensures reserr == nil && !p.segmentChange ==> AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) -// @ ensures reserr == nil && p.segmentChange ==> oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 -// @ ensures reserr == nil && p.segmentChange ==> p.ingressID != 0 && AbsValidateEgressIDConstraintXover(oldPkt, dp) +// @ ensures reserr == nil && !p.segmentChange ==> +// @ AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) +// @ ensures reserr == nil && p.segmentChange ==> +// @ oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 +// @ ensures reserr == nil && p.segmentChange ==> +// @ p.ingressID != 0 && AbsValidateEgressIDConstraintXover(oldPkt, dp) // @ ensures reserr != nil && respr.OutPkt != nil ==> // @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases @@ -2356,24 +2371,27 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh // @ requires p.path === p.scionLayer.GetPath(ub) // @ requires acc(&p.hopField, R20) // @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) -// @ preserves acc(&p.ingressID, R21) -// @ ensures acc(&p.hopField, R20) -// @ ensures 
sl.AbsSlice_Bytes(ub, 0, len(ub))
-// @ ensures acc(&p.infoField)
-// @ ensures acc(&p.path, R20)
-// @ ensures acc(p.scionLayer.Mem(ub), R19)
-// @ ensures err != nil ==> err.ErrorMem()
-// contracts for IO-spec
+// @ requires acc(&p.ingressID, R21)
+// preconditions for IO:
 // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
 // @ requires len(absPkt(ub).CurrSeg.Future) > 0
 // @ requires acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55)
 // @ requires p.LastHopLen(ub)
 // @ requires p.EqAbsHopField(absPkt(ub))
 // @ requires p.EqAbsInfoField(absPkt(ub))
+// @ ensures acc(&p.ingressID, R21)
+// @ ensures acc(&p.hopField, R20)
+// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub))
+// @ ensures acc(&p.infoField)
+// @ ensures acc(&p.path, R20)
+// @ ensures acc(p.scionLayer.Mem(ub), R19)
+// @ ensures err != nil ==> err.ErrorMem()
+// postconditions for IO:
 // @ ensures acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55)
 // @ ensures err == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
 // @ ensures err == nil ==> len(absPkt(ub).CurrSeg.Future) > 0
-// @ ensures err == nil ==> absPkt(ub) == AbsUpdateNonConsDirIngressSegID(old(absPkt(ub)), path.ifsToIO_ifs(p.ingressID))
+// @ ensures err == nil ==>
+// @ 	absPkt(ub) == AbsUpdateNonConsDirIngressSegID(old(absPkt(ub)), path.ifsToIO_ifs(p.ingressID))
 // @ ensures err == nil ==> p.LastHopLen(ub)
 // @ ensures err == nil ==> p.EqAbsHopField(absPkt(ub))
 // @ ensures err == nil ==> p.EqAbsInfoField(absPkt(ub))
@@ -2395,7 +2413,8 @@ func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte
 	if !p.infoField.ConsDir && p.ingressID != 0 {
 		p.infoField.UpdateSegID(p.hopField.Mac /*@, p.hopField.ToIO_HF() @*/)
 		// @ reveal p.LastHopLen(ub)
-		// @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF()))
+		// @ assert path.AbsUInfoFromUint16(p.infoField.SegID) ==
+		// @ 	old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF()))
 		// (VerifiedSCION) the following property is guaranteed by the type system, but Gobra cannot infer it yet
 		// @ assume 0 <= p.path.GetCurrINF(ubPath)
 		// @ sl.SplitRange_Bytes(ub, start, end, HalfPerm)
@@ -2411,7 +2430,8 @@ func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte
 		// @ p.SubSliceAbsPktToAbsPkt(ub, start, end)
 		// @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm)
 		// @ absPktFutureLemma(ub)
-		// @ assert absPkt(ub).CurrSeg.UInfo == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF()))
+		// @ assert absPkt(ub).CurrSeg.UInfo ==
+		// @ 	old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF()))
 		// @ assert reveal p.EqAbsInfoField(absPkt(ub))
 		// @ assert reveal p.EqAbsHopField(absPkt(ub))
 		// @ assert reveal p.LastHopLen(ub)
@@ -2598,7 +2618,8 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr
 	// need to update the SegID.
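 	// UpdateSegID folds the first two bytes of the verified MAC into the
 	// SegID accumulator, roughly
 	//   info.SegID = info.SegID ^ binary.BigEndian.Uint16(hopField.Mac[:2]),
 	// which io.upd_uinfo mirrors on the abstract packet.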
if p.infoField.ConsDir { p.infoField.UpdateSegID(p.hopField.Mac /*@, p.hopField.ToIO_HF() @*/) - // @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) + // @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == + // @ old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) // @ assume 0 <= p.path.GetCurrINF(ubPath) if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF /*@ ) @*/) /*@ , ubPath @*/); err != nil { // TODO parameter problem invalid path @@ -2644,7 +2665,7 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ ensures acc(&p.path, R20) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr == nil ==> p.scionLayer.UBPath(ub) === old(p.scionLayer.UBPath(ub)) -// @ ensures reserr == nil ==> p.scionLayer.GetPath(ub) === old(p.scionLayer.GetPath(ub)) +// @ ensures reserr == nil ==> p.scionLayer.GetPath(ub) == old(p.scionLayer.GetPath(ub)) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() // @ ensures p.segmentChange // @ ensures respr === processResult{} @@ -3102,8 +3123,10 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( // @ preserves acc(p.scionLayer.Mem(ubScionL), R20) // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) -// @ ensures respr.OutPkt != nil ==> reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) -// @ ensures reserr == nil ==> int(p.scionLayer.GetPayloadLen(ubScionL)) == len(p.scionLayer.GetPayload(ubScionL)) +// @ ensures respr.OutPkt != nil ==> +// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ ensures reserr == nil ==> +// @ int(p.scionLayer.GetPayloadLen(ubScionL)) == len(p.scionLayer.GetPayload(ubScionL)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec // @ ensures reserr != nil && respr.OutPkt != nil ==> @@ -3171,7 +3194,8 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ requires p.d.DpAgreesWithSpec(dp) // @ requires dp.Valid() // @ requires p.scionLayer.EqAbsHeader(ub) && p.scionLayer.EqPathType(ub) && p.scionLayer.ValidScionInitSpec(ub) -// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +// @ requires acc(ioLock.LockP(), _) +// @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> // @ requires let absPkt := absIO_val(ub, p.ingressID) in // @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) // @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> @@ -3183,7 +3207,7 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) // @ decreases 0 if sync.IgnoreBlockingForTermination() // @ #backend[stateConsolidationMode(6)] -func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { +func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp 
io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { if r, err := p.parsePath( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) return r, err /*@, false, absReturnErr(r) @*/ diff --git a/router/dataplane_concurrency_model.gobra b/router/dataplane_concurrency_model.gobra index ae89a26a8..4fdbc540d 100644 --- a/router/dataplane_concurrency_model.gobra +++ b/router/dataplane_concurrency_model.gobra @@ -17,14 +17,16 @@ package router import ( - "sync" - io "github.com/scionproto/scion/verification/io" + gsync "verification/utils/ghost_sync" + io "verification/io" ) +// Never use `==` for comparisons! Because this is a ghost structure, only the ghost comparison (`===`) +// is meaningful. type SharedArg struct { - Place *io.Place // Existential for the current place - State *io.IO_dp3s_state_local // Existential for the current model state - IBufY, OBufY ElemRA // Parameters of the algebra + ghost Place gpointer[io.Place] // Existential for the current place + ghost State gpointer[io.IO_dp3s_state_local] // Existential for the current model state + ghost IBufY, OBufY ElemRA // Parameters of the algebra } pred SharedInv(ghost dp io.DataPlaneSpec, ghost y SharedArg) { @@ -36,13 +38,13 @@ pred SharedInv(ghost dp io.DataPlaneSpec, ghost y SharedArg) { // initialize the shared invariant: ghost requires io.token(p) && dp.dp3s_iospec_ordered(s, p) -ensures m.LockP() && m.LockInv() == SharedInv!< dp, y !>; +ensures m.LockP() && m.LockInv() == SharedInv!< dp, y !> decreases func InitSharedInv( dp io.DataPlaneSpec, p io.Place, - s io.IO_dp3s_state_local) (m *sync.Mutex, y SharedArg) { - mV@ := sync.Mutex{} + s io.IO_dp3s_state_local) (m gpointer[gsync.GhostMutex], y SharedArg) { + mV@ := gsync.GhostMutex{} m = &mV pE@ := p sE@ := s diff --git a/router/dataplane_spec.gobra b/router/dataplane_spec.gobra index 5a5e28fc5..12b4c77af 100644 --- a/router/dataplane_spec.gobra +++ b/router/dataplane_spec.gobra @@ -145,6 +145,7 @@ pred (p *scionPacketProcessor) initMem() { acc(&p.bfdLayer) } +// This is used as a signature, not as an assumed function. requires acc(key, _) && acc(sl.AbsSlice_Bytes(*key, 0, len(*key)), _) requires scrypto.ValidKeyForHash(*key) ensures acc(key, _) && acc(sl.AbsSlice_Bytes(*key, 0, len(*key)), _) @@ -533,7 +534,6 @@ pred (s *scmpError) Mem() { acc(s) } -ghost preserves s.Mem() && s.CanSet(e) ensures s.Get() === e decreases diff --git a/router/dataplane_spec_test.gobra b/router/dataplane_spec_test.gobra index 49b421d64..a38be0c54 100644 --- a/router/dataplane_spec_test.gobra +++ b/router/dataplane_spec_test.gobra @@ -136,7 +136,7 @@ func testRun( fold accForwardingMetrics(d.forwardingMetrics) ensures dp.Valid() - ensures dp === io.DataPlaneSpec_{ + ensures dp === io.DataPlaneSpec_ { linkTypes: dict[io.IO_ifs]io.IO_Link{ 1: io.IO_ProvCust{}, 2: io.IO_ProvCust{}, @@ -161,7 +161,7 @@ func testRun( pair4 := io.AsIfsPair{1001, 7} pair5 := io.AsIfsPair{1002, 8} - dp := io.DataPlaneSpec_{ + dp := io.DataPlaneSpec_ { linkTypes: dict[io.IO_ifs]io.IO_Link{ 1: io.IO_ProvCust{}, 2: io.IO_ProvCust{}, @@ -223,7 +223,7 @@ func testRun( assert d.MetricsAreSet() d.mtx.SetInv(MutexInvariant!) assert d.mtx.LockP() - assert d.mtx.LockInv() == MutexInvariant!; + assert d.mtx.LockInv() == MutexInvariant! 
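// (Illustrative sketch, not part of this patch series; `Inv`, `m`, and `x` are
// made-up names.) The statements above exercise Gobra's lock-invariant
// protocol: SetInv ties a predicate instance to the mutex once, after which
// every Lock/Unlock pair temporarily trades the LockP() capability for the
// invariant body. Roughly:
//
//   m.SetInv(Inv!< x !>) // consumes Inv(x) and acc(m), yields m.LockP()
//   m.Lock()             // yields m.UnlockP() and the invariant body Inv(x)
//   // ... atomic (ghost) operations, possibly unfolding/folding Inv(x) ...
//   m.Unlock()           // consumes m.UnlockP() and Inv(x); m.LockP() remains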
// io-spec needs to be inhaled inhale io.token(place) diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra index 17b4a2582..b238a4ddd 100644 --- a/router/io-spec-abstract-transitions.gobra +++ b/router/io-spec-abstract-transitions.gobra @@ -17,12 +17,12 @@ package router import ( - "sync" "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers" - io "verification/io" - sl "github.com/scionproto/scion/verification/utils/slices" . "verification/utils/definitions" + io "verification/io" + gsync "verification/utils/ghost_sync" + sl "verification/utils/slices" ) ghost @@ -40,12 +40,12 @@ ensures len(newPkt.CurrSeg.Future) > 0 ensures len(newPkt.CurrSeg.Future) == len(oldPkt.CurrSeg.Future) decreases pure func AbsUpdateNonConsDirIngressSegID(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs]) (newPkt io.IO_pkt2) { - return ingressID == none[io.IO_ifs] ? oldPkt : io.IO_pkt2( - io.IO_Packet2{ - io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), - oldPkt.LeftSeg, - oldPkt.MidSeg, - oldPkt.RightSeg}) + return ingressID == none[io.IO_ifs] ? oldPkt : io.IO_Packet2 { + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg, + } } ghost @@ -94,12 +94,12 @@ requires len(oldPkt.CurrSeg.Future) > 0 ensures len(newPkt.CurrSeg.Future) >= 0 decreases pure func AbsProcessEgress(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { - return io.IO_pkt2( - io.IO_Packet2{ - io.establishGuardTraversedsegInc(oldPkt.CurrSeg, oldPkt.CurrSeg.ConsDir), - oldPkt.LeftSeg, - oldPkt.MidSeg, - oldPkt.RightSeg}) + return io.IO_Packet2 { + io.establishGuardTraversedsegInc(oldPkt.CurrSeg, oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg, + } } ghost @@ -113,12 +113,12 @@ ensures newPkt.RightSeg != none[io.IO_seg2] ensures len(get(newPkt.RightSeg).Past) > 0 decreases pure func AbsDoXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { - return io.IO_pkt2( - io.IO_Packet2{ - get(oldPkt.LeftSeg), - oldPkt.MidSeg, - oldPkt.RightSeg, - some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, false))}) + return io.IO_Packet2 { + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, false)), + } } ghost @@ -160,10 +160,11 @@ requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) requires AbsValidateIngressIDConstraint(oldPkt, ingressID) requires AbsVerifyCurrentMACConstraint(newPkt, dp) requires len(newPkt.CurrSeg.Future) == 1 || AbsValidateEgressIDConstraint(newPkt, true, dp) -preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases -func InternalEnterEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { +func InternalEnterEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) reveal AbsVerifyCurrentMACConstraint(newPkt, dp) @@ -190,10 +191,11 @@ requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, requires 
AbsValidateEgressIDConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), (ingressID != none[io.IO_ifs]), dp) requires AbsEgressInterfaceConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), egressID) requires newPkt == AbsProcessEgress(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) -preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases -func ExternalEnterOrExitEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { +func ExternalEnterOrExitEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { reveal dp.Valid() nextPkt := reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) @@ -226,12 +228,15 @@ requires AbsVerifyCurrentMACConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSeg requires AbsValidateEgressIDConstraintXover(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp) requires egressID != none[io.IO_ifs] ==> get(egressID) in domain(dp.GetNeighborIAs()) requires egressID != none[io.IO_ifs] ==> AbsEgressInterfaceConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), egressID) -requires egressID == none[io.IO_ifs] ==> newPkt == AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) -requires egressID != none[io.IO_ifs] ==> newPkt == AbsProcessEgress(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID))) -preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +requires egressID == none[io.IO_ifs] ==> + newPkt == AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) +requires egressID != none[io.IO_ifs] ==> + newPkt == AbsProcessEgress(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID))) +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases -func XoverEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { +func XoverEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { reveal dp.Valid() intermediatePkt1 := reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) intermediatePkt2 := reveal AbsDoXover(intermediatePkt1) diff --git a/router/io-spec-atomic-events.gobra b/router/io-spec-atomic-events.gobra index 4b7309caa..fca20b964 100644 --- a/router/io-spec-atomic-events.gobra +++ b/router/io-spec-atomic-events.gobra @@ -15,9 +15,9 @@ // +gobra // This file contains the definition of operations that perform the atomic transitions of state -// in the IO spec. They all take a *sync.Mutex, which acts as a logical invariant, because Gobra +// in the IO spec. They all take a gpointer[gsync.GhostMutex], which acts as a logical invariant, because Gobra // does not support invariants natively. As such, we can only get access to the invariants if we -// first lock the mutex, which is a blocking operation. 
Even though all these operations are +// first lock the mutex. Even though all these operations are // terminating, Gobra cannot currently prove this and thus, we assume termination for all methods // in this file. @@ -26,6 +26,7 @@ package router import ( "sync" io "verification/io" + gsync "verification/utils/ghost_sync" ) ghost @@ -42,18 +43,19 @@ requires dp.dp2_enter_guard( get(ingressID), oldPkt.CurrSeg.Future[1:]) requires dp.dp3s_forward( - io.IO_pkt2( - io.IO_Packet2{ - io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), - oldPkt.LeftSeg, - oldPkt.MidSeg, - oldPkt.RightSeg}), + io.IO_Packet2 { + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg, + }, newPkt, egressID) -preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases _ -func AtomicEnter(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { +func AtomicEnter(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { ghost ioLock.Lock() unfold SharedInv!< dp, ioSharedArg !>() t, s := *ioSharedArg.Place, *ioSharedArg.State @@ -79,10 +81,11 @@ requires egressID != none[io.IO_ifs] requires len(oldPkt.CurrSeg.Future) > 0 requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) requires dp.dp3s_forward_ext(oldPkt, newPkt, get(egressID)) -preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases _ -func AtomicExit(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { +func AtomicExit(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { ghost ioLock.Lock() unfold SharedInv!< dp, ioSharedArg !>() t, s := *ioSharedArg.Place, *ioSharedArg.State @@ -113,28 +116,31 @@ requires dp.dp2_xover_guard( oldPkt.CurrSeg, get(oldPkt.LeftSeg), io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), - io.IO_pkt2(io.IO_Packet2{ + io.IO_Packet2 { get(oldPkt.LeftSeg), oldPkt.MidSeg, oldPkt.RightSeg, - some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir))}), + some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir)), + }, oldPkt.CurrSeg.Future[0], get(oldPkt.LeftSeg).Future[0], get(oldPkt.LeftSeg).Future[1:], dp.Asid(), get(ingressID)) requires dp.dp3s_forward_xover( - io.IO_pkt2(io.IO_Packet2{ + io.IO_Packet2 { get(oldPkt.LeftSeg), oldPkt.MidSeg, oldPkt.RightSeg, - some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir))}), + some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir)), + }, newPkt, egressID) -preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> ensures 
ElemWitness(ioSharedArg.OBufY, egressID, newPkt) decreases _ -func AtomicXover(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { +func AtomicXover(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { ghost ioLock.Lock() unfold SharedInv!< dp, ioSharedArg !>() t, s := *ioSharedArg.Place, *ioSharedArg.State diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index d1878e906..f0dc435af 100644 --- a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -372,9 +372,7 @@ decreases pure func (p* scionPacketProcessor) EqAbsHopField(pkt io.IO_pkt2) bool { return let absHop := p.hopField.ToIO_HF() in let currHF := pkt.CurrSeg.Future[0] in - absHop.InIF2 == currHF.InIF2 && - absHop.EgIF2 == currHF.EgIF2 && - absHop.HVF == currHF.HVF + absHop == currHF } ghost diff --git a/router/io-spec.gobra b/router/io-spec.gobra index e65376a1a..a633f6668 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -51,12 +51,12 @@ pure func absPkt(raw []byte) (res io.IO_pkt2) { let prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in let numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) in let offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) in - io.IO_pkt2(io.IO_Packet2{ + io.IO_Packet2 { CurrSeg : scion.CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen), LeftSeg : scion.LeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), MidSeg : scion.MidSeg(raw, currInfIdx + 2, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), RightSeg : scion.RightSeg(raw, currInfIdx - 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), - }) + } } ghost @@ -67,7 +67,7 @@ decreases pure func absIO_val_Unsupported(raw []byte, ingressID uint16) (val io.IO_val) { return io.IO_val(io.IO_val_Unsupported{ path.ifsToIO_ifs(ingressID), - io.Unit(io.Unit_{}), + io.Unit{}, }) } diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra index c3f6e7ae3..cfa9eb66e 100644 --- a/router/widen-lemma.gobra +++ b/router/widen-lemma.gobra @@ -147,4 +147,4 @@ func absPktWidenLemma(raw []byte, length int) { scion.WidenRightSeg(raw, currInfIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) assert reveal absPkt(raw) == reveal absPkt(raw[:length]) -} \ No newline at end of file +} diff --git a/verification/dependencies/errors/errors_spec.gobra b/verification/dependencies/errors/errors_spec.gobra index 74e5ef38d..715c963be 100644 --- a/verification/dependencies/errors/errors_spec.gobra +++ b/verification/dependencies/errors/errors_spec.gobra @@ -68,7 +68,6 @@ type ErrorCell interface { // This is not meant to be called. Instead, // it shows that Mem() is strong enough to // allow for the assignment of an error. 
- ghost requires Mem() && CanSet(e) ensures Mem() && Get() === e decreases diff --git a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra b/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra index 4d28c738d..e09aca4e4 100644 --- a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra +++ b/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra @@ -42,11 +42,10 @@ type Message struct { NN int // # of bytes read or written from/to OOB Flags int // protocol-specific information on the received message - // (VerifiedSCION) the following are, morally, ghost fields: // is it still ok to read the Addr of the Message? - IsActive bool + ghost IsActive bool // do we have a fixed amount of perms to the Addr or a wildcard amount? - WildcardPerm bool + ghost WildcardPerm bool } pred (m *Message) Mem() { diff --git a/verification/dependencies/syscall/syscall_unix.gobra b/verification/dependencies/syscall/syscall_unix.gobra index c0b53a89a..9e46ace9a 100644 --- a/verification/dependencies/syscall/syscall_unix.gobra +++ b/verification/dependencies/syscall/syscall_unix.gobra @@ -54,7 +54,6 @@ pred (s *Errno) Mem() { acc(s) } -ghost preserves s.Mem() && s.CanSet(e) ensures s.Get() === e decreases diff --git a/verification/io/hopfields.gobra b/verification/io/hopfields.gobra index 735da0e78..d2f71864b 100644 --- a/verification/io/hopfields.gobra +++ b/verification/io/hopfields.gobra @@ -27,4 +27,4 @@ type IO_HF adt { EgIF2 option[IO_ifs] HVF IO_msgterm } -} +} \ No newline at end of file diff --git a/verification/io/io-spec.gobra b/verification/io/io-spec.gobra index c5bdfd790..de71c2e17 100644 --- a/verification/io/io-spec.gobra +++ b/verification/io/io-spec.gobra @@ -67,7 +67,8 @@ pure func establishGuardTraversedseg(currseg IO_seg3, direction bool) IO_seg3 { Peer: currseg.Peer, Past: currseg.Past, Future: currseg.Future, - History: currseg.History} + History: currseg.History, + } } // Establishes the traversed segment for packets that are incremented (external). 
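// (Illustrative note, not part of this patch series; `seg` is a made-up
// segment value.) Several hunks in this series drop a redundant conversion
// around ADT constructor literals: for an ADT T with constructor C, a literal
// C{...} already has type T, so T(C{...}) denotes the same value. For example:
//
//   pkt1 := io.IO_pkt2(io.IO_Packet2{seg, none[io.IO_seg2], none[io.IO_seg2], none[io.IO_seg2]})
//   pkt2 := io.IO_Packet2{seg, none[io.IO_seg2], none[io.IO_seg2], none[io.IO_seg2]}
//   assert pkt1 == pkt2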
@@ -85,7 +86,8 @@ pure func establishGuardTraversedsegInc(currseg IO_seg3, direction bool) IO_seg3 Peer: currseg.Peer, Past: seq[IO_HF]{currseg.Future[0]} ++ currseg.Past, Future: currseg.Future[1:], - History: seq[IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History} + History: seq[IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History, + } } /*** End of helper functions, not in Isabelle ***/ @@ -110,12 +112,12 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_enter_guard(s IO_dp3s_state_local v.IO_Internal_val1_2, fut) && dp.dp3s_forward( - IO_pkt2( - IO_Packet2{ - traversedseg, - v.IO_Internal_val1_1.LeftSeg, - v.IO_Internal_val1_1.MidSeg, - v.IO_Internal_val1_1.RightSeg}), + IO_Packet2 { + traversedseg, + v.IO_Internal_val1_1.LeftSeg, + v.IO_Internal_val1_1.MidSeg, + v.IO_Internal_val1_1.RightSeg, + }, v.IO_Internal_val1_3, v.IO_Internal_val1_4) } @@ -173,7 +175,7 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_guard(s IO_dp3s_state_local currseg, nextseg, traversedseg, - IO_pkt2(IO_Packet2{nextseg, v.IO_Internal_val1_1.MidSeg, v.IO_Internal_val1_1.RightSeg, some(traversedseg)}), + IO_Packet2 { nextseg, v.IO_Internal_val1_1.MidSeg, v.IO_Internal_val1_1.RightSeg, some(traversedseg) }, hf1, hf2, nextfut, diff --git a/verification/io/other_defs.gobra b/verification/io/other_defs.gobra index d4eb1df8f..a5c48ccae 100644 --- a/verification/io/other_defs.gobra +++ b/verification/io/other_defs.gobra @@ -18,9 +18,7 @@ package io -type Unit adt { - Unit_{} -} +type Unit struct{} // interface IDs type IO_ifs uint16 diff --git a/verification/io/packets.gobra b/verification/io/packets.gobra index 4944fc21c..8047357bd 100644 --- a/verification/io/packets.gobra +++ b/verification/io/packets.gobra @@ -19,8 +19,9 @@ package io // pkt2 +// Here, we already instantiated the type params, instead of +// leaving them generic as done in Isabelle. type IO_pkt2 adt { - // Here, we already instantiated the type params IO_Packet2 { CurrSeg IO_seg3 LeftSeg option[IO_seg2] diff --git a/verification/io/router.gobra b/verification/io/router.gobra index db9cabaab..f1741bed5 100644 --- a/verification/io/router.gobra +++ b/verification/io/router.gobra @@ -96,7 +96,7 @@ pure func (dp DataPlaneSpec) is_target(asid IO_as, nextif IO_ifs, a2 IO_as, i2 I ghost decreases pure func dp3s_add_ibuf(s IO_dp3s_state_local, i option[IO_ifs], pkt IO_pkt3) IO_dp3s_state_local { - return IO_dp3s_state_local_{ + return IO_dp3s_state_local_ { ibuf: insert(s.ibuf, i, pkt), obuf: s.obuf, } @@ -105,7 +105,7 @@ pure func dp3s_add_ibuf(s IO_dp3s_state_local, i option[IO_ifs], pkt IO_pkt3) IO ghost decreases pure func dp3s_add_obuf(s IO_dp3s_state_local, i option[IO_ifs], pkt IO_pkt3) IO_dp3s_state_local { - return IO_dp3s_state_local_{ + return IO_dp3s_state_local_ { ibuf: s.ibuf, obuf: insert(s.obuf, i, pkt), } @@ -151,8 +151,6 @@ pure func (dp DataPlaneSpec) dp3s_forward_ext_xover(m IO_pkt3, newpkt IO_pkt3, n dp.is_target(dp.Asid(), nextif, a2, i2) } - -// TODO: should we change IO_ifs to being implemented as an option type? 
ghost requires len(m.CurrSeg.Future) > 0 requires dp.Valid() diff --git a/verification/io/router_events.gobra b/verification/io/router_events.gobra index c03f05fad..54822ddbe 100644 --- a/verification/io/router_events.gobra +++ b/verification/io/router_events.gobra @@ -67,7 +67,7 @@ requires dp.Asid() == asid decreases pure func (dp DataPlaneSpec) dp2_forward_ext_guard(asid IO_as, m IO_pkt2, nextif IO_ifs, currseg, traversedseg IO_seg2, newpkt IO_pkt2, fut seq[IO_HF], hf1 IO_HF) bool { return m.CurrSeg == currseg && - newpkt == IO_pkt2(IO_Packet2{traversedseg, m.LeftSeg, m.MidSeg, m.RightSeg}) && + newpkt == IO_Packet2{traversedseg, m.LeftSeg, m.MidSeg, m.RightSeg} && // The outgoing interface is correct: dp2_exit_interface(currseg.ConsDir, asid, hf1, nextif) && // Next validate the current hop field with the *original* UInfo field): diff --git a/verification/io/segments.gobra b/verification/io/segments.gobra index acaca602d..9b3248c1a 100644 --- a/verification/io/segments.gobra +++ b/verification/io/segments.gobra @@ -20,8 +20,9 @@ package io type IO_ainfo = uint +// Here, we already instantiated the type params, contrary to what +// is done in Isabelle, where they are left generic. type IO_seg2 adt { - // Here, we already instantiated the type params IO_seg3_ { AInfo IO_ainfo // nat in Isabelle UInfo set[IO_msgterm] diff --git a/verification/io/xover.gobra b/verification/io/xover.gobra index 8fff431fb..8f2d91c69 100644 --- a/verification/io/xover.gobra +++ b/verification/io/xover.gobra @@ -47,7 +47,7 @@ pure func (dp DataPlaneSpec) dp2_xover_guard(m IO_pkt2, return m.CurrSeg == currseg && m.LeftSeg == some(nextseg) && nextseg.History == seq[IO_ahi]{} && - newpkt == IO_pkt2(IO_Packet2{nextseg, m.MidSeg, m.RightSeg, some(traversedseg)}) && + newpkt == IO_Packet2{nextseg, m.MidSeg, m.RightSeg, some(traversedseg)} && currseg.Future == seq[IO_HF]{hf1} && nextseg.Future == seq[IO_HF]{hf2} ++ nextfut && dp.dp2_enter_interface(currseg.ConsDir, asid, hf1, recvif) && @@ -90,11 +90,11 @@ pure func (dp DataPlaneSpec) xover2_link_type(asid IO_as, hf1 IO_HF, hf2 IO_HF) ghost decreases pure func swap_if_dir2(hf IO_HF, d bool) IO_HF { - return IO_HF(IO_HF_{ - InIF2: d ? hf.InIF2 : hf.EgIF2, - EgIF2: d ? hf.EgIF2 : hf.InIF2, - HVF: hf.HVF, - }) + return IO_HF_ { + InIF2: d ? hf.InIF2 : hf.EgIF2, + EgIF2: d ? hf.EgIF2 : hf.InIF2, + HVF: hf.HVF, + } } ghost diff --git a/verification/utils/ghost_sync/ghost-mutex.gobra b/verification/utils/ghost_sync/ghost-mutex.gobra new file mode 100644 index 000000000..e0295f43d --- /dev/null +++ b/verification/utils/ghost_sync/ghost-mutex.gobra @@ -0,0 +1,60 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package ghost_sync + +import "sync" +import . "verification/utils/definitions" + +// This package contains the definition of a Ghost Mutex (a.k.a. Ghost Lock), as described +// in https://arxiv.org/pdf/2311.14452. 
These Mutexes are used to provide a ghost-like
+// interface to an invariant for the duration of an atomic operation. To use ghost
+// mutexes soundly, we must ensure two properties:
+// (1) All calls to Lock() must be accompanied by a call to Unlock().
+// (2) All operations performed between a call to Lock() and the corresponding call to
+//     Unlock() must be atomic.
+// Currently, Gobra does not check either of these two properties. Property (1) could be checked
+// by using obligations.
+
+type GhostMutex struct {
+	privateField PrivateField
+}
+
+pred (m gpointer[GhostMutex]) LockP()
+pred (m gpointer[GhostMutex]) UnlockP()
+
+ghost
+requires acc(m.LockP(), _)
+decreases _
+pure func (m gpointer[GhostMutex]) LockInv() pred()
+
+ghost
+requires inv() && acc(m) && *m == GhostMutex{}
+ensures m.LockP() && m.LockInv() == inv
+decreases
+func (m gpointer[GhostMutex]) SetInv(inv pred())
+
+ghost
+requires acc(m.LockP(), _)
+ensures m.LockP() && m.UnlockP() && m.LockInv()()
+decreases _ if sync.IgnoreBlockingForTermination()
+func (m gpointer[GhostMutex]) Lock()
+
+ghost
+requires acc(m.LockP(), _) && m.UnlockP() && m.LockInv()()
+ensures m.LockP()
+decreases _
+func (m gpointer[GhostMutex]) Unlock()

From 57cdd3e0753e777fc3b2e86733e76960a43e323f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Pereira?=
Date: Sun, 26 May 2024 19:00:12 +0200
Subject: [PATCH 40/57] Fix ghostness of output params (#349)

* fix ghostness of a few outparams

* backup

---
 router/dataplane.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/router/dataplane.go b/router/dataplane.go
index cefcb0cc0..27c190f24 100644
--- a/router/dataplane.go
+++ b/router/dataplane.go
@@ -1463,7 +1463,7 @@ func (p *scionPacketProcessor) reset() (err error) {
 // @ decreases 0 if sync.IgnoreBlockingForTermination()
 // @ #backend[moreJoins(1)]
 func (p *scionPacketProcessor) processPkt(rawPkt []byte,
-	srcAddr *net.UDPAddr /*@, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) {
+	srcAddr *net.UDPAddr /*@, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error /*@ , ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) {

 	if err := p.reset(); err != nil {
 		// @ fold p.sInitD().validResult(processResult{}, false)
@@ -1767,7 +1767,7 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) {
 // @ 	newAbsPkt.isIO_val_Unsupported
 // @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{})
 // @ decreases 0 if sync.IgnoreBlockingForTermination()
-func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) {
+func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) {

 	var ok bool
 	// @ unfold acc(p.scionLayer.Mem(ub), R20)
@@ -1972,8 +1972,8 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce
 			// TODO(lukedirtwalker)
parameter problem invalid path? return processResult{}, err } - // (VerifiedSCION) This assumption cannot be proven at this point. - // There might be a check is missing. + // (VerifiedSCION) This assumption will be dropped after clarifying + // https://github.com/scionproto/scion/issues/4531 // @ TemporaryAssumeForIO(p.path.CurrInfMatchesCurrHF(ubPath)) // @ p.EstablishEqAbsHeader(ub, startP, endP) // @ p.path.EstablishValidPktMetaHdr(ubPath) @@ -2555,7 +2555,7 @@ func (p *scionPacketProcessor) verifyCurrentMAC( /*@ ghost oldPkt io.IO_pkt2, gh // @ ensures reserr != nil && respr.OutPkt != nil ==> // @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , addrAliasesUb bool @*/) { +func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , ghost addrAliasesUb bool @*/) { // (VerifiedSCION) the parameter used to be p.scionLayer, // instead of &p.scionLayer. a, err /*@ , addrAliases @*/ := p.d.resolveLocalDst(&p.scionLayer /*@, ubScionL @*/) @@ -2781,7 +2781,7 @@ func (p *scionPacketProcessor) ingressInterface( /*@ ghost ubPath []byte @*/ ) u // @ ensures p.EqAbsHopField(oldPkt) // @ ensures AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(egress)) // @ decreases -func (p *scionPacketProcessor) egressInterface( /*@ ghost oldPkt io.IO_pkt2 @*/ ) /*@ (egress @*/ uint16 /*@ ) @*/ { +func (p *scionPacketProcessor) egressInterface( /*@ ghost oldPkt io.IO_pkt2 @*/ ) (egress uint16) { // @ reveal p.EqAbsInfoField(oldPkt) // @ reveal p.EqAbsHopField(oldPkt) if p.infoField.ConsDir { @@ -3207,7 +3207,7 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) // @ decreases 0 if sync.IgnoreBlockingForTermination() // @ #backend[stateConsolidationMode(6)] -func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { +func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { if r, err := p.parsePath( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) return r, err /*@, false, absReturnErr(r) @*/ @@ -3425,7 +3425,7 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) && // @ newAbsPkt.isIO_val_Unsupported // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { +func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /*@ , ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { // @ ghost ubScionL := p.rawPkt // @ p.scionLayer.ExtractAcc(ubScionL) s := p.scionLayer @@ -3619,7 +3619,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, 
reserr error / // specs a lot easier and, makes the implementation faster as well by avoiding passing large data-structures // by value. We should consider porting merging this in upstream SCION. // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (d *DataPlane) resolveLocalDst(s *slayers.SCION /*@, ghost ub []byte @*/) (resaddr *net.UDPAddr, reserr error /*@ , addrAliasesUb bool @*/) { +func (d *DataPlane) resolveLocalDst(s *slayers.SCION /*@, ghost ub []byte @*/) (resaddr *net.UDPAddr, reserr error /*@ , ghost addrAliasesUb bool @*/) { // @ ghost start, end := s.ExtractAcc(ub) // @ assert s.RawDstAddr === ub[start:end] // @ sl.SplitRange_Bytes(ub, start, end, R15) From 1c8991635ee2b3ed9224d666412b0adb9f28f662 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Tue, 4 Jun 2024 22:01:54 -0400 Subject: [PATCH 41/57] fix fmt (#351) --- router/dataplane.go | 176 +++++++++++++++++++++----------------------- 1 file changed, 82 insertions(+), 94 deletions(-) diff --git a/router/dataplane.go b/router/dataplane.go index 27c190f24..f77997d7e 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -3862,15 +3862,13 @@ func (p *scionPacketProcessor) prepareSCMP( return nil, serrors.WithCtx(cannotRoute, "details", "unsupported path type", "path type", pathType) } - /*@ - scionBuf := epicPath.GetUnderlyingScionPathBuf(ubPath) - unfold acc(epicPath.Mem(ubPath), R4) - assert ubPath[epic.MetadataLen:] === scionBuf - epicPathUb = ubPath - ubPath = scionBuf - startP += epic.MetadataLen - assert ubPath === ub[startP:endP] - @*/ + // @ scionBuf := epicPath.GetUnderlyingScionPathBuf(ubPath) + // @ unfold acc(epicPath.Mem(ubPath), R4) + // @ assert ubPath[epic.MetadataLen:] === scionBuf + // @ epicPathUb = ubPath + // @ ubPath = scionBuf + // @ startP += epic.MetadataLen + // @ assert ubPath === ub[startP:endP] path = epicPath.ScionPath // @ pathFromEpic = true default: @@ -3878,46 +3876,40 @@ func (p *scionPacketProcessor) prepareSCMP( return nil, serrors.WithCtx(cannotRoute, "details", "unsupported path type", "path type", pathType) } - /*@ - assert pathType == scion.PathType || pathType == epic.PathType - assert typeOf(p.scionLayer.Path) == type[*scion.Raw] || typeOf(p.scionLayer.Path) == type[*epic.Path] - assert !pathFromEpic ==> typeOf(p.scionLayer.Path) == type[*scion.Raw] - assert pathFromEpic ==> typeOf(p.scionLayer.Path) == type[*epic.Path] - sl.SplitRange_Bytes(ub, startP, endP, writePerm) - @*/ + // @ assert pathType == scion.PathType || pathType == epic.PathType + // @ assert typeOf(p.scionLayer.Path) == type[*scion.Raw] || typeOf(p.scionLayer.Path) == type[*epic.Path] + // @ assert !pathFromEpic ==> typeOf(p.scionLayer.Path) == type[*scion.Raw] + // @ assert pathFromEpic ==> typeOf(p.scionLayer.Path) == type[*epic.Path] + // @ sl.SplitRange_Bytes(ub, startP, endP, writePerm) decPath, err := path.ToDecoded( /*@ ubPath @*/ ) if err != nil { - /*@ - sl.CombineRange_Bytes(ub, startP, endP, writePerm) - ghost if pathFromEpic { - epicPath := p.scionLayer.Path.(*epic.Path) - assert acc(path.Mem(ubPath), R4) - fold acc(epicPath.Mem(epicPathUb), R4) - } else { - rawPath := p.scionLayer.Path.(*scion.Raw) - assert acc(path.Mem(ubPath), R4) - assert acc(rawPath.Mem(ubPath), R4) - } - fold acc(p.scionLayer.Mem(ub), R4) - @*/ + // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ ghost if pathFromEpic { + // @ epicPath := p.scionLayer.Path.(*epic.Path) + // @ assert acc(path.Mem(ubPath), R4) + // @ fold acc(epicPath.Mem(epicPathUb), R4) + // @ } else { + // @ rawPath 
:= p.scionLayer.Path.(*scion.Raw) + // @ assert acc(path.Mem(ubPath), R4) + // @ assert acc(rawPath.Mem(ubPath), R4) + // @ } + // @ fold acc(p.scionLayer.Mem(ub), R4) return nil, serrors.Wrap(cannotRoute, err, "details", "decoding raw path") } // @ ghost rawPath := path.RawBufferMem(ubPath) revPathTmp, err := decPath.Reverse( /*@ rawPath @*/ ) if err != nil { - /*@ - sl.CombineRange_Bytes(ub, startP, endP, writePerm) - ghost if pathFromEpic { - epicPath := p.scionLayer.Path.(*epic.Path) - assert acc(path.Mem(ubPath), R4) - fold acc(epicPath.Mem(epicPathUb), R4) - } else { - rawPath := p.scionLayer.Path.(*scion.Raw) - assert acc(path.Mem(ubPath), R4) - assert acc(rawPath.Mem(ubPath), R4) - } - fold acc(p.scionLayer.Mem(ub), R4) - @*/ + // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ ghost if pathFromEpic { + // @ epicPath := p.scionLayer.Path.(*epic.Path) + // @ assert acc(path.Mem(ubPath), R4) + // @ fold acc(epicPath.Mem(epicPathUb), R4) + // @ } else { + // @ rawPath := p.scionLayer.Path.(*scion.Raw) + // @ assert acc(path.Mem(ubPath), R4) + // @ assert acc(rawPath.Mem(ubPath), R4) + // @ } + // @ fold acc(p.scionLayer.Mem(ub), R4) return nil, serrors.Wrap(cannotRoute, err, "details", "reversing path for SCMP") } // @ assert revPathTmp.Mem(rawPath) @@ -3927,19 +3919,17 @@ func (p *scionPacketProcessor) prepareSCMP( // Revert potential path segment switches that were done during processing. if revPath.IsXover( /*@ rawPath @*/ ) { if err := revPath.IncPath( /*@ rawPath @*/ ); err != nil { - /*@ - sl.CombineRange_Bytes(ub, startP, endP, writePerm) - ghost if pathFromEpic { - epicPath := p.scionLayer.Path.(*epic.Path) - assert acc(path.Mem(ubPath), R4) - fold acc(epicPath.Mem(epicPathUb), R4) - } else { - rawPath := p.scionLayer.Path.(*scion.Raw) - assert acc(path.Mem(ubPath), R4) - assert acc(rawPath.Mem(ubPath), R4) - } - fold acc(p.scionLayer.Mem(ub), R4) - @*/ + // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ ghost if pathFromEpic { + // @ epicPath := p.scionLayer.Path.(*epic.Path) + // @ assert acc(path.Mem(ubPath), R4) + // @ fold acc(epicPath.Mem(epicPathUb), R4) + // @ } else { + // @ rawPath := p.scionLayer.Path.(*scion.Raw) + // @ assert acc(path.Mem(ubPath), R4) + // @ assert acc(rawPath.Mem(ubPath), R4) + // @ } + // @ fold acc(p.scionLayer.Mem(ub), R4) return nil, serrors.Wrap(cannotRoute, err, "details", "reverting cross over for SCMP") } } @@ -3966,19 +3956,17 @@ func (p *scionPacketProcessor) prepareSCMP( // @ fold revPath.Mem(rawPath) // @ ) if err := revPath.IncPath( /*@ rawPath @*/ ); err != nil { - /*@ - sl.CombineRange_Bytes(ub, startP, endP, writePerm) - ghost if pathFromEpic { - epicPath := p.scionLayer.Path.(*epic.Path) - assert acc(path.Mem(ubPath), R4) - fold acc(epicPath.Mem(epicPathUb), R4) - } else { - rawPath := p.scionLayer.Path.(*scion.Raw) - assert acc(path.Mem(ubPath), R4) - assert acc(rawPath.Mem(ubPath), R4) - } - fold acc(p.scionLayer.Mem(ub), R4) - @*/ + // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ ghost if pathFromEpic { + // @ epicPath := p.scionLayer.Path.(*epic.Path) + // @ assert acc(path.Mem(ubPath), R4) + // @ fold acc(epicPath.Mem(epicPathUb), R4) + // @ } else { + // @ rawPath := p.scionLayer.Path.(*scion.Raw) + // @ assert acc(path.Mem(ubPath), R4) + // @ assert acc(rawPath.Mem(ubPath), R4) + // @ } + // @ fold acc(p.scionLayer.Mem(ub), R4) return nil, serrors.Wrap(cannotRoute, err, "details", "incrementing path for SCMP") } } @@ -4050,13 +4038,13 @@ func (p *scionPacketProcessor) prepareSCMP( // 
Returns the last decoded layer. // @ requires base != nil && base.NonInitMem() // @ requires forall i int :: { &opts[i] } 0 <= i && i < len(opts) ==> -// @ (acc(&opts[i], R10) && opts[i] != nil && opts[i].NonInitMem()) +// @ (acc(&opts[i], R10) && opts[i] != nil && opts[i].NonInitMem()) // Due to Viper's very strict injectivity constraints: // @ requires forall i, j int :: { &opts[i], &opts[j] } 0 <= i && i < j && j < len(opts) ==> -// @ opts[i] !== opts[j] +// @ opts[i] !== opts[j] // @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R39) // @ ensures forall i int :: { &opts[i] } 0 <= i && i < len(opts) ==> -// @ (acc(&opts[i], R10) && opts[i] != nil) +// @ (acc(&opts[i], R10) && opts[i] != nil) // @ ensures -1 <= idx && idx < len(opts) // @ ensures len(processed) == len(opts) // @ ensures len(offsets) == len(opts) @@ -4069,13 +4057,13 @@ func (p *scionPacketProcessor) prepareSCMP( // @ base.EqAbsHeader(data) && base.ValidScionInitSpec(data) // @ ensures reterr == nil ==> base.EqPathType(data) // @ ensures forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> -// @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data))) +// @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data))) // @ ensures reterr == nil ==> forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> -// @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(data[offsets[i].start:offsets[i].end])) +// @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(data[offsets[i].start:offsets[i].end])) // @ ensures reterr == nil ==> forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> -// @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) +// @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) // @ ensures reterr == nil ==> forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> -// @ (!processed[i] ==> opts[i].NonInitMem()) +// @ (!processed[i] ==> opts[i].NonInitMem()) // @ ensures reterr != nil ==> base.NonInitMem() // @ ensures reterr != nil ==> (forall i int :: { &opts[i] } 0 <= i && i < len(opts) ==> opts[i].NonInitMem()) // @ ensures reterr != nil ==> reterr.ErrorMem() @@ -4114,15 +4102,15 @@ func decodeLayers(data []byte, base *slayers.SCION, // @ invariant idx == -1 ==> (last === base && oldStart == 0 && oldEnd == len(oldData)) // @ invariant 0 <= idx ==> (processed[idx] && last === opts[idx]) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data))) + // @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data))) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(oldData[offsets[i].start:offsets[i].end])) + // @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(oldData[offsets[i].start:offsets[i].end])) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) + // @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 < len(opts) && i0 <= i && i < len(opts) ==> - // @ !processed[i] + // @ !processed[i] // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ 
(!processed[i] ==> opts[i].NonInitMem()) + // @ (!processed[i] ==> opts[i].NonInitMem()) // @ invariant gopacket.NilDecodeFeedback.Mem() // @ invariant 0 <= oldStart && oldStart <= oldEnd && oldEnd <= len(oldData) // @ decreases len(opts) - i0 @@ -4132,11 +4120,11 @@ func decodeLayers(data []byte, base *slayers.SCION, // @ ghost var pos offsetPair // @ ghost var ub []byte // @ ghost if idx == -1 { - // @ pos = offsetPair{0, len(oldData), false} - // @ ub = oldData + // @ pos = offsetPair{0, len(oldData), false} + // @ ub = oldData // @ } else { - // @ pos = offsets[idx] - // @ if pos.isNil { ub = nil } else { ub = oldData[pos.start:pos.end] } + // @ pos = offsets[idx] + // @ if pos.isNil { ub = nil } else { ub = oldData[pos.start:pos.end] } // @ } if layerClassTmp.Contains(last.NextLayerType( /*@ ub @*/ )) { data /*@ , start, end @*/ := last.LayerPayload( /*@ ub @*/ ) @@ -4146,7 +4134,7 @@ func decodeLayers(data []byte, base *slayers.SCION, // @ ghost if data == nil { // @ sl.NilAcc_Bytes() // @ } else { - // @ sl.SplitRange_Bytes(oldData, oldStart, oldEnd, R40) + // @ sl.SplitRange_Bytes(oldData, oldStart, oldEnd, R40) // @ } if err := opt.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil { // @ ghost if data != nil { sl.CombineRange_Bytes(oldData, oldStart, oldEnd, R40) } @@ -4161,22 +4149,22 @@ func decodeLayers(data []byte, base *slayers.SCION, // @ invariant forall i, j int :: {&opts[i], &opts[j]} 0 <= i && i < j && j < len(opts) ==> opts[i] !== opts[j] // @ invariant forall i int :: {&opts[i]} 0 <= i && i < len(opts) ==> opts[i] != nil // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(oldData))) + // @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(oldData))) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(oldData[offsets[i].start:offsets[i].end])) + // @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(oldData[offsets[i].start:offsets[i].end])) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) + // @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ (!processed[i] ==> opts[i].NonInitMem()) + // @ (!processed[i] ==> opts[i].NonInitMem()) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 < len(opts) && c < i && i < len(opts) ==> - // @ !processed[i] + // @ !processed[i] // @ decreases c // @ for c := i0-1; 0 <= c; c=c-1 { - // @ if processed[c] { - // @ off := offsets[c] - // @ if off.isNil { + // @ if processed[c] { + // @ off := offsets[c] + // @ if off.isNil { // @ opts[c].DowngradePerm(nil) - // @ } else { + // @ } else { // @ opts[c].DowngradePerm(oldData[off.start:off.end]) // @ } // @ } From fe87c9ec642237a3baf84ee26f7eaf5179f5db50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Wed, 12 Jun 2024 14:48:14 -0400 Subject: [PATCH 42/57] big clean-up (#354) --- pkg/addr/host.go | 32 +-- pkg/addr/host_spec.gobra | 4 +- pkg/experimental/epic/epic.go | 56 ++--- pkg/experimental/epic/epic_spec.gobra | 2 +- pkg/scrypto/scrypto_spec.gobra | 6 +- pkg/slayers/extn.go | 52 ++-- pkg/slayers/extn_spec.gobra | 6 +- pkg/slayers/path/epic/epic.go | 92 +++---- 
pkg/slayers/path/epic/epic_spec.gobra | 6 +- pkg/slayers/path/hopfield.go | 22 +- pkg/slayers/path/hopfield_spec.gobra | 6 +- pkg/slayers/path/infofield.go | 12 +- pkg/slayers/path/infofield_spec.gobra | 20 +- pkg/slayers/path/mac.go | 24 +- pkg/slayers/path/onehop/onehop.go | 40 +-- pkg/slayers/path/path.go | 24 +- pkg/slayers/path/path_spec_test.gobra | 4 +- pkg/slayers/path/scion/base.go | 14 +- pkg/slayers/path/scion/base_spec.gobra | 20 +- pkg/slayers/path/scion/decoded.go | 30 +-- .../path/scion/decoded_spec_test.gobra | 4 +- pkg/slayers/path/scion/raw.go | 94 +++---- pkg/slayers/path/scion/raw_spec.gobra | 171 +++---------- pkg/slayers/path/scion/raw_spec_test.gobra | 4 +- pkg/slayers/path/scion/widen-lemma.gobra | 44 ++-- pkg/slayers/scion.go | 206 +++++++-------- pkg/slayers/scion_spec.gobra | 70 +++--- pkg/slayers/scion_test.gobra | 12 +- pkg/slayers/scmp.go | 62 ++--- pkg/slayers/scmp_msg.go | 236 +++++++++--------- pkg/slayers/scmp_typecode.go | 8 +- private/topology/linktype.go | 38 +-- private/underlay/conn/conn.go | 20 +- private/underlay/conn/conn_spec.gobra | 14 +- router/bfd_spec.gobra | 2 +- router/dataplane.go | 210 ++++++++-------- router/dataplane_spec.gobra | 14 +- router/dataplane_spec_test.gobra | 2 +- router/io-spec-lemmas.gobra | 62 ++--- router/io-spec.gobra | 10 +- router/widen-lemma.gobra | 56 ++--- verification/dependencies/bytes/bytes.gobra | 4 +- .../dependencies/crypto/aes/cipher.gobra | 2 +- .../dependencies/crypto/cipher/cbc.gobra | 4 +- .../dependencies/crypto/cipher/cipher.gobra | 20 +- .../crypto/subtle/constant_time.gobra | 10 +- .../github.com/google/gopacket/decode.gobra | 4 +- .../github.com/google/gopacket/flows.gobra | 8 +- .../google/gopacket/layers/base.gobra | 22 +- .../google/gopacket/layers/bfd.gobra | 6 +- .../google/gopacket/layertype.gobra | 4 +- .../github.com/google/gopacket/packet.gobra | 6 +- .../github.com/google/gopacket/parser.gobra | 6 +- .../github.com/google/gopacket/writer.gobra | 12 +- .../x/net/internal/socket/socket.gobra | 4 +- .../x/net/internal/socket/socket_test.gobra | 6 +- verification/dependencies/net/ip.gobra | 6 +- verification/dependencies/net/net.gobra | 10 +- verification/dependencies/net/udpsock.gobra | 12 +- verification/utils/slices/slices.gobra | 156 +++--------- .../utils/slices/slices_contents.gobra | 137 ---------- verification/utils/slices/slices_test.gobra | 28 +-- 62 files changed, 967 insertions(+), 1311 deletions(-) delete mode 100644 verification/utils/slices/slices_contents.gobra diff --git a/pkg/addr/host.go b/pkg/addr/host.go index 067b348a5..8efeb76aa 100644 --- a/pkg/addr/host.go +++ b/pkg/addr/host.go @@ -28,7 +28,7 @@ import ( "github.com/scionproto/scion/pkg/private/serrors" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) type HostAddrType uint8 @@ -196,7 +196,7 @@ func (h HostIPv4) Pack() (res []byte) { func (h HostIPv4) IP() (res net.IP) { // XXX(kormat): ensure the reply is the 4-byte representation. 
//@ unfold acc(h.Mem(), R13) - //@ unfold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ unfold acc(sl.Bytes(h, 0, len(h)), R13) return net.IP(h).To4( /*@ false @*/ ) } @@ -205,10 +205,10 @@ func (h HostIPv4) IP() (res net.IP) { // @ decreases func (h HostIPv4) Copy() (res HostAddr) { //@ unfold acc(h.Mem(), R13) - //@ unfold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ unfold acc(sl.Bytes(h, 0, len(h)), R13) var tmp HostIPv4 = HostIPv4(append( /*@ R13, @*/ net.IP(nil), h...)) - //@ fold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold acc(sl.Bytes(h, 0, len(h)), R13) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold acc(h.Mem(), R13) //@ fold tmp.Mem() return tmp @@ -231,7 +231,7 @@ func (h HostIPv4) Equal(o HostAddr) bool { func (h HostIPv4) String() string { //@ assert unfolding acc(h.Mem(), R13) in len(h) == HostLenIPv4 //@ ghost defer fold acc(h.Mem(), R13) - //@ ghost defer fold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ ghost defer fold acc(sl.Bytes(h, 0, len(h)), R13) return h.IP().String() } @@ -254,7 +254,7 @@ func (h HostIPv6) Type() HostAddrType { // @ decreases func (h HostIPv6) Pack() (res []byte) { //@ unfold acc(h.Mem(), R13) - //@ unfold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ unfold acc(sl.Bytes(h, 0, len(h)), R13) return []byte(h)[:HostLenIPv6] } @@ -264,7 +264,7 @@ func (h HostIPv6) Pack() (res []byte) { // @ decreases func (h HostIPv6) IP() (res net.IP) { //@ unfold acc(h.Mem(), R13) - //@ unfold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ unfold acc(sl.Bytes(h, 0, len(h)), R13) return net.IP(h) } @@ -273,10 +273,10 @@ func (h HostIPv6) IP() (res net.IP) { // @ decreases func (h HostIPv6) Copy() (res HostAddr) { //@ unfold acc(h.Mem(), R13) - //@ unfold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ unfold acc(sl.Bytes(h, 0, len(h)), R13) var tmp HostIPv6 = HostIPv6(append( /*@ R13, @*/ net.IP(nil), h...)) - //@ fold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold acc(sl.Bytes(h, 0, len(h)), R13) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold acc(h.Mem(), R13) //@ fold tmp.Mem() return tmp @@ -299,7 +299,7 @@ func (h HostIPv6) Equal(o HostAddr) bool { func (h HostIPv6) String() string { //@ assert unfolding acc(h.Mem(), R13) in len(h) == HostLenIPv6 //@ ghost defer fold acc(h.Mem(), R13) - //@ ghost defer fold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ ghost defer fold acc(sl.Bytes(h, 0, len(h)), R13) return h.IP().String() } @@ -442,7 +442,7 @@ func HostFromRaw(b []byte, htype HostAddrType) (res HostAddr, err error) { } //@ assert forall i int :: { &b[:HostLenIPv4][i] } 0 <= i && i < len(b[:HostLenIPv4]) ==> &b[:HostLenIPv4][i] == &b[i] tmp := HostIPv4(b[:HostLenIPv4]) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold tmp.Mem() return tmp, nil case HostTypeIPv6: @@ -451,7 +451,7 @@ func HostFromRaw(b []byte, htype HostAddrType) (res HostAddr, err error) { } //@ assert forall i int :: { &b[:HostLenIPv4][i] } 0 <= i && i < len(b[:HostLenIPv4]) ==> &b[:HostLenIPv4][i] == &b[i] tmp := HostIPv6(b[:HostLenIPv6]) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold tmp.Mem() return tmp, nil case HostTypeSVC: @@ -473,12 +473,12 @@ func HostFromRaw(b []byte, htype HostAddrType) (res HostAddr, err error) { func HostFromIP(ip net.IP) (res HostAddr) { if ip4 := ip.To4( /*@ false @*/ ); ip4 != nil { tmp := HostIPv4(ip4) - 
//@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold tmp.Mem() return tmp } tmp := HostIPv6(ip) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold tmp.Mem() return tmp } diff --git a/pkg/addr/host_spec.gobra b/pkg/addr/host_spec.gobra index 16364a6c5..ed6e9032f 100644 --- a/pkg/addr/host_spec.gobra +++ b/pkg/addr/host_spec.gobra @@ -28,14 +28,14 @@ HostNone implements HostAddr pred (h HostIPv4) Mem() { len(h) == HostLenIPv4 && - slices.AbsSlice_Bytes(h, 0, len(h)) + slices.Bytes(h, 0, len(h)) } HostIPv4 implements HostAddr pred (h HostIPv6) Mem() { len(h) == HostLenIPv6 && - slices.AbsSlice_Bytes(h, 0, len(h)) + slices.Bytes(h, 0, len(h)) } HostIPv6 implements HostAddr diff --git a/pkg/experimental/epic/epic.go b/pkg/experimental/epic/epic.go index 1eee3d3c5..6c8cb443e 100644 --- a/pkg/experimental/epic/epic.go +++ b/pkg/experimental/epic/epic.go @@ -51,7 +51,7 @@ var zeroInitVector /*@@@*/ [16]byte /*@ // ghost init func init() { - fold acc(sl.AbsSlice_Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) + fold acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) fold acc(postInitInvariant(), _) } @*/ @@ -108,14 +108,14 @@ func VerifyTimestamp(timestamp time.Time, epicTS uint32, now time.Time) (err err // EPIC MAC may get overwritten. Only the most recently returned EPIC MAC is guaranteed to be // valid. // @ requires len(auth) == 16 -// @ requires sl.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ requires sl.Bytes(buffer, 0, len(buffer)) // @ preserves acc(s.Mem(ub), R20) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R20) -// @ preserves acc(sl.AbsSlice_Bytes(auth, 0, len(auth)), R30) -// @ ensures reserr == nil ==> sl.AbsSlice_Bytes(res, 0, len(res)) -// @ ensures reserr == nil ==> (sl.AbsSlice_Bytes(res, 0, len(res)) --* sl.AbsSlice_Bytes(buffer, 0, len(buffer))) +// @ preserves acc(sl.Bytes(ub, 0, len(ub)), R20) +// @ preserves acc(sl.Bytes(auth, 0, len(auth)), R30) +// @ ensures reserr == nil ==> sl.Bytes(res, 0, len(res)) +// @ ensures reserr == nil ==> (sl.Bytes(res, 0, len(res)) --* sl.Bytes(buffer, 0, len(buffer))) // @ ensures reserr != nil ==> reserr.ErrorMem() -// @ ensures reserr != nil ==> sl.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ ensures reserr != nil ==> sl.Bytes(buffer, 0, len(buffer)) // @ decreases func CalcMac(auth []byte, pktID epic.PktID, s *slayers.SCION, timestamp uint32, buffer []byte /*@ , ghost ub []byte @*/) (res []byte, reserr error) { @@ -124,7 +124,7 @@ func CalcMac(auth []byte, pktID epic.PktID, s *slayers.SCION, // @ ghost allocatesNewBuffer := len(buffer) < MACBufferSize if len(buffer) < MACBufferSize { buffer = make([]byte, MACBufferSize) - // @ fold sl.AbsSlice_Bytes(buffer, 0, len(buffer)) + // @ fold sl.Bytes(buffer, 0, len(buffer)) } // Initialize cryptographic MAC function @@ -147,14 +147,14 @@ func CalcMac(auth []byte, pktID epic.PktID, s *slayers.SCION, // @ ghost end := start + 4 result := input[len(input)-f.BlockSize() : len(input)-f.BlockSize()+4] // @ sl.SplitRange_Bytes(input, start, end, writePerm) - // @ package (sl.AbsSlice_Bytes(result, 0, len(result)) --* sl.AbsSlice_Bytes(oldBuffer, 0, len(oldBuffer))) { + // @ package (sl.Bytes(result, 0, len(result)) --* sl.Bytes(oldBuffer, 0, len(oldBuffer))) { // @ ghost if !allocatesNewBuffer { // @ assert oldBuffer === buffer // @ sl.CombineRange_Bytes(input, start, end, writePerm) // @ sl.CombineRange_Bytes(oldBuffer, 0, inputLength, writePerm) // @ } // @ } - // @ assert 
(sl.AbsSlice_Bytes(result, 0, len(result)) --* sl.AbsSlice_Bytes(oldBuffer, 0, len(oldBuffer))) + // @ assert (sl.Bytes(result, 0, len(result)) --* sl.Bytes(oldBuffer, 0, len(oldBuffer))) return result, nil } @@ -163,11 +163,11 @@ func CalcMac(auth []byte, pktID epic.PktID, s *slayers.SCION, // bytes of the SCION path type MAC, has invalid length, or if the MAC calculation gives an error, // also VerifyHVF returns an error. The verification was successful if and only if VerifyHVF // returns nil. -// @ preserves sl.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ preserves sl.Bytes(buffer, 0, len(buffer)) // @ preserves acc(s.Mem(ub), R20) -// @ preserves acc(sl.AbsSlice_Bytes(hvf, 0, len(hvf)), R50) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R20) -// @ preserves acc(sl.AbsSlice_Bytes(auth, 0, len(auth)), R30) +// @ preserves acc(sl.Bytes(hvf, 0, len(hvf)), R50) +// @ preserves acc(sl.Bytes(ub, 0, len(ub)), R20) +// @ preserves acc(sl.Bytes(auth, 0, len(auth)), R30) // @ ensures reserr != nil ==> reserr.ErrorMem() // @ decreases func VerifyHVF(auth []byte, pktID epic.PktID, s *slayers.SCION, @@ -183,11 +183,11 @@ func VerifyHVF(auth []byte, pktID epic.PktID, s *slayers.SCION, } if subtle.ConstantTimeCompare(hvf, mac) == 0 { - // @ apply sl.AbsSlice_Bytes(mac, 0, len(mac)) --* sl.AbsSlice_Bytes(buffer, 0, len(buffer)) + // @ apply sl.Bytes(mac, 0, len(mac)) --* sl.Bytes(buffer, 0, len(buffer)) return serrors.New("epic hop validation field verification failed", "hvf in packet", hvf, "calculated mac", mac, "auth", auth) } - // @ apply sl.AbsSlice_Bytes(mac, 0, len(mac)) --* sl.AbsSlice_Bytes(buffer, 0, len(buffer)) + // @ apply sl.Bytes(mac, 0, len(mac)) --* sl.Bytes(buffer, 0, len(buffer)) return nil } @@ -206,7 +206,7 @@ func CoreFromPktCounter(counter uint32) (uint8, uint32) { } // @ requires len(key) == 16 -// @ preserves acc(sl.AbsSlice_Bytes(key, 0, len(key)), R50) +// @ preserves acc(sl.Bytes(key, 0, len(key)), R50) // @ ensures reserr == nil ==> res != nil && res.Mem() && res.BlockSize() == 16 // @ ensures reserr != nil ==> reserr.ErrorMem() // @ decreases @@ -225,8 +225,8 @@ func initEpicMac(key []byte) (res cipher.BlockMode, reserr error) { // @ requires MACBufferSize <= len(inputBuffer) // @ preserves acc(s.Mem(ub), R20) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R20) -// @ preserves sl.AbsSlice_Bytes(inputBuffer, 0, len(inputBuffer)) +// @ preserves acc(sl.Bytes(ub, 0, len(ub)), R20) +// @ preserves sl.Bytes(inputBuffer, 0, len(inputBuffer)) // @ ensures reserr == nil ==> 16 <= res && res <= len(inputBuffer) // @ ensures reserr != nil ==> reserr.ErrorMem() // @ decreases @@ -265,7 +265,7 @@ func prepareMacInput(pktID epic.PktID, s *slayers.SCION, timestamp uint32, inputLength := 16 * nrBlocks // Fill input - // @ unfold sl.AbsSlice_Bytes(inputBuffer, 0, len(inputBuffer)) + // @ unfold sl.Bytes(inputBuffer, 0, len(inputBuffer)) offset := 0 inputBuffer[0] = uint8(s.SrcAddrType & 0x3) // extract length bits offset += 1 @@ -273,12 +273,12 @@ func prepareMacInput(pktID epic.PktID, s *slayers.SCION, timestamp uint32, // @ &inputBuffer[offset:][i] == &inputBuffer[offset+i] binary.BigEndian.PutUint32(inputBuffer[offset:], timestamp) offset += 4 - // @ fold sl.AbsSlice_Bytes(inputBuffer, 0, len(inputBuffer)) + // @ fold sl.Bytes(inputBuffer, 0, len(inputBuffer)) // @ sl.SplitRange_Bytes(inputBuffer, offset, len(inputBuffer), writePerm) pktID.SerializeTo(inputBuffer[offset:]) // @ sl.CombineRange_Bytes(inputBuffer, offset, len(inputBuffer), writePerm) offset += 
epic.PktIDLen - // @ unfold sl.AbsSlice_Bytes(inputBuffer, 0, len(inputBuffer)) + // @ unfold sl.Bytes(inputBuffer, 0, len(inputBuffer)) // @ assert forall i int :: { &inputBuffer[offset:][i] } 0 <= i && i < len(inputBuffer[offset:]) ==> // @ &inputBuffer[offset:][i] == &inputBuffer[offset+i] binary.BigEndian.PutUint64(inputBuffer[offset:], uint64(s.SrcIA)) @@ -286,9 +286,9 @@ func prepareMacInput(pktID epic.PktID, s *slayers.SCION, timestamp uint32, // @ assert forall i int :: { &inputBuffer[offset:][i] } 0 <= i && i < len(inputBuffer[offset:]) ==> // @ &inputBuffer[offset:][i] == &inputBuffer[offset+i] // @ sl.SplitRange_Bytes(ub, start, end, R20) - // @ unfold acc(sl.AbsSlice_Bytes(srcAddr, 0, len(srcAddr)), R20) + // @ unfold acc(sl.Bytes(srcAddr, 0, len(srcAddr)), R20) copy(inputBuffer[offset:], srcAddr /*@ , R20 @*/) - // @ fold acc(sl.AbsSlice_Bytes(srcAddr, 0, len(srcAddr)), R20) + // @ fold acc(sl.Bytes(srcAddr, 0, len(srcAddr)), R20) // @ sl.CombineRange_Bytes(ub, start, end, R20) offset += l // @ assert forall i int :: { &inputBuffer[offset:][i] } 0 <= i && i < len(inputBuffer[offset:]) ==> @@ -304,15 +304,15 @@ func prepareMacInput(pktID epic.PktID, s *slayers.SCION, timestamp uint32, // @ acc(&inputBuffer[offset:inputLength][i]) // @ establishPostInitInvariant() // @ unfold acc(postInitInvariant(), _) - // @ assert acc(sl.AbsSlice_Bytes(zeroInitVector[:], 0, 16), _) + // @ assert acc(sl.Bytes(zeroInitVector[:], 0, 16), _) // (VerifiedSCION) From the package invariant, we learn that we have a wildcard access to zeroInitVector. // Unfortunately, it is not possible to call `copy` with a wildcard amount, even though // that would be perfectly fine. The spec of `copy` would need to be adapted to allow for that case. - // @ inhale acc(sl.AbsSlice_Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), R55) - // @ unfold acc(sl.AbsSlice_Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), R55) + // @ inhale acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), R55) + // @ unfold acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), R55) // @ assert forall i int :: { &zeroInitVector[:][i] } 0 <= i && i < len(zeroInitVector[:]) ==> // @ &zeroInitVector[:][i] == &zeroInitVector[i] copy(inputBuffer[offset:inputLength], zeroInitVector[:] /*@ , R55 @*/) - // @ fold sl.AbsSlice_Bytes(inputBuffer, 0, len(inputBuffer)) + // @ fold sl.Bytes(inputBuffer, 0, len(inputBuffer)) return inputLength, nil } diff --git a/pkg/experimental/epic/epic_spec.gobra b/pkg/experimental/epic/epic_spec.gobra index b0eae8f15..7a8fdc0ed 100644 --- a/pkg/experimental/epic/epic_spec.gobra +++ b/pkg/experimental/epic/epic_spec.gobra @@ -21,7 +21,7 @@ import sl "github.com/scionproto/scion/verification/utils/slices" pred postInitInvariant() { acc(&zeroInitVector, _) && len(zeroInitVector[:]) == 16 && - acc(sl.AbsSlice_Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) + acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) } // learn the invariant established by init diff --git a/pkg/scrypto/scrypto_spec.gobra b/pkg/scrypto/scrypto_spec.gobra index 803e0d146..9244f498b 100644 --- a/pkg/scrypto/scrypto_spec.gobra +++ b/pkg/scrypto/scrypto_spec.gobra @@ -20,18 +20,18 @@ package scrypto import "hash" -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // The error returned by initMac is produced deterministically depending on the key. 
// If an initial call to initMac succeeds with a key, then any subsequent
// calls to it will also succeed. This behaviour is abstracted using this
// ghost function.
ghost
-requires acc(slices.AbsSlice_Bytes(key, 0, len(key)), _)
+requires acc(sl.Bytes(key, 0, len(key)), _)
decreases _
pure func ValidKeyForHash(key []byte) bool

-preserves acc(slices.AbsSlice_Bytes(key, 0, len(key)), _)
+preserves acc(sl.Bytes(key, 0, len(key)), _)
ensures old(ValidKeyForHash(key)) ==> e == nil
ensures e == nil ==> (h != nil && h.Mem() && ValidKeyForHash(key))
ensures e != nil ==> e.ErrorMem()
diff --git a/pkg/slayers/extn.go b/pkg/slayers/extn.go
index 08a7701e7..951771c4f 100644
--- a/pkg/slayers/extn.go
+++ b/pkg/slayers/extn.go
@@ -70,16 +70,16 @@ func (o *tlvOption) length(fixLengths bool) (res int) {
 // @ requires 2 <= len(data)
 // @ preserves acc(o)
-// @ preserves acc(sl.AbsSlice_Bytes(o.OptData, 0, len(o.OptData)), R20)
-// @ preserves sl.AbsSlice_Bytes(data, 0, len(data))
+// @ preserves acc(sl.Bytes(o.OptData, 0, len(o.OptData)), R20)
+// @ preserves sl.Bytes(data, 0, len(data))
 // @ decreases
 func (o *tlvOption) serializeTo(data []byte, fixLengths bool) {
 	dryrun := data == nil
 	if o.OptType == OptTypePad1 {
 		if !dryrun {
-			// @ unfold sl.AbsSlice_Bytes(data, 0, len(data))
+			// @ unfold sl.Bytes(data, 0, len(data))
 			data[0] = 0x0
-			// @ fold sl.AbsSlice_Bytes(data, 0, len(data))
+			// @ fold sl.Bytes(data, 0, len(data))
 		}
 		return
 	}
@@ -87,19 +87,19 @@ func (o *tlvOption) serializeTo(data []byte, fixLengths bool) {
 		o.OptDataLen = uint8(len(o.OptData))
 	}
 	if !dryrun {
-		// @ unfold sl.AbsSlice_Bytes(data, 0, len(data))
-		// @ unfold acc(sl.AbsSlice_Bytes(o.OptData, 0, len(o.OptData)), R20)
+		// @ unfold sl.Bytes(data, 0, len(data))
+		// @ unfold acc(sl.Bytes(o.OptData, 0, len(o.OptData)), R20)
 		data[0] = uint8(o.OptType)
 		data[1] = o.OptDataLen
 		// @ assert forall i int :: { &data[2:][i] } 0 <= i && i < len(data[2:]) ==> &data[2:][i] == &data[2+i]
 		copy(data[2:], o.OptData /*@ , R20 @*/)
-		// @ fold acc(sl.AbsSlice_Bytes(o.OptData, 0, len(o.OptData)), R20)
-		// @ fold sl.AbsSlice_Bytes(data, 0, len(data))
+		// @ fold acc(sl.Bytes(o.OptData, 0, len(o.OptData)), R20)
+		// @ fold sl.Bytes(data, 0, len(data))
 	}
 }

 // @ requires 1 <= len(data)
-// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41)
+// @ preserves acc(sl.Bytes(data, 0, len(data)), R41)
 // @ ensures err == nil ==> acc(res)
 // @ ensures (err == nil && res.OptType != OptTypePad1) ==> (
 // @ 	2 <= res.ActualLength && res.ActualLength <= len(data) && res.OptData === data[2:res.ActualLength])
 // @ ensures err != nil ==> err.ErrorMem()
 // @ decreases
 func decodeTLVOption(data []byte) (res *tlvOption, err error) {
-	// @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41)
-	// @ defer fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41)
+	// @ unfold acc(sl.Bytes(data, 0, len(data)), R41)
+	// @ defer fold acc(sl.Bytes(data, 0, len(data)), R41)
 	o := &tlvOption{OptType: OptionType(data[0])}
 	if OptionType(data[0]) == OptTypePad1 {
 		o.ActualLength = 1
@@ -133,16 +133,16 @@ func decodeTLVOption(data []byte) (res *tlvOption, err error) {

 // serializeTLVOptionPadding adds an appropriate PadN extension.
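// (VerifiedSCION) Editor's note: a minimal, illustrative use of this padding helper,
// assuming OptTypePadN carries the conventional IPv6 hop-by-hop value 0x1 (the constant
// itself is declared elsewhere in this file and is not confirmed by this hunk):
//
//	buf := make([]byte, 4)
//	//@ fold sl.Bytes(buf, 0, len(buf))
//	serializeTLVOptionPadding(buf, 4)
//	// buf == []byte{0x1, 0x2, 0x0, 0x0}: a PadN option with OptDataLen = padLength-2 = 2
//	// zero data bytes, whereas padLength == 1 would instead emit the single Pad1 byte 0x0.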
// @ requires padLength == 1 ==> 1 <= len(data) // @ requires 1 < padLength ==> 2 <= len(data) -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves sl.Bytes(data, 0, len(data)) // @ decreases func serializeTLVOptionPadding(data []byte, padLength int) { if padLength <= 0 { return } if padLength == 1 { - // @ unfold sl.AbsSlice_Bytes(data, 0, len(data)) + // @ unfold sl.Bytes(data, 0, len(data)) data[0] = 0x0 - // @ fold sl.AbsSlice_Bytes(data, 0, len(data)) + // @ fold sl.Bytes(data, 0, len(data)) return } dataLen := uint8(padLength) - 2 @@ -151,7 +151,7 @@ func serializeTLVOptionPadding(data []byte, padLength int) { OptDataLen: dataLen, OptData: make([]byte, int(dataLen)), } - // @ fold sl.AbsSlice_Bytes(padN.OptData, 0, len(padN.OptData)) + // @ fold sl.Bytes(padN.OptData, 0, len(padN.OptData)) padN.serializeTo(data, false) } @@ -241,7 +241,7 @@ func (e *extnBase) serializeToWithTLVOptions(b gopacket.SerializeBuffer, // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ ensures resErr != nil ==> resErr.ErrorMem() // The following poscondition is more a lot more complicated than it would be if the return type // was *extnBase instead of extnBase @@ -259,10 +259,10 @@ func decodeExtnBase(data []byte, df gopacket.DecodeFeedback) (res extnBase, resE len(data))) } - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) + // @ unfold acc(sl.Bytes(data, 0, len(data)), R40) e.NextHdr = L4ProtocolType(data[0]) e.ExtLen = data[1] - // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) + // @ fold acc(sl.Bytes(data, 0, len(data)), R40) e.ActualLen = (int(e.ExtLen) + 1) * LineLen if len(data) < e.ActualLen { return extnBase{}, serrors.New(fmt.Sprintf("invalid extension header. 
"+ @@ -346,7 +346,7 @@ func (h *HopByHopExtn) SerializeTo(b gopacket.SerializeBuffer, // @ requires h.NonInitMem() // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ ensures res == nil ==> h.Mem(data) // @ ensures res != nil ==> (h.NonInitMem() && res.ErrorMem()) // @ decreases @@ -373,7 +373,7 @@ func (h *HopByHopExtn) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) // @ invariant len(h.Options) == lenOptions // @ invariant forall i int :: { &h.Options[i] } 0 <= i && i < lenOptions ==> // @ (acc(&h.Options[i]) && h.Options[i].Mem(i)) - // @ invariant acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) + // @ invariant acc(sl.Bytes(data, 0, len(data)), R40) // @ invariant h.BaseLayer.Contents === data[:h.ActualLen] // @ invariant h.BaseLayer.Payload === data[h.ActualLen:] // @ decreases h.ActualLen - offset @@ -399,7 +399,7 @@ func (h *HopByHopExtn) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) } // @ requires p != nil -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves p.Mem() // @ ensures res != nil ==> res.ErrorMem() // @ decreases @@ -478,7 +478,7 @@ func (e *EndToEndExtn) LayerPayload( /*@ ghost ub []byte @*/ ) (res []byte /*@ , // @ requires e.NonInitMem() // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ ensures res == nil ==> e.Mem(data) // @ ensures res != nil ==> (e.NonInitMem() && res.ErrorMem()) // @ decreases @@ -505,7 +505,7 @@ func (e *EndToEndExtn) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) // @ invariant len(e.Options) == lenOptions // @ invariant forall i int :: { &e.Options[i] } 0 <= i && i < lenOptions ==> // @ (acc(&e.Options[i]) && e.Options[i].Mem(i)) - // @ invariant acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) + // @ invariant acc(sl.Bytes(data, 0, len(data)), R40) // @ invariant e.BaseLayer.Contents === data[:e.ActualLen] // @ invariant e.BaseLayer.Payload === data[e.ActualLen:] // @ decreases e.ActualLen - offset @@ -531,7 +531,7 @@ func (e *EndToEndExtn) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) } // @ requires p != nil -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves p.Mem() // @ ensures res != nil ==> res.ErrorMem() // @ decreases @@ -600,7 +600,7 @@ type HopByHopExtnSkipper struct { // DecodeFromBytes implementation according to gopacket.DecodingLayer // @ requires s.NonInitMem() // @ requires df != nil -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ preserves df.Mem() // @ ensures res == nil ==> s.Mem(data) // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem()) @@ -656,7 +656,7 @@ type EndToEndExtnSkipper struct { // @ requires s.NonInitMem() // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ ensures res == nil ==> s.Mem(data) // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem()) // @ decreases diff --git a/pkg/slayers/extn_spec.gobra b/pkg/slayers/extn_spec.gobra index aa9ff299f..32c6ab920 100644 --- a/pkg/slayers/extn_spec.gobra +++ b/pkg/slayers/extn_spec.gobra @@ -20,7 +20,7 @@ import ( "github.com/google/gopacket" . 
"github.com/scionproto/scion/verification/utils/definitions" - "github.com/scionproto/scion/verification/utils/slices" + // sl "github.com/scionproto/scion/verification/utils/slices" ) /** start of extnBase **/ @@ -226,7 +226,7 @@ func (s *EndToEndExtnSkipper) DowngradePerm(ghost ub []byte) { pred (o *HopByHopOption) Mem(_ int) { // permissions to the elements of OptData will be stored // together with the underlying, not in the option itself - acc(o) // && slices.AbsSlice_Bytes(o.OptData, 0, len(o.OptData)) + acc(o) // && sl.Bytes(o.OptData, 0, len(o.OptData)) } // TODO: maybe add the underlying slice as a parameter to be able to @@ -234,7 +234,7 @@ pred (o *HopByHopOption) Mem(_ int) { pred (e *EndToEndOption) Mem(_ int) { // permissions to the elements of OptData will be stored // together with the underlying, not in the option itself - acc(e) // && slices.AbsSlice_Bytes(e.OptData, 0, len(e.OptData)) + acc(e) // && sl.Bytes(e.OptData, 0, len(e.OptData)) } diff --git a/pkg/slayers/path/epic/epic.go b/pkg/slayers/path/epic/epic.go index 6a43efc29..7c14a9c96 100644 --- a/pkg/slayers/path/epic/epic.go +++ b/pkg/slayers/path/epic/epic.go @@ -24,7 +24,7 @@ import ( "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) const ( @@ -81,8 +81,8 @@ type Path struct { // SerializeTo serializes the Path into buffer b. On failure, an error is returned, otherwise // SerializeTo will return nil. // @ preserves acc(p.Mem(ubuf), R1) -// @ preserves slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)) -// @ preserves slices.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(b, 0, len(b)) // @ ensures r != nil ==> r.ErrorMem() // @ ensures !old(p.hasScionPath(ubuf)) ==> r != nil // @ ensures len(b) < old(p.Len(ubuf)) ==> r != nil @@ -105,40 +105,40 @@ func (p *Path) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { if p.ScionPath == nil { return serrors.New("SCION path is nil") } - //@ slices.SplitByIndex_Bytes(b, 0, len(b), PktIDLen, writePerm) - //@ slices.Reslice_Bytes(b, 0, PktIDLen, writePerm) + //@ sl.SplitByIndex_Bytes(b, 0, len(b), PktIDLen, writePerm) + //@ sl.Reslice_Bytes(b, 0, PktIDLen, writePerm) p.PktID.SerializeTo(b[:PktIDLen]) - //@ slices.Unslice_Bytes(b, 0, PktIDLen, writePerm) - //@ slices.SplitByIndex_Bytes(b, PktIDLen, len(b), PktIDLen+HVFLen, writePerm) - //@ slices.Reslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) - //@ unfold slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen) - //@ unfold acc(slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)), R2) + //@ sl.Unslice_Bytes(b, 0, PktIDLen, writePerm) + //@ sl.SplitByIndex_Bytes(b, PktIDLen, len(b), PktIDLen+HVFLen, writePerm) + //@ sl.Reslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) + //@ unfold sl.Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen) + //@ unfold acc(sl.Bytes(p.PHVF, 0, len(p.PHVF)), R2) copy(b[PktIDLen:(PktIDLen+HVFLen)], p.PHVF /*@, R3 @*/) - //@ fold slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen) - //@ fold acc(slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)), R2) - //@ slices.Unslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, PktIDLen+HVFLen, PktIDLen, writePerm) - //@ slices.SplitByIndex_Bytes(b, PktIDLen+HVFLen, len(b), MetadataLen, writePerm) - //@ 
slices.Reslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) - //@ unfold acc(slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)), R3) - //@ unfold slices.AbsSlice_Bytes(b[(PktIDLen+HVFLen):MetadataLen], 0, HVFLen) + //@ fold sl.Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen) + //@ fold acc(sl.Bytes(p.PHVF, 0, len(p.PHVF)), R2) + //@ sl.Unslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) + //@ sl.CombineAtIndex_Bytes(b, 0, PktIDLen+HVFLen, PktIDLen, writePerm) + //@ sl.SplitByIndex_Bytes(b, PktIDLen+HVFLen, len(b), MetadataLen, writePerm) + //@ sl.Reslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) + //@ unfold acc(sl.Bytes(p.LHVF, 0, len(p.LHVF)), R3) + //@ unfold sl.Bytes(b[(PktIDLen+HVFLen):MetadataLen], 0, HVFLen) copy(b[(PktIDLen+HVFLen):MetadataLen], p.LHVF /*@, R3 @*/) - //@ fold slices.AbsSlice_Bytes(b[(PktIDLen+HVFLen):MetadataLen], 0, HVFLen) - //@ fold acc(slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)), R3) - //@ slices.Unslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, MetadataLen, PktIDLen+HVFLen, writePerm) - //@ slices.Reslice_Bytes(b, MetadataLen, len(b), writePerm) - //@ ghost defer slices.CombineAtIndex_Bytes(b, 0, len(b), MetadataLen, writePerm) - //@ ghost defer slices.Unslice_Bytes(b, MetadataLen, len(b), writePerm) - //@ slices.SplitRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) - //@ ghost defer slices.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) + //@ fold sl.Bytes(b[(PktIDLen+HVFLen):MetadataLen], 0, HVFLen) + //@ fold acc(sl.Bytes(p.LHVF, 0, len(p.LHVF)), R3) + //@ sl.Unslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) + //@ sl.CombineAtIndex_Bytes(b, 0, MetadataLen, PktIDLen+HVFLen, writePerm) + //@ sl.Reslice_Bytes(b, MetadataLen, len(b), writePerm) + //@ ghost defer sl.CombineAtIndex_Bytes(b, 0, len(b), MetadataLen, writePerm) + //@ ghost defer sl.Unslice_Bytes(b, MetadataLen, len(b), writePerm) + //@ sl.SplitRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) + //@ ghost defer sl.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) return p.ScionPath.SerializeTo(b[MetadataLen:] /*@, ubuf[MetadataLen:] @*/) } // DecodeFromBytes deserializes the buffer b into the Path. On failure, an error is returned, // otherwise SerializeTo will return nil. 
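// (VerifiedSCION) Editor's note: the ghost splits in SerializeTo above and in
// DecodeFromBytes below mirror the EPIC wire layout implied by the constants in this file:
//
//	b[0:PktIDLen]                   packet identifier (timestamp and counter)
//	b[PktIDLen:PktIDLen+HVFLen]     PHVF, the penultimate-hop validation field
//	b[PktIDLen+HVFLen:MetadataLen]  LHVF, the last-hop validation field
//	b[MetadataLen:]                 the embedded SCION path handled by p.ScionPath
//
// Each sl.SplitRange_Bytes/sl.CombineRange_Bytes pair temporarily carves the permission to
// one of these windows out of sl.Bytes(b, 0, len(b)) and restores it afterwards.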
// @ requires p.NonInitMem() -// @ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R42) +// @ preserves acc(sl.Bytes(b, 0, len(b)), R42) // @ ensures len(b) < MetadataLen ==> r != nil // @ ensures r == nil ==> p.Mem(b) // @ ensures r != nil ==> p.NonInitMem() && r.ErrorMem() @@ -148,41 +148,41 @@ func (p *Path) DecodeFromBytes(b []byte) (r error) { return serrors.New("EPIC Path raw too short", "expected", int(MetadataLen), "actual", int(len(b))) } //@ unfold p.NonInitMem() - //@ slices.SplitRange_Bytes(b, 0, PktIDLen, R42) + //@ sl.SplitRange_Bytes(b, 0, PktIDLen, R42) p.PktID.DecodeFromBytes(b[:PktIDLen]) - //@ slices.CombineRange_Bytes(b, 0, PktIDLen, R42) - //@ unfold acc(slices.AbsSlice_Bytes(b, 0, len(b)), R42) + //@ sl.CombineRange_Bytes(b, 0, PktIDLen, R42) + //@ unfold acc(sl.Bytes(b, 0, len(b)), R42) p.PHVF = make([]byte, HVFLen) p.LHVF = make([]byte, HVFLen) //@ assert forall i int :: { &b[PktIDLen:(PktIDLen+HVFLen)][i] } 0 <= i && //@ i < len(b[PktIDLen:(PktIDLen+HVFLen)]) ==> //@ &b[PktIDLen:(PktIDLen+HVFLen)][i] == &b[PktIDLen+i] copy(p.PHVF, b[PktIDLen:(PktIDLen+HVFLen)] /*@, R42 @*/) - //@ fold slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) + //@ fold sl.Bytes(p.PHVF, 0, len(p.PHVF)) //@ assert forall i int :: { &b[(PktIDLen+HVFLen):MetadataLen][i] } 0 <= i && //@ i < len(b[(PktIDLen+HVFLen):MetadataLen]) ==> //@ &b[(PktIDLen+HVFLen):MetadataLen][i] == &b[(PktIDLen+HVFLen)+i] copy(p.LHVF, b[(PktIDLen+HVFLen):MetadataLen] /*@, R42 @*/) - //@ fold slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) + //@ fold sl.Bytes(p.LHVF, 0, len(p.LHVF)) p.ScionPath = &scion.Raw{} //@ fold p.ScionPath.Base.NonInitMem() //@ fold p.ScionPath.NonInitMem() - //@ fold acc(slices.AbsSlice_Bytes(b, 0, len(b)), R42) - //@ slices.SplitRange_Bytes(b, MetadataLen, len(b), R42) + //@ fold acc(sl.Bytes(b, 0, len(b)), R42) + //@ sl.SplitRange_Bytes(b, MetadataLen, len(b), R42) ret := p.ScionPath.DecodeFromBytes(b[MetadataLen:]) //@ ghost if ret == nil { //@ fold p.Mem(b) //@ } else { //@ fold p.NonInitMem() //@ } - //@ slices.CombineRange_Bytes(b, MetadataLen, len(b), R42) + //@ sl.CombineRange_Bytes(b, MetadataLen, len(b), R42) return ret } // Reverse reverses the EPIC path. In particular, this means that the SCION path type subheader // is reversed. // @ requires p.Mem(ubuf) -// @ preserves slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures r == nil ==> ret != nil // @ ensures r == nil ==> ret.Mem(ubuf) // @ ensures r == nil ==> ret != nil @@ -194,13 +194,13 @@ func (p *Path) Reverse( /*@ ghost ubuf []byte @*/ ) (ret path.Path, r error) { //@ fold p.Mem(ubuf) return nil, serrors.New("scion subpath must not be nil") } - //@ slices.SplitRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) + //@ sl.SplitRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) revScion, err := p.ScionPath.Reverse( /*@ ubuf[MetadataLen:] @*/ ) if err != nil { - // @ slices.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) + // @ sl.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) return nil, err } - //@ slices.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) + //@ sl.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) ScionPath, ok := revScion.(*scion.Raw) if !ok { return nil, serrors.New("reversed path of type scion.Raw must not change type") @@ -245,29 +245,29 @@ type PktID struct { // DecodeFromBytes deserializes the buffer (raw) into the PktID. 
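// (VerifiedSCION) Editor's note: an illustrative decode, assuming the usual 8-byte packet
// identifier (a 4-byte timestamp followed by a 4-byte counter, both big endian):
//
//	raw := []byte{0x0, 0x0, 0x0, 0x2a, 0x0, 0x0, 0x0, 0x7}
//	//@ fold sl.Bytes(raw, 0, len(raw))
//	var id PktID
//	id.DecodeFromBytes(raw)
//	// id.Timestamp == 42 && id.Counter == 7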
 // @ requires len(raw) >= PktIDLen
 // @ preserves acc(i)
-// @ preserves acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R42)
+// @ preserves acc(sl.Bytes(raw, 0, len(raw)), R42)
 // @ ensures 0 <= i.Timestamp
 // @ ensures 0 <= i.Counter
 // @ decreases
 func (i *PktID) DecodeFromBytes(raw []byte) {
-	//@ unfold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R42)
+	//@ unfold acc(sl.Bytes(raw, 0, len(raw)), R42)
 	//@ assert forall i int :: { &raw[:4][i] } 0 <= i && i < 4 ==> &raw[:4][i] == &raw[i]
 	i.Timestamp = binary.BigEndian.Uint32(raw[:4])
 	//@ assert forall i int :: { &raw[4:8][i] } 0 <= i && i < 4 ==> &raw[4:8][i] == &raw[4 + i]
 	i.Counter = binary.BigEndian.Uint32(raw[4:8])
-	//@ fold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R42)
+	//@ fold acc(sl.Bytes(raw, 0, len(raw)), R42)
 }

 // SerializeTo serializes the PktID into the buffer (b).
 // @ requires len(b) >= PktIDLen
 // @ preserves acc(i, R1)
-// @ preserves slices.AbsSlice_Bytes(b, 0, len(b))
+// @ preserves sl.Bytes(b, 0, len(b))
 // @ decreases
 func (i *PktID) SerializeTo(b []byte) {
-	//@ unfold slices.AbsSlice_Bytes(b, 0, len(b))
+	//@ unfold sl.Bytes(b, 0, len(b))
 	//@ assert forall j int :: { &b[:4][j] } 0 <= j && j < 4 ==> &b[:4][j] == &b[j]
 	binary.BigEndian.PutUint32(b[:4], i.Timestamp)
 	//@ assert forall j int :: { &b[4:8][j] } 0 <= j && j < 4 ==> &b[4:8][j] == &b[4 + j]
 	binary.BigEndian.PutUint32(b[4:8], i.Counter)
-	//@ fold slices.AbsSlice_Bytes(b, 0, len(b))
+	//@ fold sl.Bytes(b, 0, len(b))
 }
diff --git a/pkg/slayers/path/epic/epic_spec.gobra b/pkg/slayers/path/epic/epic_spec.gobra
index e3b58968e..aeabff882 100644
--- a/pkg/slayers/path/epic/epic_spec.gobra
+++ b/pkg/slayers/path/epic/epic_spec.gobra
@@ -18,7 +18,7 @@ package epic
 import (
 	"github.com/scionproto/scion/pkg/slayers/path"
-	"github.com/scionproto/scion/verification/utils/slices"
+	sl "github.com/scionproto/scion/verification/utils/slices"
 )

 pred (p *Path) NonInitMem() {
@@ -27,8 +27,8 @@ pred (p *Path) NonInitMem() {
 pred (p *Path) Mem(ubuf []byte) {
 	acc(&p.PktID) &&
-	acc(&p.PHVF) && slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) &&
-	acc(&p.LHVF) && slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) &&
+	acc(&p.PHVF) && sl.Bytes(p.PHVF, 0, len(p.PHVF)) &&
+	acc(&p.LHVF) && sl.Bytes(p.LHVF, 0, len(p.LHVF)) &&
 	acc(&p.ScionPath) &&
 	p.ScionPath != nil &&
 	MetadataLen <= len(ubuf) &&
diff --git a/pkg/slayers/path/hopfield.go b/pkg/slayers/path/hopfield.go
index 0c3e0787b..c26b6d5e1 100644
--- a/pkg/slayers/path/hopfield.go
+++ b/pkg/slayers/path/hopfield.go
@@ -22,7 +22,7 @@ import (
 	"github.com/scionproto/scion/pkg/private/serrors"
 	//@ . "github.com/scionproto/scion/verification/utils/definitions"
-	//@ "github.com/scionproto/scion/verification/utils/slices"
+	//@ sl "github.com/scionproto/scion/verification/utils/slices"
 )

 const (
@@ -76,7 +76,7 @@ type HopField struct {
 // path.HopLen.
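// (VerifiedSCION) Editor's note: the reads below follow the 12-byte (path.HopLen) hop
// field layout:
//
//	raw[0]           flag bits: 0x1 egress router alert, 0x2 ingress router alert
//	raw[1]           ExpTime
//	raw[2:4]         ConsIngress (big endian)
//	raw[4:6]         ConsEgress (big endian)
//	raw[6:6+MacLen]  the 6-byte hop field MAC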
// @ requires acc(h) // @ requires len(raw) >= HopLen -// @ preserves acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R45) +// @ preserves acc(sl.Bytes(raw, 0, HopLen), R45) // @ ensures h.Mem() // @ ensures err == nil // @ ensures unfolding h.Mem() in @@ -86,7 +86,7 @@ func (h *HopField) DecodeFromBytes(raw []byte) (err error) { if len(raw) < HopLen { return serrors.New("HopField raw too short", "expected", HopLen, "actual", len(raw)) } - //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) + //@ unfold acc(sl.Bytes(raw, 0, HopLen), R46) h.EgressRouterAlert = raw[0]&0x1 == 0x1 h.IngressRouterAlert = raw[0]&0x2 == 0x2 h.ExpTime = raw[1] @@ -103,7 +103,7 @@ func (h *HopField) DecodeFromBytes(raw []byte) (err error) { //@ assert forall i int :: {&h.Mac[i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == h.Mac[i] //@ EqualBytesImplyEqualMac(raw[6:6+MacLen], h.Mac) //@ assert BytesToIO_HF(raw, 0, 0, HopLen) == h.ToIO_HF() - //@ fold acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) + //@ fold acc(sl.Bytes(raw, 0, HopLen), R46) //@ fold h.Mem() return nil } @@ -112,7 +112,7 @@ func (h *HopField) DecodeFromBytes(raw []byte) (err error) { // path.HopLen. // @ requires len(b) >= HopLen // @ preserves acc(h.Mem(), R10) -// @ preserves slices.AbsSlice_Bytes(b, 0, HopLen) +// @ preserves sl.Bytes(b, 0, HopLen) // @ ensures err == nil // @ decreases func (h *HopField) SerializeTo(b []byte) (err error) { @@ -121,10 +121,10 @@ func (h *HopField) SerializeTo(b []byte) (err error) { } //@ requires len(b) >= HopLen //@ preserves acc(h.Mem(), R11) - //@ preserves slices.AbsSlice_Bytes(b, 0, HopLen) + //@ preserves sl.Bytes(b, 0, HopLen) //@ decreases //@ outline( - //@ unfold slices.AbsSlice_Bytes(b, 0, HopLen) + //@ unfold sl.Bytes(b, 0, HopLen) //@ unfold acc(h.Mem(), R11) b[0] = 0 if h.EgressRouterAlert { @@ -139,22 +139,22 @@ func (h *HopField) SerializeTo(b []byte) (err error) { //@ assert &b[4:6][0] == &b[4] && &b[4:6][1] == &b[5] binary.BigEndian.PutUint16(b[4:6], h.ConsEgress) //@ assert forall i int :: { &b[i] } 0 <= i && i < HopLen ==> acc(&b[i]) - //@ fold slices.AbsSlice_Bytes(b, 0, HopLen) + //@ fold sl.Bytes(b, 0, HopLen) //@ fold acc(h.Mem(), R11) //@ ) //@ requires len(b) >= HopLen //@ preserves acc(h.Mem(), R11) - //@ preserves slices.AbsSlice_Bytes(b, 0, HopLen) + //@ preserves sl.Bytes(b, 0, HopLen) //@ decreases //@ outline( - //@ unfold slices.AbsSlice_Bytes(b, 0, HopLen) + //@ unfold sl.Bytes(b, 0, HopLen) //@ unfold acc(h.Mem(), R11) //@ assert forall i int :: { &h.Mac[:][i] } 0 <= i && i < len(h.Mac) ==> //@ &h.Mac[i] == &h.Mac[:][i] //@ assert forall i int :: { &b[6:6+MacLen][i] }{ &b[i+6] } 0 <= i && i < MacLen ==> //@ &b[6:6+MacLen][i] == &b[i+6] copy(b[6:6+MacLen], h.Mac[:] /*@, R11 @*/) - //@ fold slices.AbsSlice_Bytes(b, 0, HopLen) + //@ fold sl.Bytes(b, 0, HopLen) //@ fold acc(h.Mem(), R11) //@ ) return nil diff --git a/pkg/slayers/path/hopfield_spec.gobra b/pkg/slayers/path/hopfield_spec.gobra index 543ea1185..26e79f8a1 100644 --- a/pkg/slayers/path/hopfield_spec.gobra +++ b/pkg/slayers/path/hopfield_spec.gobra @@ -18,7 +18,7 @@ package path import ( "verification/io" - "verification/utils/slices" + sl "verification/utils/slices" "verification/dependencies/encoding/binary" . 
"verification/utils/definitions" ) @@ -43,13 +43,13 @@ pure func IO_ifsToIfs(ifs option[io.IO_ifs]) uint16{ ghost requires 0 <= start && start <= middle requires middle + HopLen <= end && end <= len(raw) -requires acc(slices.AbsSlice_Bytes(raw, start, end), _) +requires acc(sl.Bytes(raw, start, end), _) decreases pure func BytesToIO_HF(raw [] byte, start int, middle int, end int) (io.IO_HF) { return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> &raw[middle+2:middle+4][k] == &raw[middle + 2 + k]) in let _ := Asserting(forall k int :: {&raw[middle+4:middle+6][k]} 0 <= k && k < 4 ==> &raw[middle+4:middle+6][k] == &raw[middle + 4 + k]) in let _ := Asserting(forall k int :: {&raw[middle+6:middle+6+MacLen][k]} 0 <= k && k < MacLen ==> &raw[middle+6:middle+6+MacLen][k] == &raw[middle + 6 + k]) in - unfolding acc(slices.AbsSlice_Bytes(raw, start, end), _) in + unfolding acc(sl.Bytes(raw, start, end), _) in let inif2 := binary.BigEndian.Uint16(raw[middle+2:middle+4]) in let egif2 := binary.BigEndian.Uint16(raw[middle+4:middle+6]) in let op_inif2 := ifsToIO_ifs(inif2) in diff --git a/pkg/slayers/path/infofield.go b/pkg/slayers/path/infofield.go index 80009c77d..754b44846 100644 --- a/pkg/slayers/path/infofield.go +++ b/pkg/slayers/path/infofield.go @@ -62,7 +62,7 @@ type InfoField struct { // path.InfoLen. // @ requires len(raw) >= InfoLen // @ preserves acc(inf) -// @ preserves acc(slices.AbsSlice_Bytes(raw, 0, InfoLen), R45) +// @ preserves acc(slices.Bytes(raw, 0, InfoLen), R45) // @ ensures err == nil // @ ensures BytesToIntermediateAbsInfoField(raw, 0, 0, InfoLen) == // @ inf.ToIntermediateAbsInfoField() @@ -71,7 +71,7 @@ func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { if len(raw) < InfoLen { return serrors.New("InfoField raw too short", "expected", InfoLen, "actual", len(raw)) } - //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, InfoLen), R50) + //@ unfold acc(slices.Bytes(raw, 0, InfoLen), R50) inf.ConsDir = raw[0]&0x1 == 0x1 inf.Peer = raw[0]&0x2 == 0x2 //@ assert &raw[2:4][0] == &raw[2] && &raw[2:4][1] == &raw[3] @@ -79,7 +79,7 @@ func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { //@ assert &raw[4:8][0] == &raw[4] && &raw[4:8][1] == &raw[5] //@ assert &raw[4:8][2] == &raw[6] && &raw[4:8][3] == &raw[7] inf.Timestamp = binary.BigEndian.Uint32(raw[4:8]) - //@ fold acc(slices.AbsSlice_Bytes(raw, 0, InfoLen), R50) + //@ fold acc(slices.Bytes(raw, 0, InfoLen), R50) return nil } @@ -87,7 +87,7 @@ func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { // path.InfoLen. 
// @ requires len(b) >= InfoLen // @ preserves acc(inf, R10) -// @ preserves slices.AbsSlice_Bytes(b, 0, InfoLen) +// @ preserves slices.Bytes(b, 0, InfoLen) // @ ensures err == nil // @ ensures inf.ToIntermediateAbsInfoField() == // @ BytesToIntermediateAbsInfoField(b, 0, 0, InfoLen) @@ -98,7 +98,7 @@ func (inf *InfoField) SerializeTo(b []byte) (err error) { "actual", len(b)) } //@ ghost targetAbsInfo := inf.ToIntermediateAbsInfoField() - //@ unfold slices.AbsSlice_Bytes(b, 0, InfoLen) + //@ unfold slices.Bytes(b, 0, InfoLen) b[0] = 0 if inf.ConsDir { b[0] |= 0x1 @@ -126,7 +126,7 @@ func (inf *InfoField) SerializeTo(b []byte) (err error) { binary.BigEndian.PutUint32(b[4:8], inf.Timestamp) //@ ghost tmpInfo4 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen) //@ assert tmpInfo4.AInfo == targetAbsInfo.AInfo - //@ fold slices.AbsSlice_Bytes(b, 0, InfoLen) + //@ fold slices.Bytes(b, 0, InfoLen) return nil } diff --git a/pkg/slayers/path/infofield_spec.gobra b/pkg/slayers/path/infofield_spec.gobra index 19ddedd33..3b52222d2 100644 --- a/pkg/slayers/path/infofield_spec.gobra +++ b/pkg/slayers/path/infofield_spec.gobra @@ -32,31 +32,31 @@ pure func InfoFieldOffset(currINF, headerOffset int) int { ghost requires 0 <= currINF && 0 <= headerOffset requires InfoFieldOffset(currINF, headerOffset) < len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func ConsDir(raw []byte, currINF int, headerOffset int) bool { - return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in raw[InfoFieldOffset(currINF, headerOffset)] & 0x1 == 0x1 } ghost requires 0 <= currINF && 0 <= headerOffset requires InfoFieldOffset(currINF, headerOffset) < len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func Peer(raw []byte, currINF int, headerOffset int) bool { - return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in raw[InfoFieldOffset(currINF, headerOffset)] & 0x2 == 0x2 } ghost requires 0 <= currINF && 0 <= headerOffset requires InfoFieldOffset(currINF, headerOffset) + InfoLen < len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func Timestamp(raw []byte, currINF int, headerOffset int) io.IO_ainfo { return let idx := InfoFieldOffset(currINF, headerOffset) + 4 in - unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in let _ := Asserting(forall i int :: { &raw[idx+i] } { &raw[idx:idx+4][i] } 0 <= i && i < 4 ==> &raw[idx+i] == &raw[idx:idx+4][i]) in io.IO_ainfo(binary.BigEndian.Uint32(raw[idx : idx + 4])) @@ -65,11 +65,11 @@ pure func Timestamp(raw []byte, currINF int, headerOffset int) io.IO_ainfo { ghost requires 0 <= currINF && 0 <= headerOffset requires InfoFieldOffset(currINF, headerOffset) + InfoLen < len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func AbsUinfo(raw []byte, currINF int, headerOffset int) set[io.IO_msgterm] { return let idx := InfoFieldOffset(currINF, headerOffset) + 2 in - unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in let _ := Asserting(forall k int :: {&raw[idx:idx+2][k]} 0 <= k && k < 2 ==> &raw[idx:idx+4][k] == &raw[idx + k]) in 
AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[idx:idx+2])) @@ -89,10 +89,10 @@ type IntermediateAbsInfoField adt { ghost requires 0 <= start && start <= middle requires middle+InfoLen <= end && end <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, start, end), _) +requires acc(sl.Bytes(raw, start, end), _) decreases pure func BytesToIntermediateAbsInfoField(raw [] byte, start int, middle int, end int) (IntermediateAbsInfoField) { - return unfolding acc(sl.AbsSlice_Bytes(raw, start, end), _) in + return unfolding acc(sl.Bytes(raw, start, end), _) in BytesToIntermediateAbsInfoFieldHelper(raw, middle, end) } diff --git a/pkg/slayers/path/mac.go b/pkg/slayers/path/mac.go index c3d91d918..df11254b7 100644 --- a/pkg/slayers/path/mac.go +++ b/pkg/slayers/path/mac.go @@ -20,7 +20,7 @@ import ( "encoding/binary" "hash" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) const MACBufferSize = 16 @@ -30,13 +30,13 @@ const MACBufferSize = 16 // this method does not modify info or hf. // Modifying the provided buffer after calling this function may change the returned HopField MAC. // @ requires h != nil && h.Mem() -// @ preserves len(buffer) >= MACBufferSize ==> slices.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ preserves len(buffer) >= MACBufferSize ==> sl.Bytes(buffer, 0, len(buffer)) // @ ensures h.Mem() // @ decreases func MAC(h hash.Hash, info InfoField, hf HopField, buffer []byte) [MacLen]byte { mac := FullMAC(h, info, hf, buffer) var res /*@ @ @*/ [MacLen]byte - //@ unfold slices.AbsSlice_Bytes(mac, 0, MACBufferSize) + //@ unfold sl.Bytes(mac, 0, MACBufferSize) copy(res[:], mac[:MacLen] /*@, R1 @*/) return res } @@ -47,21 +47,21 @@ func MAC(h hash.Hash, info InfoField, hf HopField, buffer []byte) [MacLen]byte { // Modifying the provided buffer after calling this function may change the returned HopField MAC. // In contrast to MAC(), FullMAC returns all the 16 bytes instead of only 6 bytes of the MAC. 
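// (VerifiedSCION) Editor's note: a minimal usage sketch, assuming the caller owns a keyed
// hash h (h != nil && h.Mem()) and plain info/hf values:
//
//	buffer := make([]byte, MACBufferSize)
//	//@ fold sl.Bytes(buffer, 0, len(buffer))
//	full := FullMAC(h, info, hf, buffer) // all 16 bytes, with sl.Bytes(full, 0, 16)
//	short := MAC(h, info, hf, buffer)    // same computation, truncated to MacLen (6) bytes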
// @ requires h != nil && h.Mem() -// @ preserves len(buffer) >= MACBufferSize ==> slices.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ preserves len(buffer) >= MACBufferSize ==> sl.Bytes(buffer, 0, len(buffer)) // @ ensures h.Mem() -// @ ensures len(res) == MACBufferSize && slices.AbsSlice_Bytes(res, 0, MACBufferSize) +// @ ensures len(res) == MACBufferSize && sl.Bytes(res, 0, MACBufferSize) // @ decreases func FullMAC(h hash.Hash, info InfoField, hf HopField, buffer []byte) (res []byte) { if len(buffer) < MACBufferSize { buffer = make([]byte, MACBufferSize) - //@ fold slices.AbsSlice_Bytes(buffer, 0, len(buffer)) + //@ fold sl.Bytes(buffer, 0, len(buffer)) } h.Reset() MACInput(info.SegID, info.Timestamp, hf.ExpTime, hf.ConsIngress, hf.ConsEgress, buffer) - //@ unfold slices.AbsSlice_Bytes(buffer, 0, len(buffer)) - //@ defer fold slices.AbsSlice_Bytes(buffer, 0, len(buffer)) + //@ unfold sl.Bytes(buffer, 0, len(buffer)) + //@ defer fold sl.Bytes(buffer, 0, len(buffer)) // Write must not return an error: https://godoc.org/hash#Hash if _, err := h.Write(buffer); err != nil { // @ Unreachable() @@ -69,7 +69,7 @@ func FullMAC(h hash.Hash, info InfoField, hf HopField, buffer []byte) (res []byt } //@ assert h.Size() >= 16 res = h.Sum(buffer[:0])[:16] - //@ fold slices.AbsSlice_Bytes(res, 0, MACBufferSize) + //@ fold sl.Bytes(res, 0, MACBufferSize) return res } @@ -88,11 +88,11 @@ func FullMAC(h hash.Hash, info InfoField, hf HopField, buffer []byte) (res []byt // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // @ requires len(buffer) >= MACBufferSize -// @ preserves slices.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ preserves sl.Bytes(buffer, 0, len(buffer)) // @ decreases func MACInput(segID uint16, timestamp uint32, expTime uint8, consIngress, consEgress uint16, buffer []byte) { - //@ unfold slices.AbsSlice_Bytes(buffer, 0, len(buffer)) + //@ unfold sl.Bytes(buffer, 0, len(buffer)) //@ assert &buffer[0:2][0] == &buffer[0] && &buffer[0:2][1] == &buffer[1] binary.BigEndian.PutUint16(buffer[0:2], 0) @@ -109,5 +109,5 @@ func MACInput(segID uint16, timestamp uint32, expTime uint8, binary.BigEndian.PutUint16(buffer[12:14], consEgress) //@ assert &buffer[14:16][0] == &buffer[14] && &buffer[14:16][1] == &buffer[15] binary.BigEndian.PutUint16(buffer[14:16], 0) - //@ fold slices.AbsSlice_Bytes(buffer, 0, len(buffer)) + //@ fold sl.Bytes(buffer, 0, len(buffer)) } diff --git a/pkg/slayers/path/onehop/onehop.go b/pkg/slayers/path/onehop/onehop.go index ea906c5f2..8ebee355e 100644 --- a/pkg/slayers/path/onehop/onehop.go +++ b/pkg/slayers/path/onehop/onehop.go @@ -21,7 +21,7 @@ import ( "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) // PathLen is the length of a serialized one hop path in bytes. 
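// (VerifiedSCION) Editor's note: a one hop path consists of exactly one info field followed
// by two hop fields, so PathLen = path.InfoLen + 2*path.HopLen = 8 + 2*12 = 32 bytes; the
// offset bookkeeping in DecodeFromBytes and SerializeTo below steps through precisely these
// three windows in order.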
@@ -66,7 +66,7 @@ type Path struct { } // @ requires o.NonInitMem() -// @ preserves acc(slices.AbsSlice_Bytes(data, 0, len(data)), R42) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R42) // @ ensures (len(data) >= PathLen) == (r == nil) // @ ensures r == nil ==> o.Mem(data) // @ ensures r != nil ==> o.NonInitMem() @@ -79,30 +79,30 @@ func (o *Path) DecodeFromBytes(data []byte) (r error) { } offset := 0 //@ unfold o.NonInitMem() - //@ slices.SplitRange_Bytes(data, 0, path.InfoLen, R42) + //@ sl.SplitRange_Bytes(data, 0, path.InfoLen, R42) if err := o.Info.DecodeFromBytes(data[:path.InfoLen]); err != nil { // @ Unreachable() return err } - //@ slices.CombineRange_Bytes(data,0, path.InfoLen, R42) + //@ sl.CombineRange_Bytes(data,0, path.InfoLen, R42) offset += path.InfoLen - //@ slices.SplitRange_Bytes(data, offset, offset+path.HopLen, R42) + //@ sl.SplitRange_Bytes(data, offset, offset+path.HopLen, R42) if err := o.FirstHop.DecodeFromBytes(data[offset : offset+path.HopLen]); err != nil { // @ Unreachable() return err } - //@ slices.CombineRange_Bytes(data, offset, offset+path.HopLen, R42) + //@ sl.CombineRange_Bytes(data, offset, offset+path.HopLen, R42) offset += path.HopLen - //@ slices.SplitRange_Bytes(data, offset, offset+path.HopLen, R42) + //@ sl.SplitRange_Bytes(data, offset, offset+path.HopLen, R42) r = o.SecondHop.DecodeFromBytes(data[offset : offset+path.HopLen]) - //@ slices.CombineRange_Bytes(data, offset, offset+path.HopLen, R42) + //@ sl.CombineRange_Bytes(data, offset, offset+path.HopLen, R42) //@ ghost if r == nil { fold o.Mem(data) } else { fold o.NonInitMem() } return r } // @ preserves acc(o.Mem(ubuf), R1) -// @ preserves acc(slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R1) -// @ preserves slices.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R1) +// @ preserves sl.Bytes(b, 0, len(b)) // @ ensures (len(b) >= PathLen) == (err == nil) // @ ensures err != nil ==> err.ErrorMem() // @ ensures err == nil ==> o.Len(ubuf) <= len(b) @@ -114,23 +114,23 @@ func (o *Path) SerializeTo(b []byte /*@, ubuf []byte @*/) (err error) { } offset := 0 //@ unfold acc(o.Mem(ubuf), R1) - //@ slices.SplitRange_Bytes(b, 0, offset+path.InfoLen, writePerm) + //@ sl.SplitRange_Bytes(b, 0, offset+path.InfoLen, writePerm) if err := o.Info.SerializeTo(b[:offset+path.InfoLen]); err != nil { - //@ slices.CombineRange_Bytes(b, 0, offset+path.InfoLen, writePerm) + //@ sl.CombineRange_Bytes(b, 0, offset+path.InfoLen, writePerm) return err } - //@ slices.CombineRange_Bytes(b, 0, offset+path.InfoLen, writePerm) + //@ sl.CombineRange_Bytes(b, 0, offset+path.InfoLen, writePerm) offset += path.InfoLen - //@ slices.SplitRange_Bytes(b, offset, offset+path.HopLen, writePerm) + //@ sl.SplitRange_Bytes(b, offset, offset+path.HopLen, writePerm) if err := o.FirstHop.SerializeTo(b[offset : offset+path.HopLen]); err != nil { - //@ slices.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) + //@ sl.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) return err } - //@ slices.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) + //@ sl.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) offset += path.HopLen - //@ slices.SplitRange_Bytes(b, offset, offset+path.HopLen, writePerm) + //@ sl.SplitRange_Bytes(b, offset, offset+path.HopLen, writePerm) err = o.SecondHop.SerializeTo(b[offset : offset+path.HopLen]) - //@ slices.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) + //@ sl.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) //@ fold 
acc(o.Mem(ubuf), R1) return err } @@ -138,7 +138,7 @@ func (o *Path) SerializeTo(b []byte /*@, ubuf []byte @*/) (err error) { // ToSCIONDecoded converts the one hop path in to a normal SCION path in the // decoded format. // @ preserves o.Mem(ubuf) -// @ preserves slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures err == nil ==> (sd != nil && sd.Mem(ubuf)) // @ ensures err != nil ==> err.ErrorMem() // @ decreases @@ -201,7 +201,7 @@ func (o *Path) ToSCIONDecoded( /*@ ghost ubuf []byte @*/ ) (sd *scion.Decoded, e // Reverse a OneHop path that returns a reversed SCION path. // @ requires o.Mem(ubuf) -// @ preserves slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures err == nil ==> p != nil // @ ensures err == nil ==> p.Mem(ubuf) // @ ensures err == nil ==> typeOf(p) == type[*scion.Decoded] diff --git a/pkg/slayers/path/path.go b/pkg/slayers/path/path.go index 1e5e01df7..c23a25215 100644 --- a/pkg/slayers/path/path.go +++ b/pkg/slayers/path/path.go @@ -71,9 +71,9 @@ type Path interface { // SerializeTo serializes the path into the provided buffer. // (VerifiedSCION) There are implementations of this interface that modify the underlying // structure when serializing (e.g. scion.Raw) - //@ preserves sl.AbsSlice_Bytes(underlyingBuf, 0, len(underlyingBuf)) + //@ preserves sl.Bytes(underlyingBuf, 0, len(underlyingBuf)) //@ preserves acc(Mem(underlyingBuf), R1) - //@ preserves sl.AbsSlice_Bytes(b, 0, len(b)) + //@ preserves sl.Bytes(b, 0, len(b)) //@ ensures e != nil ==> e.ErrorMem() //@ decreases SerializeTo(b []byte /*@, ghost underlyingBuf []byte @*/) (e error) @@ -81,7 +81,7 @@ type Path interface { // (VerifiedSCION) There are implementations of this interface (e.g., scion.Raw) that // store b and use it as internal data. //@ requires NonInitMem() - //@ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R42) + //@ preserves acc(sl.Bytes(b, 0, len(b)), R42) //@ ensures err == nil ==> Mem(b) //@ ensures err != nil ==> err.ErrorMem() //@ ensures err != nil ==> NonInitMem() @@ -91,13 +91,13 @@ type Path interface { //@ ghost //@ pure //@ requires Mem(b) - //@ requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), R42) + //@ requires acc(sl.Bytes(b, 0, len(b)), R42) //@ decreases //@ IsValidResultOfDecoding(b []byte, err error) (res bool) // Reverse reverses a path such that it can be used in the reversed direction. // XXX(shitz): This method should possibly be moved to a higher-level path manipulation package. 
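// (VerifiedSCION) Editor's note: Reverse deliberately consumes Mem(underlyingBuf) in its
// precondition instead of preserving it: an implementation may hand back a different path
// object (e.g., onehop.Path above reverses into a *scion.Decoded), so the permission to the
// underlying buffer travels with the returned path via p.Mem(underlyingBuf) below.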
//@ requires Mem(underlyingBuf) - //@ preserves sl.AbsSlice_Bytes(underlyingBuf, 0, len(underlyingBuf)) + //@ preserves sl.Bytes(underlyingBuf, 0, len(underlyingBuf)) //@ ensures e == nil ==> p != nil //@ ensures e == nil ==> p.Mem(underlyingBuf) //@ ensures e != nil ==> e.ErrorMem() @@ -210,23 +210,23 @@ type rawPath struct { } // @ preserves acc(p.Mem(underlyingBuf), R10) -// @ preserves acc(sl.AbsSlice_Bytes(underlyingBuf, 0, len(underlyingBuf)), R10) -// @ preserves sl.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves acc(sl.Bytes(underlyingBuf, 0, len(underlyingBuf)), R10) +// @ preserves sl.Bytes(b, 0, len(b)) // @ ensures e == nil // @ decreases func (p *rawPath) SerializeTo(b []byte /*@, ghost underlyingBuf []byte @*/) (e error) { - //@ unfold sl.AbsSlice_Bytes(b, 0, len(b)) + //@ unfold sl.Bytes(b, 0, len(b)) //@ unfold acc(p.Mem(underlyingBuf), R10) - //@ unfold acc(sl.AbsSlice_Bytes(p.raw, 0, len(p.raw)), R11) + //@ unfold acc(sl.Bytes(p.raw, 0, len(p.raw)), R11) copy(b, p.raw /*@, R11 @*/) - //@ fold acc(sl.AbsSlice_Bytes(p.raw, 0, len(p.raw)), R11) + //@ fold acc(sl.Bytes(p.raw, 0, len(p.raw)), R11) //@ fold acc(p.Mem(underlyingBuf), R10) - //@ fold sl.AbsSlice_Bytes(b, 0, len(b)) + //@ fold sl.Bytes(b, 0, len(b)) return nil } // @ requires p.NonInitMem() -// @ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R42) +// @ preserves acc(sl.Bytes(b, 0, len(b)), R42) // @ ensures p.Mem(b) // @ ensures e == nil // @ decreases diff --git a/pkg/slayers/path/path_spec_test.gobra b/pkg/slayers/path/path_spec_test.gobra index 75f7f25f4..c882f0a0d 100644 --- a/pkg/slayers/path/path_spec_test.gobra +++ b/pkg/slayers/path/path_spec_test.gobra @@ -16,11 +16,11 @@ package path -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" func foldMem_test() { r := &rawPath{} - fold slices.AbsSlice_Bytes(r.raw, 0, 0) + fold sl.Bytes(r.raw, 0, 0) fold r.Mem(nil) } diff --git a/pkg/slayers/path/scion/base.go b/pkg/slayers/path/scion/base.go index 7ab5bb869..610cd1df7 100644 --- a/pkg/slayers/path/scion/base.go +++ b/pkg/slayers/path/scion/base.go @@ -79,7 +79,7 @@ type Base struct { } // @ requires s.NonInitMem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R50) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R50) // @ ensures r != nil ==> // @ s.NonInitMem() && r.ErrorMem() // @ ensures r == nil ==> @@ -251,7 +251,7 @@ type MetaHdr struct { // DecodeFromBytes populates the fields from a raw buffer. The buffer must be of length >= // scion.MetaLen. 
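// (VerifiedSCION) Editor's note: the decoding below unpacks a single big-endian uint32
// `line` according to the path meta header bit layout:
//
//	CurrINF   = uint8(line >> 30)        // bits 30-31
//	CurrHF    = uint8(line >> 24) & 0x3F // bits 24-29 (bits 18-23 are reserved)
//	SegLen[0] = uint8(line >> 12) & 0x3F // bits 12-17
//	SegLen[1] = uint8(line >> 6) & 0x3F  // bits 6-11
//	SegLen[2] = uint8(line) & 0x3F       // bits 0-5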
// @ preserves acc(m) -// @ preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) +// @ preserves acc(sl.Bytes(raw, 0, len(raw)), R50) // @ ensures (len(raw) >= MetaLen) == (e == nil) // @ ensures e == nil ==> m.DecodeFromBytesSpec(raw) // @ ensures e != nil ==> e.ErrorMem() @@ -261,7 +261,7 @@ func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) { // (VerifiedSCION) added cast, otherwise Gobra cannot verify call return serrors.New("MetaHdr raw too short", "expected", int(MetaLen), "actual", int(len(raw))) } - //@ unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) + //@ unfold acc(sl.Bytes(raw, 0, len(raw)), R50) line := binary.BigEndian.Uint32(raw) m.CurrINF = uint8(line >> 30) m.CurrHF = uint8(line>>24) & 0x3F @@ -273,7 +273,7 @@ func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) { //@ bit.And3fAtMost64(uint8(line>>12)) //@ bit.And3fAtMost64(uint8(line>>6)) //@ bit.And3fAtMost64(uint8(line)) - //@ fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) + //@ fold acc(sl.Bytes(raw, 0, len(raw)), R50) return nil } @@ -281,7 +281,7 @@ func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) { // scion.MetaLen. // @ requires len(b) >= MetaLen // @ preserves acc(m, R50) -// @ preserves sl.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves sl.Bytes(b, 0, len(b)) // @ ensures e == nil // @ ensures m.SerializeToSpec(b) // @ decreases @@ -294,9 +294,9 @@ func (m *MetaHdr) SerializeTo(b []byte) (e error) { line |= uint32(m.SegLen[0]&0x3F) << 12 line |= uint32(m.SegLen[1]&0x3F) << 6 line |= uint32(m.SegLen[2] & 0x3F) - //@ unfold acc(sl.AbsSlice_Bytes(b, 0, len(b))) + //@ unfold acc(sl.Bytes(b, 0, len(b))) binary.BigEndian.PutUint32(b, line) - //@ fold acc(sl.AbsSlice_Bytes(b, 0, len(b))) + //@ fold acc(sl.Bytes(b, 0, len(b))) return nil } diff --git a/pkg/slayers/path/scion/base_spec.gobra b/pkg/slayers/path/scion/base_spec.gobra index 6310bdaec..c165e56bb 100644 --- a/pkg/slayers/path/scion/base_spec.gobra +++ b/pkg/slayers/path/scion/base_spec.gobra @@ -249,7 +249,7 @@ pure func DecodedFrom(line uint32) MetaHdr { } ghost -requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), _) +requires acc(sl.Bytes(b, 0, len(b)), _) decreases pure func (m MetaHdr) DecodeFromBytesSpec(b []byte) bool { return MetaLen <= len(b) && @@ -267,7 +267,7 @@ pure func (m MetaHdr) DecodeFromBytesSpec(b []byte) bool { ghost requires acc(s.Mem(), _) -requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), _) +requires acc(sl.Bytes(b, 0, len(b)), _) decreases pure func (s *Base) DecodeFromBytesSpec(b []byte) bool { return unfolding acc(s.Mem(), _) in @@ -293,7 +293,7 @@ pure func (m MetaHdr) SerializedToLine() uint32 { } ghost -requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), _) +requires acc(sl.Bytes(b, 0, len(b)), _) decreases pure func (m MetaHdr) SerializeToSpec(b []byte) bool { return MetaLen <= len(b) && @@ -317,7 +317,7 @@ pure func (m MetaHdr) InBounds() bool { } ghost -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) decreases pure func (s Base) EqAbsHeader(ub []byte) bool { // we compute the sublice ub[:MetaLen] inside this function instead @@ -329,25 +329,25 @@ pure func (s Base) EqAbsHeader(ub []byte) bool { } ghost -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires acc(sl.Bytes(ub, 0, len(ub)), _) decreases pure func (s MetaHdr) EqAbsHeader(ub []byte) bool { return MetaLen <= len(ub) && - unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in + unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in s == DecodedFrom(binary.BigEndian.Uint32(ub[:MetaLen])) } ghost opaque requires 
MetaLen <= idx && idx <= len(ub) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) -requires acc(sl.AbsSlice_Bytes(ub[:idx], 0, idx), R55) +requires acc(sl.Bytes(ub, 0, len(ub)), R55) +requires acc(sl.Bytes(ub[:idx], 0, idx), R55) ensures s.EqAbsHeader(ub) == s.EqAbsHeader(ub[:idx]) decreases pure func (s MetaHdr) EqAbsHeaderForSublice(ub []byte, idx int) Lemma { return let _ := Asserting(ub[:MetaLen] === ub[:idx][:MetaLen]) in - unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) in - unfolding acc(sl.AbsSlice_Bytes(ub[:idx], 0, idx), R56) in + unfolding acc(sl.Bytes(ub, 0, len(ub)), R56) in + unfolding acc(sl.Bytes(ub[:idx], 0, idx), R56) in let _ := Asserting(s.EqAbsHeader(ub) == (s == DecodedFrom(binary.BigEndian.Uint32(ub[:MetaLen])))) in Lemma{} } diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go index 0e98a614c..0d365765a 100644 --- a/pkg/slayers/path/scion/decoded.go +++ b/pkg/slayers/path/scion/decoded.go @@ -37,7 +37,7 @@ type Decoded struct { // DecodeFromBytes fully decodes the SCION path into the corresponding fields. // @ requires s.NonInitMem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R42) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R42) // @ ensures r == nil ==> ( // @ s.Mem(data) && // @ let lenD := len(data) in @@ -77,8 +77,8 @@ func (s *Decoded) DecodeFromBytes(data []byte) (r error) { //@ invariant len(data) >= MetaLen + s.Base.GetNumINF() * path.InfoLen + s.Base.GetNumHops() * path.HopLen //@ invariant offset == MetaLen + i * path.InfoLen //@ invariant forall j int :: { &s.InfoFields[j] } 0 <= j && j < s.Base.GetNumINF() ==> acc(&s.InfoFields[j]) - //@ invariant acc(sl.AbsSlice_Bytes(data, 0, offset), R43) - //@ invariant acc(sl.AbsSlice_Bytes(data, offset, len(data)), R43) + //@ invariant acc(sl.Bytes(data, 0, offset), R43) + //@ invariant acc(sl.Bytes(data, offset, len(data)), R43) //@ decreases s.Base.GetNumINF() - i for i := 0; i < /*@ unfolding acc(s.Base.Mem(), _) in @*/ s.NumINF; i++ { //@ sl.SplitByIndex_Bytes(data, offset, len(data), offset + path.InfoLen, R43) @@ -102,8 +102,8 @@ func (s *Decoded) DecodeFromBytes(data []byte) (r error) { //@ invariant forall j int :: { &s.HopFields[j] } 0 <= j && j < i ==> s.HopFields[j].Mem() //@ invariant len(data) >= MetaLen + s.Base.GetNumINF() * path.InfoLen + s.Base.GetNumHops() * path.HopLen //@ invariant offset == MetaLen + s.Base.GetNumINF() * path.InfoLen + i * path.HopLen - //@ invariant acc(sl.AbsSlice_Bytes(data, 0, offset), R43) - //@ invariant acc(sl.AbsSlice_Bytes(data, offset, len(data)), R43) + //@ invariant acc(sl.Bytes(data, 0, offset), R43) + //@ invariant acc(sl.Bytes(data, offset, len(data)), R43) //@ decreases s.Base.GetNumHops() - i for i := 0; i < /*@ unfolding acc(s.Base.Mem(), R2) in @*/ s.NumHops; i++ { //@ sl.SplitByIndex_Bytes(data, offset, len(data), offset + path.HopLen, R43) @@ -126,8 +126,8 @@ func (s *Decoded) DecodeFromBytes(data []byte) (r error) { // SerializeTo writePerms the path to a slice. The slice must be big enough to hold the entire data, // otherwise an error is returned. 
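// (VerifiedSCION) Editor's note: serialization writes the meta header first and then the
// info and hop fields back to back, so the loop invariants below pin the write cursor to
// offset == MetaLen + i*path.InfoLen while copying info fields (and analogously for hop
// fields), and the buffer must hold s.Len(ubuf) == MetaLen + NumINF*path.InfoLen +
// NumHops*path.HopLen bytes in total.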
// @ preserves acc(s.Mem(ubuf), R1) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) -// @ preserves b !== ubuf ==> sl.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) +// @ preserves b !== ubuf ==> sl.Bytes(b, 0, len(b)) // @ ensures r != nil ==> r.ErrorMem() // @ decreases func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { @@ -136,7 +136,7 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { "actual", len(b)) } //@ unfold acc(s.Mem(ubuf), R1) - //@ assert sl.AbsSlice_Bytes(b, 0, len(b)) + //@ assert sl.Bytes(b, 0, len(b)) //@ sl.SplitByIndex_Bytes(b, 0, len(b), MetaLen, writePerm) //@ sl.Reslice_Bytes(b, 0, MetaLen, writePerm) //@ unfold acc(s.Base.Mem(), R1) @@ -151,8 +151,8 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { offset := MetaLen //@ invariant acc(s.Mem(ubuf), R1) - //@ invariant sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) - //@ invariant b !== ubuf ==> sl.AbsSlice_Bytes(b, 0, len(b)) + //@ invariant sl.Bytes(ubuf, 0, len(ubuf)) + //@ invariant b !== ubuf ==> sl.Bytes(b, 0, len(b)) //@ invariant s.Len(ubuf) <= len(b) //@ invariant 0 <= i && i <= s.getLenInfoFields(ubuf) //@ invariant offset == MetaLen + i * path.InfoLen @@ -166,7 +166,7 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { //@ sl.SplitByIndex_Bytes(b, 0, len(b), offset, writePerm) //@ sl.SplitByIndex_Bytes(b, offset, len(b), offset + path.InfoLen, writePerm) //@ sl.Reslice_Bytes(b, offset, offset + path.InfoLen, writePerm) - //@ assert sl.AbsSlice_Bytes(b[offset:offset+path.InfoLen], 0, path.InfoLen) + //@ assert sl.Bytes(b[offset:offset+path.InfoLen], 0, path.InfoLen) if err := info.SerializeTo(b[offset : offset+path.InfoLen]); err != nil { //@ Unreachable() return err @@ -178,8 +178,8 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { offset += path.InfoLen } //@ invariant acc(s.Mem(ubuf), R1) - //@ invariant sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) - //@ invariant b !== ubuf ==> sl.AbsSlice_Bytes(b, 0, len(b)) + //@ invariant sl.Bytes(ubuf, 0, len(ubuf)) + //@ invariant b !== ubuf ==> sl.Bytes(b, 0, len(b)) //@ invariant s.Len(ubuf) <= len(b) //@ invariant 0 <= i && i <= s.getLenHopFields(ubuf) //@ invariant offset == MetaLen + s.getLenInfoFields(ubuf) * path.InfoLen + i * path.HopLen @@ -291,7 +291,7 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { // ToRaw tranforms scion.Decoded into scion.Raw. 
// @ preserves s.Mem(ubuf1) -// @ preserves sl.AbsSlice_Bytes(ubuf1, 0, len(ubuf1)) +// @ preserves sl.Bytes(ubuf1, 0, len(ubuf1)) // @ ensures err == nil ==> r.Mem(ubuf2) // @ ensures err != nil ==> err.ErrorMem() // @ decreases @@ -300,7 +300,7 @@ func (s *Decoded) ToRaw( /*@ ghost ubuf1 []byte @*/ ) (r *Raw, err error /*@, gh // make cannot contain ghost subexpressions tmp := s.Len( /*@ ubuf1 @*/ ) b := make([]byte, tmp) - //@ fold sl.AbsSlice_Bytes(b, 0, len(b)) + //@ fold sl.Bytes(b, 0, len(b)) if err := s.SerializeTo(b /*@, ubuf1 @*/); err != nil { return nil, err /*@, b @*/ } diff --git a/pkg/slayers/path/scion/decoded_spec_test.gobra b/pkg/slayers/path/scion/decoded_spec_test.gobra index a794b2dcb..4ea0575aa 100644 --- a/pkg/slayers/path/scion/decoded_spec_test.gobra +++ b/pkg/slayers/path/scion/decoded_spec_test.gobra @@ -18,7 +18,7 @@ package scion import ( "github.com/scionproto/scion/pkg/slayers/path" - "github.com/scionproto/scion/verification/utils/slices" + sl "github.com/scionproto/scion/verification/utils/slices" ) func testAllocateNonInitDecoded() { @@ -32,6 +32,6 @@ func testAllocateDecoded() { fold d.Base.Mem() assert d.Base.Len() == MetaLen b := make([]byte, MetaLen) - fold slices.AbsSlice_Bytes(b, 0, MetaLen) + fold sl.Bytes(b, 0, MetaLen) fold d.Mem(b) } diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index 48998c4aa..c2ace96b6 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -35,7 +35,7 @@ type Raw struct { // DecodeFromBytes only decodes the PathMetaHeader. Otherwise the nothing is decoded and simply kept // as raw bytes. // @ requires s.NonInitMem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R42) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R42) // @ ensures res == nil ==> s.Mem(data) // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem()) // posts for IO: @@ -64,8 +64,8 @@ func (s *Raw) DecodeFromBytes(data []byte) (res error) { // SerializeTo writes the path to a slice. The slice must be big enough to hold the entire data, // otherwise an error is returned. // @ preserves acc(s.Mem(ubuf), R1) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) -// @ preserves sl.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(b, 0, len(b)) // @ ensures r != nil ==> r.ErrorMem() // @ decreases func (s *Raw) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { @@ -80,7 +80,7 @@ func (s *Raw) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { // directly. 
//@ unfold acc(s.Base.Mem(), R1) //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), writePerm) - //@ assert sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)) + //@ assert sl.Bytes(s.Raw, 0, len(s.Raw)) //@ sl.SplitRange_Bytes(s.Raw, 0, MetaLen, writePerm) if err := s.PathMeta.SerializeTo(s.Raw[:MetaLen]); err != nil { // @ Unreachable() @@ -88,11 +88,11 @@ func (s *Raw) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { } //@ fold acc(s.Base.Mem(), R1) //@ sl.CombineRange_Bytes(s.Raw, 0, MetaLen, writePerm) - //@ unfold acc(sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)), R2) - //@ unfold sl.AbsSlice_Bytes(b, 0, len(b)) + //@ unfold acc(sl.Bytes(s.Raw, 0, len(s.Raw)), R2) + //@ unfold sl.Bytes(b, 0, len(b)) copy(b, s.Raw /*@ , R2 @*/) - //@ fold sl.AbsSlice_Bytes(b, 0, len(b)) - //@ fold acc(sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)), R2) + //@ fold sl.Bytes(b, 0, len(b)) + //@ fold acc(sl.Bytes(s.Raw, 0, len(s.Raw)), R2) //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), writePerm) //@ fold acc(s.Mem(ubuf), R1) return nil @@ -100,7 +100,7 @@ func (s *Raw) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { // Reverse reverses the path such that it can be used in the reverse direction. // @ requires s.Mem(ubuf) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures err == nil ==> typeOf(p) == type[*Raw] // @ ensures err == nil ==> p != nil && p != (*Raw)(nil) // @ ensures err == nil ==> p.Mem(ubuf) @@ -135,7 +135,7 @@ func (s *Raw) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, err error) { // ToDecoded transforms a scion.Raw to a scion.Decoded. // @ preserves acc(s.Mem(ubuf), R5) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures err == nil ==> ( // @ let newUb := s.RawBufferMem(ubuf) in // @ d.Mem(newUb) && @@ -161,40 +161,40 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // @ Unreachable() return nil, err } - //@ ghost b0 := (unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[0]) - //@ ghost b1 := (unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[1]) - //@ ghost b2 := (unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[2]) - //@ ghost b3 := (unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[3]) + //@ ghost b0 := (unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[0]) + //@ ghost b1 := (unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[1]) + //@ ghost b2 := (unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[2]) + //@ ghost b3 := (unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[3]) //@ assert let line := s.PathMeta.SerializedToLine() in binary.BigEndian.PutUint32Spec(b0, b1, b2, b3, line) //@ sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) //@ assert &ubuf[0] == &s.Raw[:MetaLen][0] //@ assert &ubuf[1] == &s.Raw[:MetaLen][1] //@ assert &ubuf[2] == &s.Raw[:MetaLen][2] //@ assert &ubuf[3] == &s.Raw[:MetaLen][3] - //@ assert b0 == (unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in ubuf[0]) + //@ assert b0 == (unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in ubuf[0]) // (VerifiedSCION): for some reason, silicon requires the following line to be able to prove // bX == ubuf[X]. 
- //@ assert unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in - //@ (ubuf[0] == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), _) in ubuf[0])) + //@ assert unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in + //@ (ubuf[0] == (unfolding acc(sl.Bytes(ubuf, 0, len(ubuf)), _) in ubuf[0])) //@ sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) decoded := &Decoded{} //@ fold decoded.Base.NonInitMem() //@ fold decoded.NonInitMem() //@ sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), len(s.Raw), HalfPerm) - //@ assert unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), _) in - //@ (ubuf[0] == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0])) + //@ assert unfolding acc(sl.Bytes(ubuf, 0, len(ubuf)), _) in + //@ (ubuf[0] == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0])) //@ sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), len(s.Raw), HalfPerm) //@ sl.Reslice_Bytes(ubuf, 0, len(s.Raw), HalfPerm) //@ assert &ubuf[0] == &ubuf[:len(s.Raw)][0] //@ assert &ubuf[1] == &ubuf[:len(s.Raw)][1] //@ assert &ubuf[2] == &ubuf[:len(s.Raw)][2] //@ assert &ubuf[3] == &ubuf[:len(s.Raw)][3] - //@ assert unfolding acc(sl.AbsSlice_Bytes(ubuf[:len(s.Raw)], 0, len(s.Raw)), _) in - //@ (ubuf[0] == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0])) - //@ assert b0 == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0]) - //@ assert b1 == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[1]) - //@ assert b2 == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[2]) - //@ assert b3 == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[3]) + //@ assert unfolding acc(sl.Bytes(ubuf[:len(s.Raw)], 0, len(s.Raw)), _) in + //@ (ubuf[0] == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0])) + //@ assert b0 == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0]) + //@ assert b1 == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[1]) + //@ assert b2 == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[2]) + //@ assert b3 == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[3]) //@ sl.Reslice_Bytes(ubuf, 0, len(s.Raw), HalfPerm) if err := decoded.DecodeFromBytes(s.Raw); err != nil { //@ sl.Unslice_Bytes(ubuf, 0, len(s.Raw), writePerm) @@ -221,14 +221,14 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // IncPath increments the path and writes it to the buffer. 
// @ requires s.Mem(ubuf) -// @ requires sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires sl.Bytes(ubuf, 0, len(ubuf)) // pres for IO: // @ requires s.EqAbsHeader(ubuf) // @ requires validPktMetaHdr(ubuf) // @ requires len(s.absPkt(ubuf).CurrSeg.Future) > 0 // @ requires s.GetIsXoverSpec(ubuf) ==> // @ s.absPkt(ubuf).LeftSeg != none[io.IO_seg3] -// @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ ensures sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures old(unfolding s.Mem(ubuf) in unfolding // @ s.Base.Mem() in (s.NumINF <= 0 || int(s.PathMeta.CurrHF) >= s.NumHops-1)) ==> r != nil // @ ensures r == nil ==> s.Mem(ubuf) @@ -263,7 +263,7 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ ValidPktMetaHdrSublice(ubuf, MetaLen) //@ sl.Reslice_Bytes(ubuf, MetaLen, len(ubuf), HalfPerm) //@ tail := ubuf[MetaLen:] - //@ unfold acc(sl.AbsSlice_Bytes(tail, 0, len(tail)), R50) + //@ unfold acc(sl.Bytes(tail, 0, len(tail)), R50) //@ oldoffsetWithHops := oldOffset + path.HopLen * oldPrevSegLen //@ oldHfIdxSeg := oldCurrHfIdx-oldPrevSegLen //@ WidenCurrSeg(ubuf, oldoffsetWithHops + MetaLen, oldCurrInfIdx, oldHfIdxSeg, @@ -319,7 +319,7 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ assert reveal s.absPkt(ubuf) == AbsXover(oldAbsPkt) //@ } - //@ fold acc(sl.AbsSlice_Bytes(tail, 0, len(tail)), R50) + //@ fold acc(sl.Bytes(tail, 0, len(tail)), R50) //@ sl.Unslice_Bytes(ubuf, MetaLen, len(ubuf), HalfPerm) //@ sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) //@ fold acc(s.Base.Mem(), R2) @@ -329,7 +329,7 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { // GetInfoField returns the InfoField at a given index. // @ requires 0 <= idx -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R10) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R10) // @ preserves acc(s.Mem(ubuf), R10) // @ ensures (idx < s.GetNumINF(ubuf)) == (err == nil) // @ ensures err == nil ==> s.CorrectlyDecodedInfWithIdx(ubuf, idx, ifield) @@ -353,12 +353,12 @@ func (s *Raw) GetInfoField(idx int /*@, ghost ubuf []byte @*/) (ifield path.Info return path.InfoField{}, err } //@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R21) - //@ unfold acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) - //@ unfold acc(sl.AbsSlice_Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56) + //@ unfold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) + //@ unfold acc(sl.Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56) //@ assert info.ToIntermediateAbsInfoField() == //@ path.BytesToIntermediateAbsInfoField(ubuf, 0, infOffset, len(ubuf)) - //@ fold acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) - //@ fold acc(sl.AbsSlice_Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56) + //@ fold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) + //@ fold acc(sl.Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56) //@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R21) //@ fold acc(s.Mem(ubuf), R11) //@ assert reveal s.CorrectlyDecodedInfWithIdx(ubuf, idx, info) @@ -368,7 +368,7 @@ func (s *Raw) GetInfoField(idx int /*@, ghost ubuf []byte @*/) (ifield path.Info // GetCurrentInfoField is a convenience method that returns the current hop field pointed to by the // CurrINF index in the path meta header. 
// @ preserves acc(s.Mem(ubuf), R8) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R9) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R9) // @ ensures (r == nil) == s.ValidCurrINF(ubuf) // @ ensures r == nil ==> s.CorrectlyDecodedInf(ubuf, res) // @ ensures r != nil ==> r.ErrorMem() @@ -389,12 +389,12 @@ func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoFie // SetInfoField updates the InfoField at a given index. // @ requires 0 <= idx -// @ requires sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires sl.Bytes(ubuf, 0, len(ubuf)) // @ requires acc(s.Mem(ubuf), R20) // pres for IO: // @ requires validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) // @ ensures acc(s.Mem(ubuf), R20) -// @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ ensures sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures r != nil ==> r.ErrorMem() // posts for IO: // @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> @@ -424,9 +424,9 @@ func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @ //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) //@ assert idx == oldCurrINF ==> RawBytesToBase(ubuf[:len(s.Raw)]).ValidCurrIdxsSpec() - //@ assert sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)) + //@ assert sl.Bytes(s.Raw, 0, len(s.Raw)) //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) - //@ assert acc(sl.AbsSlice_Bytes(s.Raw, 0, infOffset), HalfPerm) + //@ assert acc(sl.Bytes(s.Raw, 0, infOffset), HalfPerm) //@ sl.Reslice_Bytes(s.Raw, 0, infOffset, HalfPerm/2) //@ ValidPktMetaHdrSublice(s.Raw, infOffset) //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) @@ -450,7 +450,7 @@ func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @ // GetHopField returns the HopField at a given index. // @ requires 0 <= idx -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R10) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R10) // @ preserves acc(s.Mem(ubuf), R10) // @ ensures (idx < s.GetNumHops(ubuf)) == (r == nil) // @ ensures r == nil ==> s.CorrectlyDecodedHfWithIdx(ubuf, idx, res) @@ -475,12 +475,12 @@ func (s *Raw) GetHopField(idx int /*@, ghost ubuf []byte @*/) (res path.HopField } //@ unfold hop.Mem() //@ sl.CombineRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, R21) - //@ unfold acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) - //@ unfold acc(sl.AbsSlice_Bytes(ubuf[hopOffset : hopOffset+path.HopLen], 0, path.HopLen), R56) + //@ unfold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) + //@ unfold acc(sl.Bytes(ubuf[hopOffset : hopOffset+path.HopLen], 0, path.HopLen), R56) //@ assert hop.ToIO_HF() == //@ path.BytesToIO_HF(ubuf, 0, hopOffset, len(ubuf)) - //@ fold acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) - //@ fold acc(sl.AbsSlice_Bytes(ubuf[hopOffset : hopOffset+path.HopLen], 0, path.HopLen), R56) + //@ fold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) + //@ fold acc(sl.Bytes(ubuf[hopOffset : hopOffset+path.HopLen], 0, path.HopLen), R56) //@ sl.CombineRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, R21) //@ fold acc(s.Mem(ubuf), R11) //@ assert reveal s.CorrectlyDecodedHfWithIdx(ubuf, idx, hop) @@ -490,7 +490,7 @@ func (s *Raw) GetHopField(idx int /*@, ghost ubuf []byte @*/) (res path.HopField // GetCurrentHopField is a convenience method that returns the current hop field pointed to by the // CurrHF index in the path meta header. 
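// Analogously, this reads the hop field at index idx := int(s.PathMeta.CurrHF),
// located at the offset also used by SetHopField below:
//
//	hopOffset := MetaLen + s.NumINF*path.InfoLen + idx*path.HopLen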
// @ preserves acc(s.Mem(ubuf), R8) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R9) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R9) // @ ensures (r == nil) == s.ValidCurrHF(ubuf) // @ ensures r == nil ==> s.CorrectlyDecodedHf(ubuf, res) // @ ensures r != nil ==> r.ErrorMem() @@ -512,7 +512,7 @@ func (s *Raw) GetCurrentHopField( /*@ ghost ubuf []byte @*/ ) (res path.HopField // SetHopField updates the HopField at a given index. // @ requires 0 <= idx // @ preserves acc(s.Mem(ubuf), R20) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures r != nil ==> r.ErrorMem() // @ decreases func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) (r error) { @@ -532,7 +532,7 @@ func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) } hopOffset := MetaLen + s.NumINF*path.InfoLen + idx*path.HopLen //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), writePerm) - //@ assert sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)) + //@ assert sl.Bytes(s.Raw, 0, len(s.Raw)) //@ sl.SplitRange_Bytes(s.Raw, hopOffset, hopOffset+path.HopLen, writePerm) ret := hop.SerializeTo(s.Raw[hopOffset : hopOffset+path.HopLen]) //@ sl.CombineRange_Bytes(s.Raw, hopOffset, hopOffset+path.HopLen, writePerm) diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index 1c5822a91..c17c5eb61 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -44,7 +44,7 @@ pred (s *Raw) Mem(buf []byte) { ghost pure requires acc(s.Mem(buf), _) -requires acc(sl.AbsSlice_Bytes(buf, 0, len(buf)), R42) +requires acc(sl.Bytes(buf, 0, len(buf)), R42) decreases func (s *Raw) IsValidResultOfDecoding(buf []byte, err error) (res bool) { return s.EqAbsHeader(buf) && @@ -153,7 +153,7 @@ pure func (s *Raw) CurrInfMatchesCurrHF(ghost ub []byte) bool { ghost requires acc(s.Mem(ub), _) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) decreases pure func (s *Raw) EqAbsHeader(ub []byte) bool { return unfolding acc(s.Mem(ub), _) in @@ -193,103 +193,6 @@ func (s *Raw) DowngradePerm(buf []byte) { fold s.NonInitMem() } -/******** Lemma: RawPerm ********/ -pred (r *Raw) RawPermRemainder(ubuf []byte, p perm) { - 0 < p && - acc(r.Base.Mem(), p/2) && - acc(&r.Raw, p/2) && - len(r.Raw) <= len(ubuf) && - r.Raw === ubuf[:len(r.Raw)] && - acc(sl.AbsSlice_Bytes(ubuf, len(r.Raw), len(ubuf)), p) && - len(r.Raw) == r.Base.Len() -} - -ghost -requires 0 < p -requires acc(&r.Raw, p/2) && acc(sl.AbsSlice_Bytes(r.Raw, 0, len(r.Raw)), p) && acc(r.Base.Mem(), p/2) -requires r.RawPermRemainder(ubuf, p) -ensures acc(r.Mem(ubuf), p) -ensures acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -decreases -func (r *Raw) UndoRawPerm(ubuf []byte, p perm) { - unfold r.RawPermRemainder(ubuf, p) - sl.Unslice_Bytes(ubuf, 0, len(r.Raw), p) - sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), len(r.Raw), p) - fold acc(r.Mem(ubuf), p) -} - -ghost -requires 0 < p -requires acc(r.Mem(ubuf), p) -requires acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -ensures acc(&r.Raw, p/2) -ensures acc(sl.AbsSlice_Bytes(r.Raw, 0, len(r.Raw)), p) -ensures acc(r.Base.Mem(), p/2) -ensures r.RawPermRemainder(ubuf, p) -ensures r.Raw === old(unfolding acc(r.Mem(ubuf), p) in r.Raw) -decreases -func (r *Raw) RawPerm(ubuf []byte, p perm) { - unfold acc(r.Mem(ubuf), p) - sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), len(r.Raw), p) - sl.Reslice_Bytes(ubuf, 0, len(r.Raw), p) - fold r.RawPermRemainder(ubuf, p) -} 
-/******** End of Lemma: RawPerm ********/ - -/******** Lemma: RawRangePerm ********/ -pred (r *Raw) RawRangePermRemainder(ubuf []byte, start, end int, p perm) { - 0 < p && - acc(r.Base.Mem(), p) && - acc(&r.Raw, p/2) && - 0 <= start && start <= end && end <= len(r.Raw) && - len(r.Raw) <= len(ubuf) && - r.Raw === ubuf[:len(r.Raw)] && - acc(sl.AbsSlice_Bytes(r.Raw, 0, start), p) && - acc(sl.AbsSlice_Bytes(r.Raw, end, len(r.Raw)), p) && - acc(sl.AbsSlice_Bytes(ubuf, len(r.Raw), len(ubuf)), p) && - len(r.Raw) == r.Base.Len() -} - -ghost -requires 0 < p -requires acc(&r.Raw, p/2) -requires 0 <= start && start <= end && end <= len(r.Raw) -requires acc(sl.AbsSlice_Bytes(r.Raw[start:end], 0, end-start), p) -requires r.RawRangePermRemainder(ubuf, start, end, p) -ensures acc(r.Mem(ubuf), p) -ensures acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -decreases -func (r *Raw) UndoRawRangePerm(ubuf []byte, start, end int, p perm) { - unfold r.RawRangePermRemainder(ubuf, start, end, p) - sl.Unslice_Bytes(r.Raw, start, end, p) - sl.CombineAtIndex_Bytes(r.Raw, 0, end, start, p) - sl.CombineAtIndex_Bytes(r.Raw, 0, len(r.Raw), end, p) - fold r.RawPermRemainder(ubuf, p) - r.UndoRawPerm(ubuf, p) -} - -// Notice that no permission to r.Base.Mem() is provided, unlike the previous methods -ghost -requires 0 < p -requires acc(r.Mem(ubuf), p) -requires acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -requires 0 <= start && start <= end && end <= unfolding acc(r.Mem(ubuf), p) in len(r.Raw) -ensures acc(&r.Raw, p/2) -ensures r.Raw === old(unfolding acc(r.Mem(ubuf), p) in r.Raw) -ensures acc(sl.AbsSlice_Bytes(r.Raw[start:end], 0, end-start), p) -ensures r.RawRangePermRemainder(ubuf, start, end, p) -decreases -func (r *Raw) RawRangePerm(ubuf []byte, start, end int, p perm) { - unfold acc(r.Mem(ubuf), p) - sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), len(r.Raw), p) - sl.Reslice_Bytes(ubuf, 0, len(r.Raw), p) - sl.SplitByIndex_Bytes(r.Raw, 0, len(r.Raw), start, p) - sl.SplitByIndex_Bytes(r.Raw, start, len(r.Raw), end, p) - sl.Reslice_Bytes(r.Raw, start, end, p) - fold r.RawRangePermRemainder(ubuf, start, end, p) -} -/******** End of Lemma: RawRangePerm ********/ - ghost requires r.Mem(ubuf1) requires len(ubuf1) <= len(ubuf2) @@ -387,7 +290,7 @@ ghost requires 0 <= offset requires 0 <= currHfIdx && currHfIdx <= segLen requires offset + path.HopLen * segLen <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) ensures len(res) == segLen - currHfIdx decreases segLen - currHfIdx pure func hopFields( @@ -433,7 +336,7 @@ requires 0 <= offset requires 0 < segLen requires 0 <= currHfIdx && currHfIdx <= segLen requires offset + path.HopLen * segLen <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) ensures len(res.Future) == segLen - currHfIdx ensures len(res.History) == currHfIdx ensures len(res.Past) == currHfIdx @@ -466,7 +369,7 @@ requires 0 < segLen requires offset + path.HopLen * segLen <= len(raw) requires 0 <= currHfIdx && currHfIdx <= segLen requires 0 <= currInfIdx && currInfIdx < 3 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func CurrSeg(raw []byte, offset int, @@ -489,7 +392,7 @@ requires 0 <= seg2Len requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) requires 1 <= currInfIdx && currInfIdx < 4 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), 
R56) decreases pure func LeftSeg( raw []byte, @@ -514,7 +417,7 @@ requires 0 <= seg2Len requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 2 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func RightSeg( raw []byte, @@ -539,7 +442,7 @@ requires 0 <= seg2Len requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) requires 2 <= currInfIdx && currInfIdx < 5 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func MidSeg( raw []byte, @@ -558,7 +461,7 @@ pure func MidSeg( ghost opaque -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) requires validPktMetaHdr(raw) decreases pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { @@ -585,10 +488,10 @@ pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { // the return type. ghost requires MetaLen <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func RawBytesToMetaHdr(raw []byte) MetaHdr { - return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in let hdr := binary.BigEndian.Uint32(raw[:MetaLen]) in DecodedFrom(hdr) } @@ -597,7 +500,7 @@ pure func RawBytesToMetaHdr(raw []byte) MetaHdr { // the return type. ghost requires MetaLen <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func RawBytesToBase(raw []byte) Base { return let metaHdr := RawBytesToMetaHdr(raw) in @@ -609,7 +512,7 @@ pure func RawBytesToBase(raw []byte) Base { ghost opaque -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func validPktMetaHdr(raw []byte) bool { return MetaLen <= len(raw) && @@ -626,32 +529,32 @@ pure func validPktMetaHdr(raw []byte) bool { ghost requires MetaLen <= idx && idx <= len(raw) -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) -preserves acc(sl.AbsSlice_Bytes(raw[:idx], 0, idx), R56) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.Bytes(raw[:idx], 0, idx), R56) ensures RawBytesToMetaHdr(raw) == RawBytesToMetaHdr(raw[:idx]) ensures RawBytesToBase(raw) == RawBytesToBase(raw[:idx]) decreases func ValidPktMetaHdrSublice(raw []byte, idx int) { reveal validPktMetaHdr(raw) reveal validPktMetaHdr(raw[:idx]) - unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) - unfold acc(sl.AbsSlice_Bytes(raw[:idx], 0, idx), R56) + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(raw[:idx], 0, idx), R56) assert forall i int :: { &raw[:MetaLen][i] } 0 <= i && i < MetaLen ==> &raw[:MetaLen][i] == &raw[:idx][:MetaLen][i] - fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) - fold acc(sl.AbsSlice_Bytes(raw[:idx], 0, idx), R56) + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(raw[:idx], 0, idx), R56) } ghost requires acc(s.Mem(ub), R54) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +requires acc(sl.Bytes(ub, 0, len(ub)), R55) requires s.InfsMatchHfs(ub) requires s.ValidCurrINF(ub) requires s.ValidCurrHF(ub) requires s.SegsInBounds(ub) requires s.CurrInfMatchesCurrHF(ub) requires s.EqAbsHeader(ub) -ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +ensures acc(sl.Bytes(ub, 0, len(ub)), R55) ensures acc(s.Mem(ub), R54) 
ensures validPktMetaHdr(ub) ensures s.EqAbsHeader(ub) @@ -739,7 +642,7 @@ ghost opaque requires acc(s.Mem(ub), _) requires 0 <= idx && idx < s.GetNumINF(ub) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) decreases pure func (s *Raw) CorrectlyDecodedInfWithIdx(ub []byte, idx int, info path.InfoField) bool { return unfolding acc(s.Mem(ub), _) in @@ -754,7 +657,7 @@ ghost opaque requires acc(s.Mem(ub), _) requires s.ValidCurrINF(ub) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) decreases pure func (s *Raw) CorrectlyDecodedInf(ub []byte, info path.InfoField) bool { return unfolding acc(s.Mem(ub), _) in @@ -769,7 +672,7 @@ ghost opaque requires acc(s.Mem(ub), _) requires 0 <= idx && idx < s.GetNumHops(ub) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) decreases pure func (s *Raw) CorrectlyDecodedHfWithIdx(ub []byte, idx int, hop path.HopField) bool { return unfolding acc(s.Mem(ub), _) in @@ -783,7 +686,7 @@ ghost opaque requires acc(s.Mem(ub), _) requires s.ValidCurrHF(ub) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) decreases pure func (s *Raw) CorrectlyDecodedHf(ub []byte, hop path.HopField) bool { return unfolding acc(s.Mem(ub), _) in @@ -797,7 +700,7 @@ pure func (s *Raw) CorrectlyDecodedHf(ub []byte, hop path.HopField) bool { ghost preserves acc(s.Mem(ubuf), R55) preserves s.IsLastHopSpec(ubuf) -preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) +preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) preserves validPktMetaHdr(ubuf) preserves s.EqAbsHeader(ubuf) ensures len(s.absPkt(ubuf).CurrSeg.Future) == 1 @@ -822,7 +725,7 @@ func (s *Raw) LastHopLemma(ubuf []byte) { ghost preserves acc(s.Mem(ubuf), R55) preserves s.GetIsXoverSpec(ubuf) -preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) +preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) preserves validPktMetaHdr(ubuf) preserves s.EqAbsHeader(ubuf) ensures s.absPkt(ubuf).LeftSeg != none[io.IO_seg2] @@ -869,7 +772,7 @@ pure func (s *Raw) EqAbsInfoField(pkt io.IO_pkt2, info path.IntermediateAbsInfoF ghost preserves acc(s.Mem(ubuf), R53) -preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R53) +preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R53) preserves validPktMetaHdr(ubuf) preserves s.EqAbsHeader(ubuf) preserves len(s.absPkt(ubuf).CurrSeg.Future) > 0 @@ -913,7 +816,7 @@ requires 0 < segLen requires offset + path.HopLen * segLen <= len(raw) requires 0 <= currHfIdx && currHfIdx < segLen requires 0 <= currInfIdx && currInfIdx < 3 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) ensures len(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0).Future) > 0 decreases func LenCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen int) { @@ -927,7 +830,7 @@ requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) requires 0 <= currInfIdx && currInfIdx < 2 requires 1 <= currInfIdx ==> 0 < seg3Len -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) ensures LeftSeg(raw, currInfIdx+1, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] ensures RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] decreases @@ -942,7 +845,7 @@ requires 0 < segLen requires offset + path.HopLen * segLen <= len(raw) requires 0 <= currHfIdx && currHfIdx < segLen requires 0 <= currInfIdx && 
currInfIdx < 3 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) preserves len(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0).Future) > 0 ensures CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) == absIncPathSeg(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0)) @@ -969,7 +872,7 @@ requires 1 <= currInfIdx && currInfIdx < 3 requires 1 == currInfIdx ==> currHfIdx+1 == seg1Len requires 2 == currInfIdx ==> 0 < seg3Len && currHfIdx+1 == seg1Len + seg2Len requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) preserves LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] ensures let prevSegLen := LengthOfPrevSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) in @@ -995,7 +898,7 @@ requires 0 <= seg2Len requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) requires 2 <= currInfIdx && currInfIdx < 4 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) ensures LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) == MidSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) decreases @@ -1011,7 +914,7 @@ requires 0 <= seg2Len requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 1 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) ensures MidSeg(raw, currInfIdx+4, seg1Len, seg2Len, seg3Len, 0) == RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) decreases @@ -1030,7 +933,7 @@ requires 0 <= currInfIdx && currInfIdx < 2 requires 0 == currInfIdx ==> currHfIdx+1 == seg1Len requires 1 == currInfIdx ==> 0 < seg3Len && currHfIdx+1 == seg1Len + seg2Len requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) preserves RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] ensures let prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in @@ -1063,7 +966,7 @@ requires 0 <= offset requires 0 <= currHFIdx && currHFIdx <= segLen requires len(hops) == segLen - currHFIdx requires offset + path.HopLen * segLen <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func hopFieldsBytePositions(raw []byte, offset int, currHFIdx int, segLen int, hops seq[io.IO_HF]) bool { return forall i int :: { hops[i] } 0 <= i && i < len(hops) ==> @@ -1075,7 +978,7 @@ requires R55 < p requires 0 <= offset requires 0 <= currHFIdx && currHFIdx <= segLen requires offset + path.HopLen * segLen <= len(raw) -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.Bytes(raw, 0, len(raw)), p) ensures hopFieldsBytePositions(raw, offset, currHFIdx, segLen, hopFields(raw, offset, currHFIdx, segLen)) decreases segLen - currHFIdx func hopFieldsBytePositionsLemma( diff --git a/pkg/slayers/path/scion/raw_spec_test.gobra b/pkg/slayers/path/scion/raw_spec_test.gobra index 5ab5b8c25..d051ab530 100644 --- a/pkg/slayers/path/scion/raw_spec_test.gobra +++ b/pkg/slayers/path/scion/raw_spec_test.gobra @@ -16,7 +16,7 @@ package scion -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" func testAllocateNonInitRaw() { r := &Raw{} @@ -29,7 +29,7 @@ func 
testAllocateRaw() { fold r.Base.Mem() assert r.Base.Len() == MetaLen s := make([]byte, MetaLen) - fold slices.AbsSlice_Bytes(s, 0, len(s)) + fold sl.Bytes(s, 0, len(s)) r.Raw = s fold r.Mem(s) } diff --git a/pkg/slayers/path/scion/widen-lemma.gobra b/pkg/slayers/path/scion/widen-lemma.gobra index 1d14e8013..ddbc6a451 100644 --- a/pkg/slayers/path/scion/widen-lemma.gobra +++ b/pkg/slayers/path/scion/widen-lemma.gobra @@ -32,8 +32,8 @@ requires offset + path.HopLen * segLen <= length requires length <= len(raw) requires 0 <= currHfIdx && currHfIdx <= segLen requires 0 <= currInfIdx && currInfIdx < 3 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) +preserves acc(sl.Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) ensures CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, headerOffset) == CurrSeg(raw[start:length], offset-start, currInfIdx, currHfIdx, segLen, headerOffset-start) decreases @@ -45,8 +45,8 @@ func WidenCurrSeg(raw []byte, headerOffset int, start int, length int) { - unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) - unfold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R53) + unfold acc(sl.Bytes(raw, 0, len(raw)), R53) + unfold acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R53) ainfo1 := path.Timestamp(raw, currInfIdx, headerOffset) ainfo2 := path.Timestamp(raw[start:length], currInfIdx, headerOffset-start) @@ -67,8 +67,8 @@ func WidenCurrSeg(raw []byte, widenSegment(raw, offset, currHfIdx, ainfo1, uinfo1, consDir1, peer1, segLen, start, length) reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, headerOffset) reveal CurrSeg(raw[start:length], offset-start, currInfIdx, currHfIdx, segLen, headerOffset-start) - fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) - fold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R53) + fold acc(sl.Bytes(raw, 0, len(raw)), R53) + fold acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R53) } ghost @@ -77,8 +77,8 @@ requires 0 < segLen requires 0 <= currHfIdx && currHfIdx <= segLen requires length <= len(raw) requires offset + path.HopLen * segLen <= length -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R52) +preserves acc(sl.Bytes(raw, 0, len(raw)), R52) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R52) ensures segment(raw, offset, currHfIdx, ainfo, uinfo, consDir, peer, segLen) == segment(raw[start:length], offset-start, currHfIdx, ainfo, uinfo, consDir, peer, segLen) decreases @@ -100,19 +100,19 @@ ghost requires 0 <= start && start <= middle requires middle + path.HopLen <= length requires length <= len(raw) -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R54) +preserves acc(sl.Bytes(raw, 0, len(raw)), R54) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R54) ensures path.BytesToIO_HF(raw, 0, middle, len(raw)) == path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) decreases func widenBytesToIO_HF(raw []byte, middle int, start int, length int) { - unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) - unfold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R55) + unfold acc(sl.Bytes(raw, 0, len(raw)), R55) + unfold acc(sl.Bytes(raw[start:length], 0, 
len(raw[start:length])), R55) hfBytes1 := path.BytesToIO_HF(raw, 0, middle, len(raw)) hfBytes2 := path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) assert hfBytes1 == hfBytes2 - fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) - fold acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R55) + fold acc(sl.Bytes(raw, 0, len(raw)), R55) + fold acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R55) } ghost @@ -121,8 +121,8 @@ requires 0 <= start && start <= offset requires 0 <= currHfIdx && currHfIdx <= segLen requires offset + path.HopLen * segLen <= length requires length <= len(raw) -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), p) +preserves acc(sl.Bytes(raw, 0, len(raw)), p) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), p) ensures hopFields(raw, offset, currHfIdx, segLen) == hopFields(raw[start:length], offset-start, currHfIdx, segLen) decreases segLen - currHfIdx @@ -144,8 +144,8 @@ requires 0 <= seg3Len requires 0 <= length && length <= len(raw) requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length requires 1 <= currInfIdx && currInfIdx < 4 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) +preserves acc(sl.Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) ensures LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) == LeftSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset-start) decreases @@ -177,8 +177,8 @@ requires 0 <= seg3Len requires 0 <= length && length <= len(raw) requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length requires -1 <= currInfIdx && currInfIdx < 2 -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) +preserves acc(sl.Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) ensures RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) == RightSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset-start) decreases @@ -209,8 +209,8 @@ requires 0 <= seg3Len requires 2 <= currInfIdx && currInfIdx < 5 requires 0 <= length && length <= len(raw) requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -preserves acc(sl.AbsSlice_Bytes(raw[start:length], 0, len(raw[start:length])), R51) +preserves acc(sl.Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) ensures MidSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) == MidSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) decreases diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index 6abb98b89..0676e3961 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -217,10 +217,10 @@ func (s *SCION) NetworkFlow() (res gopacket.Flow) { // @ requires !opts.FixLengths // @ requires b != nil && b.Mem() // @ requires acc(s.Mem(ubuf), R0) -// @ requires sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures b.Mem() // @ ensures acc(s.Mem(ubuf), R0) -// @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ ensures sl.Bytes(ubuf, 0, len(ubuf)) // TODO: hide internal spec details // @ 
ensures e == nil && s.HasOneHopPath(ubuf) ==> // @ len(b.UBuf()) == old(len(b.UBuf())) + unfolding acc(s.Mem(ubuf), R55) in @@ -260,27 +260,27 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // Serialize common header. firstLine := uint32(s.Version&0xF)<<28 | uint32(s.TrafficClass)<<20 | s.FlowID&0xFFFFF // @ sl.SplitRange_Bytes(buf, 0, 4, writePerm) - // @ unfold acc(sl.AbsSlice_Bytes(buf[:4], 0, 4), writePerm) + // @ unfold acc(sl.Bytes(buf[:4], 0, 4), writePerm) binary.BigEndian.PutUint32(buf[:4], firstLine) - // @ fold acc(sl.AbsSlice_Bytes(buf[:4], 0, 4), writePerm) + // @ fold acc(sl.Bytes(buf[:4], 0, 4), writePerm) // @ sl.CombineRange_Bytes(buf, 0, 4, writePerm) - // @ unfold acc(sl.AbsSlice_Bytes(buf, 0, len(buf)), writePerm) + // @ unfold acc(sl.Bytes(buf, 0, len(buf)), writePerm) buf[4] = uint8(s.NextHdr) buf[5] = s.HdrLen - // @ fold acc(sl.AbsSlice_Bytes(buf, 0, len(buf)), writePerm) + // @ fold acc(sl.Bytes(buf, 0, len(buf)), writePerm) // @ sl.SplitRange_Bytes(buf, 6, 8, writePerm) - // @ unfold acc(sl.AbsSlice_Bytes(buf[6:8], 0, 2), writePerm) + // @ unfold acc(sl.Bytes(buf[6:8], 0, 2), writePerm) binary.BigEndian.PutUint16(buf[6:8], s.PayloadLen) - // @ fold acc(sl.AbsSlice_Bytes(buf[6:8], 0, 2), writePerm) + // @ fold acc(sl.Bytes(buf[6:8], 0, 2), writePerm) // @ sl.CombineRange_Bytes(buf, 6, 8, writePerm) - // @ unfold acc(sl.AbsSlice_Bytes(buf, 0, len(buf)), writePerm) + // @ unfold acc(sl.Bytes(buf, 0, len(buf)), writePerm) buf[8] = uint8(s.PathType) buf[9] = uint8(s.DstAddrType&0x7)<<4 | uint8(s.SrcAddrType&0x7) - // @ fold acc(sl.AbsSlice_Bytes(buf, 0, len(buf)), writePerm) + // @ fold acc(sl.Bytes(buf, 0, len(buf)), writePerm) // @ sl.SplitRange_Bytes(buf, 10, 12, writePerm) - // @ unfold acc(sl.AbsSlice_Bytes(buf[10:12], 0, 2), writePerm) + // @ unfold acc(sl.Bytes(buf[10:12], 0, 2), writePerm) binary.BigEndian.PutUint16(buf[10:12], 0) - // @ fold acc(sl.AbsSlice_Bytes(buf[10:12], 0, 2), writePerm) + // @ fold acc(sl.Bytes(buf[10:12], 0, 2), writePerm) // @ sl.CombineRange_Bytes(buf, 10, 12, writePerm) // @ ghost sPath := s.Path @@ -317,7 +317,7 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // data, so care should be taken to copy it first should later modification of data be required // before the SCION layer is discarded. 
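// For orientation, the fixed CmnHdrLen-byte common header decoded here (and
// written by SerializeTo above) is laid out as:
//
//	bytes 0-3:   Version (4 bits) | TrafficClass (8 bits) | FlowID (20 bits)
//	byte  4:     NextHdr
//	byte  5:     HdrLen
//	bytes 6-7:   PayloadLen (big endian)
//	byte  8:     PathType
//	byte  9:     DstAddrType (3 bits) << 4 | SrcAddrType (3 bits)
//	bytes 10-11: reserved (zero)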
// @ requires s.NonInitMem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ preserves df != nil && df.Mem() // @ ensures res == nil ==> s.Mem(data) // @ ensures res == nil && typeOf(s.GetPath(data)) == *scion.Raw ==> @@ -333,12 +333,12 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er "min", CmnHdrLen, "actual", len(data)) } // @ sl.SplitRange_Bytes(data, 0, 4, R41) - // @ preserves 4 <= len(data) && acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R41) + // @ preserves 4 <= len(data) && acc(sl.Bytes(data[:4], 0, 4), R41) // @ decreases // @ outline( - // @ unfold acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R41) + // @ unfold acc(sl.Bytes(data[:4], 0, 4), R41) firstLine := binary.BigEndian.Uint32(data[:4]) - // @ fold acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R41) + // @ fold acc(sl.Bytes(data[:4], 0, 4), R41) // @ ) // @ sl.CombineRange_Bytes(data, 0, 4, R41) // @ unfold s.NonInitMem() @@ -347,7 +347,7 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er s.FlowID = firstLine & 0xFFFFF // @ preserves acc(&s.NextHdr) && acc(&s.HdrLen) && acc(&s.PayloadLen) && acc(&s.PathType) // @ preserves acc(&s.DstAddrType) && acc(&s.SrcAddrType) - // @ preserves CmnHdrLen <= len(data) && acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) + // @ preserves CmnHdrLen <= len(data) && acc(sl.Bytes(data, 0, len(data)), R41) // @ ensures s.DstAddrType.Has3Bits() && s.SrcAddrType.Has3Bits() // @ ensures 0 <= s.PathType && s.PathType < 256 // @ ensures path.Type(GetPathType(data)) == s.PathType @@ -357,7 +357,7 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ CmnHdrLen + 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() // @ decreases // @ outline( - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) + // @ unfold acc(sl.Bytes(data, 0, len(data)), R41) s.NextHdr = L4ProtocolType(data[4]) s.HdrLen = data[5] // @ assert &data[6:8][0] == &data[6] && &data[6:8][1] == &data[7] @@ -369,7 +369,7 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ assert int(s.DstAddrType) == b.BitAnd7(int(data[9] >> 4)) s.SrcAddrType = AddrType(data[9] & 0x7) // @ assert int(s.SrcAddrType) == b.BitAnd7(int(data[9])) - // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) + // @ fold acc(sl.Bytes(data, 0, len(data)), R41) // @ ) // Decode address header. 
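	// The address header that starts at CmnHdrLen has the shape also assumed
	// by AddrHdrLen and SerializeAddrHdr below:
	//
	//	DstIA:   addr.IABytes bytes (big-endian uint64)
	//	SrcIA:   addr.IABytes bytes (big-endian uint64)
	//	DstAddr: s.DstAddrType.Length() bytes
	//	SrcAddr: s.SrcAddrType.Length() bytes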
// @ sl.SplitByIndex_Bytes(data, 0, len(data), CmnHdrLen, R41) @@ -430,14 +430,14 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er s.Payload = data[hdrBytes:] // @ fold acc(s.Mem(data), R54) // @ ghost if(typeOf(s.GetPath(data)) == (*scion.Raw)) { - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R56) - // @ unfold acc(sl.AbsSlice_Bytes(data[offset : offset+pathLen], 0, len(data[offset : offset+pathLen])), R56) + // @ unfold acc(sl.Bytes(data, 0, len(data)), R56) + // @ unfold acc(sl.Bytes(data[offset : offset+pathLen], 0, len(data[offset : offset+pathLen])), R56) // @ unfold acc(s.Path.(*scion.Raw).Mem(data[offset : offset+pathLen]), R55) // @ assert reveal s.EqAbsHeader(data) // @ assert reveal s.ValidScionInitSpec(data) // @ fold acc(s.Path.Mem(data[offset : offset+pathLen]), R55) - // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R56) - // @ fold acc(sl.AbsSlice_Bytes(data[offset : offset+pathLen], 0, len(data[offset : offset+pathLen])), R56) + // @ fold acc(sl.Bytes(data, 0, len(data)), R56) + // @ fold acc(sl.Bytes(data[offset : offset+pathLen], 0, len(data[offset : offset+pathLen])), R56) // @ } // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R41) // @ assert typeOf(s.GetPath(data)) == *scion.Raw ==> s.EqAbsHeader(data) && s.ValidScionInitSpec(data) @@ -511,7 +511,7 @@ func (s *SCION) getPath(pathType path.Type) (res path.Path, err error) { } // @ requires pb != nil -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves pb.Mem() // @ ensures res != nil ==> res.ErrorMem() // @ decreases @@ -596,15 +596,15 @@ func scionNextLayerTypeL4(t L4ProtocolType) gopacket.LayerType { // the destination address. // @ requires acc(&s.DstAddrType, R20) && acc(&s.RawDstAddr, R20) // @ requires s.DstAddrType == T4Svc ==> len(s.RawDstAddr) >= addr.HostLenSVC -// @ requires acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) +// @ requires acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) // @ ensures acc(&s.DstAddrType, R20) && acc(&s.RawDstAddr, R20) // @ ensures err == nil ==> acc(res.Mem(), R15) // @ ensures err == nil ==> typeOf(res) == *net.IPAddr || typeOf(res) == addr.HostSVC // @ ensures err == nil ==> // @ let rawDstAddr := s.RawDstAddr in -// @ (acc(res.Mem(), R15) --* acc(sl.AbsSlice_Bytes(rawDstAddr, 0, len(rawDstAddr)), R15)) +// @ (acc(res.Mem(), R15) --* acc(sl.Bytes(rawDstAddr, 0, len(rawDstAddr)), R15)) // @ ensures err != nil ==> -// @ acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) +// @ acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func (s *SCION) DstAddr() (res net.Addr, err error) { @@ -617,15 +617,15 @@ func (s *SCION) DstAddr() (res net.Addr, err error) { // address. 
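// Like DstAddr above, this is a thin wrapper around parseAddr below: T4Ip and
// T16Ip yield a *net.IPAddr that aliases s.RawSrcAddr (no copy is made), while
// T4Svc yields an addr.HostSVC decoded as a big-endian uint16. The magic wand
// in the postconditions hands the slice permission back once the returned
// alias is relinquished.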
// @ requires acc(&s.SrcAddrType, R20) && acc(&s.RawSrcAddr, R20) // @ requires s.SrcAddrType == T4Svc ==> len(s.RawSrcAddr) >= addr.HostLenSVC -// @ requires acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R15) +// @ requires acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R15) // @ ensures acc(&s.SrcAddrType, R20) && acc(&s.RawSrcAddr, R20) // @ ensures err == nil ==> acc(res.Mem(), R15) // @ ensures err == nil ==> typeOf(res) == *net.IPAddr || typeOf(res) == addr.HostSVC // @ ensures err == nil ==> // @ let rawSrcAddr := s.RawSrcAddr in -// @ (acc(res.Mem(), R15) --* acc(sl.AbsSlice_Bytes(rawSrcAddr, 0, len(rawSrcAddr)), R15)) +// @ (acc(res.Mem(), R15) --* acc(sl.Bytes(rawSrcAddr, 0, len(rawSrcAddr)), R15)) // @ ensures err != nil ==> -// @ acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R15) +// @ acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R15) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func (s *SCION) SrcAddr() (res net.Addr, err error) { @@ -644,9 +644,9 @@ func (s *SCION) SrcAddr() (res net.Addr, err error) { // @ ensures acc(&s.RawDstAddr) && acc(&s.DstAddrType) // @ ensures res != nil ==> res.ErrorMem() // @ ensures res == nil ==> isIP(dst) || isHostSVC(dst) -// @ ensures res == nil && wildcard && isIP(dst) ==> acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), _) -// @ ensures res == nil && wildcard && isHostSVC(dst) ==> sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) -// @ ensures res == nil && !wildcard && isHostSVC(dst) ==> sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) +// @ ensures res == nil && wildcard && isIP(dst) ==> acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), _) +// @ ensures res == nil && wildcard && isHostSVC(dst) ==> sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) +// @ ensures res == nil && !wildcard && isHostSVC(dst) ==> sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) // @ ensures res == nil && !wildcard ==> acc(dst.Mem(), R18) // @ ensures res == nil && !wildcard && isIP(dst) ==> (unfolding acc(dst.Mem(), R20) in (isIPv4(dst) ==> forall i int :: { &s.RawDstAddr[i] } 0 <= i && i < len(s.RawDstAddr) ==> &s.RawDstAddr[i] == &dst.(*net.IPAddr).IP[i])) // @ ensures res == nil && !wildcard && isIP(dst) ==> (unfolding acc(dst.Mem(), R20) in (isIPv6(dst) && isConvertibleToIPv4(dst) ==> forall i int :: { &s.RawDstAddr[i] } 0 <= i && i < len(s.RawDstAddr) ==> &s.RawDstAddr[i] == &dst.(*net.IPAddr).IP[12+i])) @@ -663,7 +663,7 @@ func (s *SCION) SetDstAddr(dst net.Addr /*@ , ghost wildcard bool @*/) (res erro var verScionTmp []byte s.DstAddrType, verScionTmp, err = packAddr(dst /*@ , wildcard @*/) // @ ghost if !wildcard && err == nil && isIP(dst) { - // @ apply acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(dst.Mem(), R20) + // @ apply acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(dst.Mem(), R20) // @ } s.RawDstAddr = verScionTmp return err @@ -681,9 +681,9 @@ func (s *SCION) SetDstAddr(dst net.Addr /*@ , ghost wildcard bool @*/) (res erro // @ ensures acc(&s.RawSrcAddr) && acc(&s.SrcAddrType) // @ ensures res != nil ==> res.ErrorMem() // @ ensures res == nil ==> isIP(src) || isHostSVC(src) -// @ ensures res == nil && wildcard && isIP(src) ==> acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), _) -// @ ensures res == nil && wildcard && isHostSVC(src) ==> sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) -// @ ensures res == nil && !wildcard && isHostSVC(src) ==> sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) +// @ ensures res == nil && wildcard && 
isIP(src) ==> acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), _) +// @ ensures res == nil && wildcard && isHostSVC(src) ==> sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) +// @ ensures res == nil && !wildcard && isHostSVC(src) ==> sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) // @ ensures res == nil && !wildcard ==> acc(src.Mem(), R18) // @ ensures res == nil && !wildcard && isIP(src) ==> (unfolding acc(src.Mem(), R20) in (isIPv4(src) ==> forall i int :: { &s.RawSrcAddr[i] } 0 <= i && i < len(s.RawSrcAddr) ==> &s.RawSrcAddr[i] == &src.(*net.IPAddr).IP[i])) // @ ensures res == nil && !wildcard && isIP(src) ==> (unfolding acc(src.Mem(), R20) in (isIPv6(src) && isConvertibleToIPv4(src) ==> forall i int :: { &s.RawSrcAddr[i] } 0 <= i && i < len(s.RawSrcAddr) ==> &s.RawSrcAddr[i] == &src.(*net.IPAddr).IP[12+i])) @@ -700,48 +700,48 @@ func (s *SCION) SetSrcAddr(src net.Addr /*@, ghost wildcard bool @*/) (res error var verScionTmp []byte s.SrcAddrType, verScionTmp, err = packAddr(src /*@ , wildcard @*/) // @ ghost if !wildcard && err == nil && isIP(src) { - // @ apply acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(src.Mem(), R20) + // @ apply acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(src.Mem(), R20) // @ } s.RawSrcAddr = verScionTmp return err } // @ requires addrType == T4Svc ==> len(raw) >= addr.HostLenSVC -// @ requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) +// @ requires acc(sl.Bytes(raw, 0, len(raw)), R15) // @ ensures err == nil ==> acc(res.Mem(), R15) // @ ensures err == nil ==> typeOf(res) == *net.IPAddr || typeOf(res) == addr.HostSVC // @ ensures err == nil ==> -// @ (acc(res.Mem(), R15) --* acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15)) -// @ ensures err != nil ==> acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) +// @ (acc(res.Mem(), R15) --* acc(sl.Bytes(raw, 0, len(raw)), R15)) +// @ ensures err != nil ==> acc(sl.Bytes(raw, 0, len(raw)), R15) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func parseAddr(addrType AddrType, raw []byte) (res net.Addr, err error) { switch addrType { case T4Ip: verScionTmp := &net.IPAddr{IP: net.IP(raw)} - // @ unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ unfold acc(sl.Bytes(raw, 0, len(raw)), R15) // @ fold acc(verScionTmp.Mem(), R15) - // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15)) { + // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.Bytes(raw, 0, len(raw)), R15)) { // @ assert acc(&verScionTmp.IP, R50) && verScionTmp.IP === raw // @ unfold acc(verScionTmp.Mem(), R15) - // @ fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ fold acc(sl.Bytes(raw, 0, len(raw)), R15) // @ } return verScionTmp, nil case T4Svc: - // @ unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ unfold acc(sl.Bytes(raw, 0, len(raw)), R15) verScionTmp := addr.HostSVC(binary.BigEndian.Uint16(raw[:addr.HostLenSVC])) - // @ fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ fold acc(sl.Bytes(raw, 0, len(raw)), R15) // @ fold acc(verScionTmp.Mem(), R15) - // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15)) { } + // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.Bytes(raw, 0, len(raw)), R15)) { } return verScionTmp, nil case T16Ip: verScionTmp := &net.IPAddr{IP: net.IP(raw)} - // @ unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ unfold acc(sl.Bytes(raw, 0, len(raw)), R15) // @ fold acc(verScionTmp.Mem(), R15) - // @ package 
(acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15)) { + // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.Bytes(raw, 0, len(raw)), R15)) { // @ assert acc(&verScionTmp.IP, R50) && verScionTmp.IP === raw // @ unfold acc(verScionTmp.Mem(), R15) - // @ fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ fold acc(sl.Bytes(raw, 0, len(raw)), R15) // @ } return verScionTmp, nil } @@ -757,12 +757,12 @@ func parseAddr(addrType AddrType, raw []byte) (res net.Addr, err error) { // @ ensures isHostSVC(hostAddr) ==> err == nil // @ ensures err == nil ==> isIP(hostAddr) || isHostSVC(hostAddr) // @ ensures err != nil ==> err.ErrorMem() -// @ ensures err == nil && wildcard && isIP(hostAddr) ==> acc(sl.AbsSlice_Bytes(b, 0, len(b)), _) -// @ ensures err == nil && wildcard && isHostSVC(hostAddr) ==> sl.AbsSlice_Bytes(b, 0, len(b)) -// @ ensures err == nil && !wildcard && isHostSVC(hostAddr) ==> sl.AbsSlice_Bytes(b, 0, len(b)) +// @ ensures err == nil && wildcard && isIP(hostAddr) ==> acc(sl.Bytes(b, 0, len(b)), _) +// @ ensures err == nil && wildcard && isHostSVC(hostAddr) ==> sl.Bytes(b, 0, len(b)) +// @ ensures err == nil && !wildcard && isHostSVC(hostAddr) ==> sl.Bytes(b, 0, len(b)) // @ ensures err == nil && !wildcard && isHostSVC(hostAddr) ==> acc(hostAddr.Mem(), R20) -// @ ensures err == nil && !wildcard && isIP(hostAddr) ==> acc(sl.AbsSlice_Bytes(b, 0, len(b)), R20) -// @ ensures err == nil && !wildcard && isIP(hostAddr) ==> (acc(sl.AbsSlice_Bytes(b, 0, len(b)), R20) --* acc(hostAddr.Mem(), R20)) +// @ ensures err == nil && !wildcard && isIP(hostAddr) ==> acc(sl.Bytes(b, 0, len(b)), R20) +// @ ensures err == nil && !wildcard && isIP(hostAddr) ==> (acc(sl.Bytes(b, 0, len(b)), R20) --* acc(hostAddr.Mem(), R20)) // @ ensures err == nil && !wildcard && isIP(hostAddr) ==> (unfolding acc(hostAddr.Mem(), R20) in (isIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[i])) // @ ensures err == nil && !wildcard && isIP(hostAddr) ==> (unfolding acc(hostAddr.Mem(), R20) in (isIPv6(hostAddr) && isConvertibleToIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[12+i])) // @ ensures err == nil && !wildcard && isIP(hostAddr) ==> (unfolding acc(hostAddr.Mem(), R20) in (!isIPv4(hostAddr) && !isIPv6(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[i])) @@ -787,11 +787,11 @@ func packAddr(hostAddr net.Addr /*@ , ghost wildcard bool @*/) (addrtyp AddrType // @ } // @ assert !wildcard && isIP(hostAddr) ==> (unfolding acc(hostAddr.Mem(), R20) in (isIPv6(hostAddr) && isConvertibleToIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[12+i])) // @ ghost if wildcard { - // @ fold acc(sl.AbsSlice_Bytes(ip, 0, len(ip)), _) + // @ fold acc(sl.Bytes(ip, 0, len(ip)), _) // @ } else { - // @ fold acc(sl.AbsSlice_Bytes(ip, 0, len(ip)), R20) - // @ package acc(sl.AbsSlice_Bytes(ip, 0, len(ip)), R20) --* acc(hostAddr.Mem(), R20) { - // @ unfold acc(sl.AbsSlice_Bytes(ip, 0, len(ip)), R20) + // @ fold acc(sl.Bytes(ip, 0, len(ip)), R20) + // @ package acc(sl.Bytes(ip, 0, len(ip)), R20) --* acc(hostAddr.Mem(), R20) { + // @ unfold acc(sl.Bytes(ip, 0, len(ip)), R20) // @ fold acc(hostAddr.Mem(), R20) // @ } // @ } @@ -800,18 +800,18 @@ func packAddr(hostAddr net.Addr /*@ , ghost wildcard bool @*/) (addrtyp AddrType // @ assert !wildcard && isIP(hostAddr) ==> (unfolding 
acc(hostAddr.Mem(), R20) in (isIPv6(hostAddr) && isConvertibleToIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[12+i])) verScionTmp := a.IP // @ ghost if wildcard { - // @ fold acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), _) + // @ fold acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), _) // @ } else { - // @ fold acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), R20) - // @ package acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(hostAddr.Mem(), R20) { - // @ unfold acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), R20) + // @ fold acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), R20) + // @ package acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(hostAddr.Mem(), R20) { + // @ unfold acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), R20) // @ fold acc(hostAddr.Mem(), R20) // @ } // @ } return T16Ip, verScionTmp, nil case addr.HostSVC: verScionTmp := a.PackWithPad(2) - // @ fold sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)) + // @ fold sl.Bytes(verScionTmp, 0, len(verScionTmp)) return T4Svc, verScionTmp, nil } return 0, nil, serrors.New("unsupported address", "addr", hostAddr) @@ -856,8 +856,8 @@ func (s *SCION) AddrHdrLen( /*@ ghost ubuf []byte, ghost insideSlayers bool @*/ // buffer. The caller must ensure that the correct address types and lengths are set in the SCION // layer, otherwise the results of this method are undefined. // @ preserves acc(s.HeaderMem(ubuf), R10) -// @ preserves sl.AbsSlice_Bytes(buf, 0, len(buf)) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R10) +// @ preserves sl.Bytes(buf, 0, len(buf)) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R10) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err error) { @@ -871,25 +871,25 @@ func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err er srcAddrBytes := s.SrcAddrType.Length() offset := 0 // @ sl.SplitRange_Bytes(buf, offset, len(buf), writePerm) - // @ unfold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:])) + // @ unfold sl.Bytes(buf[offset:], 0, len(buf[offset:])) binary.BigEndian.PutUint64(buf[offset:], uint64(s.DstIA)) - // @ fold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:])) + // @ fold sl.Bytes(buf[offset:], 0, len(buf[offset:])) // @ sl.CombineRange_Bytes(buf, offset, len(buf), writePerm) offset += addr.IABytes // @ sl.SplitRange_Bytes(buf, offset, len(buf), writePerm) - // @ unfold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:])) + // @ unfold sl.Bytes(buf[offset:], 0, len(buf[offset:])) binary.BigEndian.PutUint64(buf[offset:], uint64(s.SrcIA)) - // @ fold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:])) + // @ fold sl.Bytes(buf[offset:], 0, len(buf[offset:])) // @ sl.CombineRange_Bytes(buf, offset, len(buf), writePerm) offset += addr.IABytes // @ sl.SplitRange_Bytes(buf, offset, offset+dstAddrBytes, writePerm) // @ sl.SplitRange_Bytes(ubuf, offset, offset+dstAddrBytes, R10) - // @ unfold sl.AbsSlice_Bytes(buf[offset:offset+dstAddrBytes], 0, len(buf[offset:offset+dstAddrBytes])) - // @ unfold acc(sl.AbsSlice_Bytes(ubuf[offset:offset+dstAddrBytes], 0, len(ubuf[offset:offset+dstAddrBytes])), R10) + // @ unfold sl.Bytes(buf[offset:offset+dstAddrBytes], 0, len(buf[offset:offset+dstAddrBytes])) + // @ unfold acc(sl.Bytes(ubuf[offset:offset+dstAddrBytes], 0, len(ubuf[offset:offset+dstAddrBytes])), R10) copy(buf[offset:offset+dstAddrBytes], s.RawDstAddr /*@ , 
R10 @*/) - // @ fold sl.AbsSlice_Bytes(buf[offset:offset+dstAddrBytes], 0, len(buf[offset:offset+dstAddrBytes])) - // @ fold acc(sl.AbsSlice_Bytes(ubuf[offset:offset+dstAddrBytes], 0, len(ubuf[offset:offset+dstAddrBytes])), R10) + // @ fold sl.Bytes(buf[offset:offset+dstAddrBytes], 0, len(buf[offset:offset+dstAddrBytes])) + // @ fold acc(sl.Bytes(ubuf[offset:offset+dstAddrBytes], 0, len(ubuf[offset:offset+dstAddrBytes])), R10) // @ sl.CombineRange_Bytes(buf, offset, offset+dstAddrBytes, writePerm) // @ sl.CombineRange_Bytes(ubuf, offset, offset+dstAddrBytes, R10) @@ -897,13 +897,13 @@ func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err er // @ sl.SplitRange_Bytes(buf, offset, offset+srcAddrBytes, writePerm) // @ sl.SplitRange_Bytes(ubuf, offset, offset+srcAddrBytes, R10) - // @ unfold sl.AbsSlice_Bytes(buf[offset:offset+srcAddrBytes], 0, len(buf[offset:offset+srcAddrBytes])) - // @ unfold acc(sl.AbsSlice_Bytes(ubuf[offset:offset+srcAddrBytes], 0, len(ubuf[offset:offset+srcAddrBytes])), R10) + // @ unfold sl.Bytes(buf[offset:offset+srcAddrBytes], 0, len(buf[offset:offset+srcAddrBytes])) + // @ unfold acc(sl.Bytes(ubuf[offset:offset+srcAddrBytes], 0, len(ubuf[offset:offset+srcAddrBytes])), R10) copy(buf[offset:offset+srcAddrBytes], s.RawSrcAddr /*@ , R10 @*/) - // @ fold sl.AbsSlice_Bytes(buf[offset:offset+srcAddrBytes], 0, len(buf[offset:offset+srcAddrBytes])) - // @ fold acc(sl.AbsSlice_Bytes(ubuf[offset:offset+srcAddrBytes], 0, len(ubuf[offset:offset+srcAddrBytes])), R10) + // @ fold sl.Bytes(buf[offset:offset+srcAddrBytes], 0, len(buf[offset:offset+srcAddrBytes])) + // @ fold acc(sl.Bytes(ubuf[offset:offset+srcAddrBytes], 0, len(ubuf[offset:offset+srcAddrBytes])), R10) // @ sl.CombineRange_Bytes(buf, offset, offset+srcAddrBytes, writePerm) // @ sl.CombineRange_Bytes(ubuf, offset, offset+srcAddrBytes, R10) @@ -917,7 +917,7 @@ func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err er // @ requires acc(&s.SrcAddrType, HalfPerm) && s.SrcAddrType.Has3Bits() // @ requires acc(&s.DstAddrType, HalfPerm) && s.DstAddrType.Has3Bits() // @ requires acc(&s.RawSrcAddr) && acc(&s.RawDstAddr) -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R41) // @ ensures res == nil ==> s.HeaderMem(data) // @ ensures res != nil ==> res.ErrorMem() // @ ensures res != nil ==> ( @@ -932,13 +932,13 @@ func (s *SCION) DecodeAddrHdr(data []byte) (res error) { "actual", len(data)) } offset := 0 - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) + // @ unfold acc(sl.Bytes(data, 0, len(data)), R41) // @ assert forall i int :: { &data[offset:][i] }{ &data[i] } 0 <= i && i < l ==> &data[offset:][i] == &data[i] s.DstIA = addr.IA(binary.BigEndian.Uint64(data[offset:])) offset += addr.IABytes // @ assert forall i int :: { &data[offset:][i] } 0 <= i && i < l ==> &data[offset:][i] == &data[offset+i] s.SrcIA = addr.IA(binary.BigEndian.Uint64(data[offset:])) - // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R41) + // @ fold acc(sl.Bytes(data, 0, len(data)), R41) offset += addr.IABytes dstAddrBytes := s.DstAddrType.Length() srcAddrBytes := s.SrcAddrType.Length() @@ -954,13 +954,13 @@ func (s *SCION) DecodeAddrHdr(data []byte) (res error) { // @ requires acc(&s.RawSrcAddr, R20) && acc(&s.RawDstAddr, R20) // @ requires len(s.RawSrcAddr) % 2 == 0 && len(s.RawDstAddr) % 2 == 0 // @ requires acc(&s.SrcIA, R20) && acc(&s.DstIA, R20) -// @ requires acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) -// 
@ requires acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) -// @ preserves acc(sl.AbsSlice_Bytes(upperLayer, 0, len(upperLayer)), R20) +// @ requires acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) +// @ requires acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) +// @ preserves acc(sl.Bytes(upperLayer, 0, len(upperLayer)), R20) // @ ensures acc(&s.RawSrcAddr, R20) && acc(&s.RawDstAddr, R20) // @ ensures acc(&s.SrcIA, R20) && acc(&s.DstIA, R20) -// @ ensures acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) -// @ ensures acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) +// @ ensures acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) +// @ ensures acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ ensures s == nil ==> err != nil // @ ensures len(s.RawDstAddr) == 0 ==> err != nil // @ ensures len(s.RawSrcAddr) == 0 ==> err != nil @@ -983,12 +983,12 @@ func (s *SCION) computeChecksum(upperLayer []byte, protocol uint8) (res uint16, // @ requires acc(&s.RawSrcAddr, R20) && acc(&s.RawDstAddr, R20) // @ requires len(s.RawSrcAddr) % 2 == 0 && len(s.RawDstAddr) % 2 == 0 // @ requires acc(&s.SrcIA, R20) && acc(&s.DstIA, R20) -// @ requires acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) -// @ requires acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) +// @ requires acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) +// @ requires acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ ensures acc(&s.RawSrcAddr, R20) && acc(&s.RawDstAddr, R20) // @ ensures acc(&s.SrcIA, R20) && acc(&s.DstIA, R20) -// @ ensures acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) -// @ ensures acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) +// @ ensures acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) +// @ ensures acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ ensures len(s.RawDstAddr) == 0 ==> err != nil // @ ensures len(s.RawSrcAddr) == 0 ==> err != nil // @ ensures err != nil ==> err.ErrorMem() @@ -1018,7 +1018,7 @@ func (s *SCION) pseudoHeaderChecksum(length int, protocol uint8) (res uint32, er } // Address length is guaranteed to be a multiple of 2 by the protocol. 
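// The two loops below fold the raw source and destination addresses into the checksum two bytes at a time, interpreting each pair as a big-endian 16-bit word; the even-length requirements above are what make the i+1 accesses safe.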
// @ ghost var rawSrcAddrLen int = len(s.RawSrcAddr) - // @ invariant acc(&s.RawSrcAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) + // @ invariant acc(&s.RawSrcAddr, R20) && acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) // @ invariant len(s.RawSrcAddr) == rawSrcAddrLen // @ invariant len(s.RawSrcAddr) % 2 == 0 // @ invariant i % 2 == 0 @@ -1026,20 +1026,20 @@ func (s *SCION) pseudoHeaderChecksum(length int, protocol uint8) (res uint32, er // @ decreases len(s.RawSrcAddr) - i for i := 0; i < len(s.RawSrcAddr); i += 2 { // @ preserves err == nil - // @ requires acc(&s.RawSrcAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) + // @ requires acc(&s.RawSrcAddr, R20) && acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) // @ requires 0 <= i && i < len(s.RawSrcAddr) && i % 2 == 0 && len(s.RawSrcAddr) % 2 == 0 - // @ ensures acc(&s.RawSrcAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) + // @ ensures acc(&s.RawSrcAddr, R20) && acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) // @ ensures s.RawSrcAddr === before(s.RawSrcAddr) // @ decreases // @ outline( - // @ unfold acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) + // @ unfold acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) csum += uint32(s.RawSrcAddr[i]) << 8 csum += uint32(s.RawSrcAddr[i+1]) - // @ fold acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) + // @ fold acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) // @ ) } // @ ghost var rawDstAddrLen int = len(s.RawDstAddr) - // @ invariant acc(&s.RawDstAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) + // @ invariant acc(&s.RawDstAddr, R20) && acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ invariant len(s.RawDstAddr) == rawDstAddrLen // @ invariant len(s.RawDstAddr) % 2 == 0 // @ invariant i % 2 == 0 @@ -1047,16 +1047,16 @@ func (s *SCION) pseudoHeaderChecksum(length int, protocol uint8) (res uint32, er // @ decreases len(s.RawDstAddr) - i for i := 0; i < len(s.RawDstAddr); i += 2 { // @ preserves err == nil - // @ requires acc(&s.RawDstAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) + // @ requires acc(&s.RawDstAddr, R20) && acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ requires 0 <= i && i < len(s.RawDstAddr) && i % 2 == 0 && len(s.RawDstAddr) % 2 == 0 - // @ ensures acc(&s.RawDstAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) + // @ ensures acc(&s.RawDstAddr, R20) && acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ ensures s.RawDstAddr === before(s.RawDstAddr) // @ decreases // @ outline( - // @ unfold acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) + // @ unfold acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) csum += uint32(s.RawDstAddr[i]) << 8 csum += uint32(s.RawDstAddr[i+1]) - // @ fold acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) + // @ fold acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ ) } l := uint32(length) @@ -1065,13 +1065,13 @@ func (s *SCION) pseudoHeaderChecksum(length int, protocol uint8) (res uint32, er return csum, nil } -// @ preserves acc(sl.AbsSlice_Bytes(upperLayer, 0, len(upperLayer)), R20) +// @ preserves acc(sl.Bytes(upperLayer, 0, len(upperLayer)), R20) // @ decreases func (s *SCION) upperLayerChecksum(upperLayer []byte, csum uint32) uint32 { // Compute safe boundary to ensure we do not access out of bounds. 
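// The loop below likewise consumes the payload in big-endian 16-bit words.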
// Odd lengths are handled at the end. safeBoundary := len(upperLayer) - 1 - // @ unfold acc(sl.AbsSlice_Bytes(upperLayer, 0, len(upperLayer)), R20) + // @ unfold acc(sl.Bytes(upperLayer, 0, len(upperLayer)), R20) // @ invariant 0 <= i && i < safeBoundary + 2 // @ invariant i % 2 == 0 // @ invariant forall i int :: { &upperLayer[i] } 0 <= i && i < len(upperLayer) ==> acc(&upperLayer[i], R20) @@ -1083,7 +1083,7 @@ func (s *SCION) upperLayerChecksum(upperLayer []byte, csum uint32) uint32 { if len(upperLayer)%2 == 1 { csum += uint32(upperLayer[safeBoundary]) << 8 } - // @ fold acc(sl.AbsSlice_Bytes(upperLayer, 0, len(upperLayer)), R20) + // @ fold acc(sl.Bytes(upperLayer, 0, len(upperLayer)), R20) return csum } diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index 7834eb78a..43342c2e1 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -232,8 +232,8 @@ pred (s *SCION) ChecksumMem() { acc(&s.RawSrcAddr) && acc(&s.RawDstAddr) && len(s.RawSrcAddr) % 2 == 0 && len(s.RawDstAddr) % 2 == 0 && acc(&s.SrcIA) && acc(&s.DstIA) && - sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) && - sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) + sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) && + sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) } pred (b *BaseLayer) Mem(ghost ub []byte, ghost breakPoint int) { @@ -354,7 +354,7 @@ ghost opaque pure requires acc(s.Mem(ub), _) -requires acc(sl.AbsSlice_Bytes(ub, 0, length), _) +requires acc(sl.Bytes(ub, 0, length), _) requires CmnHdrLen <= length decreases func (s *SCION) ValidHeaderOffset(ub []byte, length int) bool { @@ -364,49 +364,49 @@ func (s *SCION) ValidHeaderOffset(ub []byte, length int) bool { ghost requires acc(s.Mem(ub), R56) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) -requires acc(sl.AbsSlice_Bytes(ub, 0, length), R55) +requires acc(sl.Bytes(ub, 0, len(ub)), R55) +requires acc(sl.Bytes(ub, 0, length), R55) requires CmnHdrLen <= length && length <= len(ub) requires s.ValidHeaderOffset(ub, len(ub)) ensures acc(s.Mem(ub), R56) -ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) -ensures acc(sl.AbsSlice_Bytes(ub, 0, length), R55) +ensures acc(sl.Bytes(ub, 0, len(ub)), R55) +ensures acc(sl.Bytes(ub, 0, length), R55) ensures s.ValidHeaderOffset(ub, length) decreases func (s *SCION) ValidHeaderOffsetToSubSliceLemma(ub []byte, length int) { reveal s.ValidHeaderOffset(ub, len(ub)) - unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) - unfold acc(sl.AbsSlice_Bytes(ub, 0, length), R56) + unfold acc(sl.Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.Bytes(ub, 0, length), R56) assert reveal s.ValidHeaderOffset(ub, length) - fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) - fold acc(sl.AbsSlice_Bytes(ub, 0, length), R56) + fold acc(sl.Bytes(ub, 0, len(ub)), R56) + fold acc(sl.Bytes(ub, 0, length), R56) } ghost requires acc(s.Mem(ub), R56) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) -requires acc(sl.AbsSlice_Bytes(ub, 0, length), R55) +requires acc(sl.Bytes(ub, 0, len(ub)), R55) +requires acc(sl.Bytes(ub, 0, length), R55) requires CmnHdrLen <= length && length <= len(ub) requires s.ValidHeaderOffset(ub, length) ensures acc(s.Mem(ub), R56) -ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) -ensures acc(sl.AbsSlice_Bytes(ub, 0, length), R55) +ensures acc(sl.Bytes(ub, 0, len(ub)), R55) +ensures acc(sl.Bytes(ub, 0, length), R55) ensures s.ValidHeaderOffset(ub, len(ub)) decreases func (s *SCION) ValidHeaderOffsetFromSubSliceLemma(ub []byte, length int) { reveal s.ValidHeaderOffset(ub, len(ub)) - 
unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) - unfold acc(sl.AbsSlice_Bytes(ub, 0, length), R56) + unfold acc(sl.Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.Bytes(ub, 0, length), R56) assert reveal s.ValidHeaderOffset(ub, length) - fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) - fold acc(sl.AbsSlice_Bytes(ub, 0, length), R56) + fold acc(sl.Bytes(ub, 0, len(ub)), R56) + fold acc(sl.Bytes(ub, 0, length), R56) } ghost opaque pure requires acc(s.Mem(ub), _) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires acc(sl.Bytes(ub, 0, len(ub)), _) decreases func (s *SCION) EqAbsHeader(ub []byte) bool { return unfolding acc(s.Mem(ub), _) in @@ -418,7 +418,7 @@ func (s *SCION) EqAbsHeader(ub []byte) bool { // to avoid doing these casts, especially when we add support for EPIC. typeOf(s.Path) == (*scion.Raw) && unfolding acc(s.Path.Mem(ub[low:high]), _) in - unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in + unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in let _ := Asserting(forall k int :: {&ub[low:high][k]} 0 <= k && k < high ==> &ub[low:high][k] == &ub[low + k]) in let _ := Asserting(forall k int :: {&ub[low:high][:scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> @@ -450,7 +450,7 @@ func (s *SCION) ValidScionInitSpec(ub []byte) bool { // Checks if the common path header is valid in the serialized scion packet. ghost opaque -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func ValidPktMetaHdr(raw []byte) bool { return CmnHdrLen <= len(raw) && @@ -460,7 +460,7 @@ pure func ValidPktMetaHdr(raw []byte) bool { let rawHdr := raw[start:end] in let length := GetLength(raw) in length <= len(raw) && - unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in let _ := Asserting(forall k int :: {&rawHdr[k]} 0 <= k && k < scion.MetaLen ==> &rawHdr[k] == &raw[start + k]) in let hdr := binary.BigEndian.Uint32(rawHdr) in let metaHdr := scion.DecodedFrom(hdr) in @@ -476,7 +476,7 @@ pure func ValidPktMetaHdr(raw []byte) bool { ghost opaque -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func IsSupportedPkt(raw []byte) bool { return CmnHdrLen <= len(raw) && @@ -487,7 +487,7 @@ pure func IsSupportedPkt(raw []byte) bool { } ghost -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires acc(sl.Bytes(ub, 0, len(ub)), _) requires CmnHdrLen <= len(ub) decreases pure func GetAddressOffset(ub []byte) int { @@ -495,18 +495,18 @@ pure func GetAddressOffset(ub []byte) int { } ghost -requires acc(sl.AbsSlice_Bytes(ub, 0, length), _) +requires acc(sl.Bytes(ub, 0, length), _) requires CmnHdrLen <= length decreases pure func GetAddressOffsetWithinLength(ub []byte, length int) int { - return unfolding acc(sl.AbsSlice_Bytes(ub, 0, length), _) in + return unfolding acc(sl.Bytes(ub, 0, length), _) in let dstAddrLen := AddrType(ub[9] >> 4 & 0x7).Length() in let srcAddrLen := AddrType(ub[9] & 0x7).Length() in CmnHdrLen + 2*addr.IABytes + dstAddrLen + srcAddrLen } ghost -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires acc(sl.Bytes(ub, 0, len(ub)), _) requires CmnHdrLen <= len(ub) decreases pure func GetLength(ub []byte) int { @@ -514,33 +514,33 @@ pure func GetLength(ub []byte) int { } ghost -requires acc(sl.AbsSlice_Bytes(ub, 0, length), _) +requires acc(sl.Bytes(ub, 0, length), _) requires CmnHdrLen <= length decreases pure func GetLengthWithinLength(ub []byte, length int) int { - return unfolding 
acc(sl.AbsSlice_Bytes(ub, 0, length), _) in int(ub[5])*LineLen + return unfolding acc(sl.Bytes(ub, 0, length), _) in int(ub[5])*LineLen } ghost -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires acc(sl.Bytes(ub, 0, len(ub)), _) requires CmnHdrLen <= len(ub) decreases pure func GetPathType(ub []byte) int { - return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in int(ub[8]) + return unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in int(ub[8]) } ghost -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires acc(sl.Bytes(ub, 0, len(ub)), _) requires CmnHdrLen <= len(ub) decreases pure func GetNextHdr(ub []byte) int { - return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in int(ub[4]) + return unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in int(ub[4]) } ghost opaque requires acc(s.Mem(ub), _) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires acc(sl.Bytes(ub, 0, len(ub)), _) decreases pure func (s *SCION) EqPathType(ub []byte) bool { return unfolding acc(s.Mem(ub), _) in diff --git a/pkg/slayers/scion_test.gobra b/pkg/slayers/scion_test.gobra index eae1258d6..3678ddaee 100644 --- a/pkg/slayers/scion_test.gobra +++ b/pkg/slayers/scion_test.gobra @@ -39,7 +39,7 @@ requires acc(src.Mem(), _) func testSrcSetterWildcard(s *SCION, src *net.IPAddr) { res := s.SetSrcAddr(src, true) // in the wildcard case, we have wildcard access to the address in the SCION struct - assert acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), _) + assert acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), _) } // These tests show the behavior of SetSrcAddr when a HostSVC is passed @@ -50,7 +50,7 @@ func testSrcSetterSVC(s *SCION, src addr.HostSVC) { res := s.SetSrcAddr(src, false) assert src.Mem() // if the address is a HostSVC, we have the whole permission anyway in a slice predicate - unfold sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) + unfold sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) assert forall i int :: { &s.RawSrcAddr[i] } 0 <= i && i < len(s.RawSrcAddr) ==> acc(&s.RawSrcAddr[i]) } @@ -60,7 +60,7 @@ requires acc(src.Mem(), _) func testSrcSetterSVCWildcard(s *SCION, src addr.HostSVC) { res := s.SetSrcAddr(src, true) // if the address is a HostSVC, we have the whole permission anyway in a slice predicate - unfold sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) + unfold sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) assert forall i int :: { &s.RawSrcAddr[i] } 0 <= i && i < len(s.RawSrcAddr) ==> acc(&s.RawSrcAddr[i]) } @@ -82,7 +82,7 @@ requires acc(dst.Mem(), _) func testDstSetterWildcard(s *SCION, dst *net.IPAddr) { res := s.SetDstAddr(dst, true) // in the wildcard case, we have wildcard access to the address in the SCION struct - assert acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), _) + assert acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), _) } // These tests show the behavior of SetDstAddr when a HostSVC is passed @@ -93,7 +93,7 @@ func testDstSetterSVC(s *SCION, dst addr.HostSVC) { res := s.SetDstAddr(dst, false) assert dst.Mem() // if the address is a HostSVC, we have the whole permission anyway in a slice predicate - unfold sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) + unfold sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) assert forall i int :: { &s.RawDstAddr[i] } 0 <= i && i < len(s.RawDstAddr) ==> acc(&s.RawDstAddr[i]) } @@ -103,6 +103,6 @@ requires acc(dst.Mem(), _) func testDstSetterSVCWildcard(s *SCION, dst addr.HostSVC) { res := s.SetDstAddr(dst, true) // if the address is a HostSVC, we have the whole permission anyway in a slice 
predicate - unfold sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) + unfold sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) assert forall i int :: { &s.RawDstAddr[i] } 0 <= i && i < len(s.RawDstAddr) ==> acc(&s.RawDstAddr[i]) } diff --git a/pkg/slayers/scmp.go b/pkg/slayers/scmp.go index e74fe6656..ab15de197 100644 --- a/pkg/slayers/scmp.go +++ b/pkg/slayers/scmp.go @@ -24,7 +24,7 @@ import ( "github.com/scionproto/scion/pkg/private/serrors" // @ . "github.com/scionproto/scion/verification/utils/definitions" - // @ "github.com/scionproto/scion/verification/utils/slices" + // @ sl "github.com/scionproto/scion/verification/utils/slices" ) // MaxSCMPPacketLen the maximum length a SCION packet including SCMP quote can @@ -130,14 +130,14 @@ func (s *SCMP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOp // @ decreases // @ outline ( // @ b.ExchangePred() - // @ slices.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) - // @ unfold slices.AbsSlice_Bytes(underlyingBufRes, 0, 2) + // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) + // @ unfold sl.Bytes(underlyingBufRes, 0, 2) // @ assert forall i int :: { &bytes[i] } 0 <= i && i < 2 ==> &bytes[i] == &underlyingBufRes[i] - // @ fold slices.AbsSlice_Bytes(bytes, 0, 2) + // @ fold sl.Bytes(bytes, 0, 2) s.TypeCode.SerializeTo(bytes) - // @ unfold slices.AbsSlice_Bytes(bytes, 0, 2) - // @ fold slices.AbsSlice_Bytes(underlyingBufRes, 0, 2) - // @ slices.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) + // @ unfold sl.Bytes(bytes, 0, 2) + // @ fold sl.Bytes(underlyingBufRes, 0, 2) + // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ b.RestoreMem(underlyingBufRes) // @ ) @@ -154,13 +154,13 @@ func (s *SCMP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOp // @ decreases // @ outline ( // @ b.ExchangePred() - // @ slices.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm) - // @ unfold slices.AbsSlice_Bytes(underlyingBufRes, 0, 4) + // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm) + // @ unfold sl.Bytes(underlyingBufRes, 0, 4) // @ assert forall i int :: { &bytes[i] } 0 <= i && i < 4 ==> &bytes[i] == &underlyingBufRes[i] bytes[2] = 0 bytes[3] = 0 - // @ fold slices.AbsSlice_Bytes(underlyingBufRes, 0, 4) - // @ slices.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm) + // @ fold sl.Bytes(underlyingBufRes, 0, 4) + // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm) // @ b.RestoreMem(underlyingBufRes) // @ ) verScionTmp := b.Bytes() @@ -182,13 +182,13 @@ func (s *SCMP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOp // @ decreases // @ outline ( // @ b.ExchangePred() - // @ slices.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm) - // @ unfold slices.AbsSlice_Bytes(underlyingBufRes, 0, 4) + // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm) + // @ unfold sl.Bytes(underlyingBufRes, 0, 4) // @ assert forall i int :: { &bytes[i] } 0 <= i && i < 4 ==> &bytes[i] == &underlyingBufRes[i] // @ assert forall i int :: { &bytes[2:][i] } 0 <= i && i < 2 ==> &bytes[2:][i] == &bytes[i + 2] binary.BigEndian.PutUint16(bytes[2:], s.Checksum) - // @ fold slices.AbsSlice_Bytes(underlyingBufRes, 0, 4) - // @ slices.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm) + // @ fold 
sl.Bytes(underlyingBufRes, 0, 4) + // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm) // @ b.RestoreMem(underlyingBufRes) // @ ) // @ fold s.Mem(ubufMem) @@ -197,7 +197,7 @@ func (s *SCMP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOp // DecodeFromBytes decodes the given bytes into this layer. // @ requires df != nil -// @ preserves acc(slices.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ requires s.NonInitMem() // @ preserves df.Mem() // @ ensures res == nil ==> s.Mem(data) @@ -210,31 +210,31 @@ func (s *SCMP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res err } // @ unfold s.NonInitMem() // @ requires len(data) >= 4 - // @ requires acc(slices.AbsSlice_Bytes(data, 0, len(data)), R40) + // @ requires acc(sl.Bytes(data, 0, len(data)), R40) // @ preserves acc(&s.TypeCode) - // @ ensures acc(slices.AbsSlice_Bytes(data, 2, len(data)), R40) - // @ ensures acc(slices.AbsSlice_Bytes(data, 0, 2), R40) + // @ ensures acc(sl.Bytes(data, 2, len(data)), R40) + // @ ensures acc(sl.Bytes(data, 0, 2), R40) // @ decreases // @ outline ( - // @ slices.SplitByIndex_Bytes(data, 0, len(data), 2, R40) - // @ unfold acc(slices.AbsSlice_Bytes(data, 0, 2), R40) + // @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, R40) + // @ unfold acc(sl.Bytes(data, 0, 2), R40) s.TypeCode = CreateSCMPTypeCode(SCMPType(data[0]), SCMPCode(data[1])) - // @ fold acc(slices.AbsSlice_Bytes(data, 0, 2), R40) + // @ fold acc(sl.Bytes(data, 0, 2), R40) // @ ) // @ requires len(data) >= 4 - // @ requires acc(slices.AbsSlice_Bytes(data, 0, 2), R40) - // @ requires acc(slices.AbsSlice_Bytes(data, 2, len(data)), R40) + // @ requires acc(sl.Bytes(data, 0, 2), R40) + // @ requires acc(sl.Bytes(data, 2, len(data)), R40) // @ preserves acc(&s.Checksum) - // @ ensures acc(slices.AbsSlice_Bytes(data, 0, len(data)), R40) + // @ ensures acc(sl.Bytes(data, 0, len(data)), R40) // @ decreases // @ outline ( - // @ slices.SplitByIndex_Bytes(data, 2, len(data), 4, R40) - // @ unfold acc(slices.AbsSlice_Bytes(data, 2, 4), R40) + // @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, R40) + // @ unfold acc(sl.Bytes(data, 2, 4), R40) // @ assert forall i int :: { &data[2:4][i] } 0 <= i && i < 2 ==> &data[2 + i] == &data[2:4][i] s.Checksum = binary.BigEndian.Uint16(data[2:4]) - // @ fold acc(slices.AbsSlice_Bytes(data, 2, 4), R40) - // @ slices.CombineAtIndex_Bytes(data, 0, 4, 2, R40) - // @ slices.CombineAtIndex_Bytes(data, 0, len(data), 4, R40) + // @ fold acc(sl.Bytes(data, 2, 4), R40) + // @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, R40) + // @ sl.CombineAtIndex_Bytes(data, 0, len(data), 4, R40) // @ ) s.BaseLayer = BaseLayer{Contents: data[:4], Payload: data[4:]} // @ fold s.BaseLayer.Mem(data, 4) @@ -259,7 +259,7 @@ func (s *SCMP) SetNetworkLayerForChecksum(scn *SCION) { } // @ requires pb != nil -// @ requires slices.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves pb.Mem() // @ ensures res != nil ==> res.ErrorMem() // @ decreases diff --git a/pkg/slayers/scmp_msg.go b/pkg/slayers/scmp_msg.go index 94ac8c449..80748dddd 100644 --- a/pkg/slayers/scmp_msg.go +++ b/pkg/slayers/scmp_msg.go @@ -64,10 +64,10 @@ func (i *SCMPExternalInterfaceDown) NextLayerType() gopacket.LayerType { // DecodeFromBytes decodes the given bytes into this layer. 
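// The expected layout, mirrored by the reads below, is the 8-byte ISD-AS of the router that observed the event followed by its 8-byte interface identifier; the remainder of the buffer is the quoted packet.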
// @ requires df != nil // @ requires i.NonInitMem() -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves df.Mem() // @ ensures res == nil ==> i.Mem(data) -// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data))) +// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data))) // @ ensures res != nil ==> res.ErrorMem() // @ decreases func (i *SCMPExternalInterfaceDown) DecodeFromBytes(data []byte, @@ -81,16 +81,16 @@ func (i *SCMPExternalInterfaceDown) DecodeFromBytes(data []byte, // @ unfold i.NonInitMem() offset := 0 // @ sl.SplitRange_Bytes(data, offset, len(data), R15) - // @ unfold acc(sl.AbsSlice_Bytes(data[offset:], 0, len(data[offset:])), R15) + // @ unfold acc(sl.Bytes(data[offset:], 0, len(data[offset:])), R15) i.IA = addr.IA(binary.BigEndian.Uint64(data[offset:])) - // @ fold acc(sl.AbsSlice_Bytes(data[offset:], 0, len(data[offset:])), R15) + // @ fold acc(sl.Bytes(data[offset:], 0, len(data[offset:])), R15) // @ sl.CombineRange_Bytes(data, offset, len(data), R15) offset += addr.IABytes // @ sl.SplitRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15) // @ ghost newSlice := data[offset : offset+scmpRawInterfaceLen] - // @ unfold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15) + // @ unfold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15) i.IfID = binary.BigEndian.Uint64(data[offset : offset+scmpRawInterfaceLen]) - // @ fold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15) + // @ fold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15) // @ sl.CombineRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15) offset += scmpRawInterfaceLen // @ sl.SplitByIndex_Bytes(data, 0, len(data), offset, writePerm) @@ -126,16 +126,16 @@ func (i *SCMPExternalInterfaceDown) SerializeTo(b gopacket.SerializeBuffer, opts // @ b.ExchangePred() // @ assert buf === underlyingBufRes[:addr.IABytes+scmpRawInterfaceLen] // @ sl.SplitRange_Bytes(underlyingBufRes, 0, len(buf), writePerm) - // @ assert sl.AbsSlice_Bytes(buf, 0, len(buf)) - // @ unfold sl.AbsSlice_Bytes(buf, 0, len(buf)) + // @ assert sl.Bytes(buf, 0, len(buf)) + // @ unfold sl.Bytes(buf, 0, len(buf)) binary.BigEndian.PutUint64(buf[offset:], uint64(i.IA)) - // @ fold sl.AbsSlice_Bytes(buf, 0, len(buf)) + // @ fold sl.Bytes(buf, 0, len(buf)) offset += addr.IABytes // @ sl.SplitRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm) // @ ghost newSlice := buf[offset:offset+scmpRawInterfaceLen] - // @ unfold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)) + // @ unfold sl.Bytes(newSlice, 0, len(newSlice)) binary.BigEndian.PutUint64(buf[offset:offset+scmpRawInterfaceLen], i.IfID) - // @ fold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)) + // @ fold sl.Bytes(newSlice, 0, len(newSlice)) // @ sl.CombineRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm) // @ sl.CombineRange_Bytes(underlyingBufRes, 0, len(buf), writePerm) // @ b.RestoreMem(underlyingBufRes) @@ -144,7 +144,7 @@ func (i *SCMPExternalInterfaceDown) SerializeTo(b gopacket.SerializeBuffer, opts // @ requires pb != nil // @ preserves pb.Mem() -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ ensures res != nil ==> res.ErrorMem() // @ decreases func decodeSCMPExternalInterfaceDown(data []byte, pb gopacket.PacketBuilder) (res error) { @@ -202,11 +202,11 @@ func (*SCMPInternalConnectivityDown) NextLayerType() gopacket.LayerType { // DecodeFromBytes decodes the given bytes into this layer. 
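// The expected layout is the 8-byte ISD-AS followed by the 8-byte ingress and 8-byte egress interface identifiers, matching the three reads below; the remainder is the quoted packet.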
// @ requires df != nil -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ requires i.NonInitMem() // @ preserves df.Mem() // @ ensures res == nil ==> i.Mem(data) -// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data))) +// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data))) // @ ensures res != nil ==> res.ErrorMem() // @ decreases func (i *SCMPInternalConnectivityDown) DecodeFromBytes(data []byte, @@ -221,23 +221,23 @@ func (i *SCMPInternalConnectivityDown) DecodeFromBytes(data []byte, // @ defer fold i.Mem(data) offset := 0 // @ sl.SplitRange_Bytes(data, offset, len(data), R15) - // @ unfold acc(sl.AbsSlice_Bytes(data[offset:], 0, len(data[offset:])), R15) + // @ unfold acc(sl.Bytes(data[offset:], 0, len(data[offset:])), R15) i.IA = addr.IA(binary.BigEndian.Uint64(data[offset:])) - // @ fold acc(sl.AbsSlice_Bytes(data[offset:], 0, len(data[offset:])), R15) + // @ fold acc(sl.Bytes(data[offset:], 0, len(data[offset:])), R15) // @ sl.CombineRange_Bytes(data, offset, len(data), R15) offset += addr.IABytes // @ sl.SplitRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15) // @ ghost newSlice := data[offset : offset+scmpRawInterfaceLen] - // @ unfold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15) + // @ unfold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15) i.Ingress = binary.BigEndian.Uint64(data[offset : offset+scmpRawInterfaceLen]) - // @ fold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15) + // @ fold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15) // @ sl.CombineRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15) offset += scmpRawInterfaceLen // @ sl.SplitRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15) // @ ghost newSlice = data[offset : offset+scmpRawInterfaceLen] - // @ unfold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15) + // @ unfold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15) i.Egress = binary.BigEndian.Uint64(data[offset : offset+scmpRawInterfaceLen]) - // @ fold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15) + // @ fold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15) // @ sl.CombineRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15) offset += scmpRawInterfaceLen // @ sl.SplitByIndex_Bytes(data, 0, len(data), offset, writePerm) @@ -271,25 +271,25 @@ func (i *SCMPInternalConnectivityDown) SerializeTo(b gopacket.SerializeBuffer, o // @ defer fold i.Mem(ubufMem) // @ b.ExchangePred() // @ sl.SplitRange_Bytes(underlyingBufRes, 0, len(buf), writePerm) - // @ assert sl.AbsSlice_Bytes(buf, 0, len(buf)) + // @ assert sl.Bytes(buf, 0, len(buf)) // @ sl.SplitRange_Bytes(buf, offset, len(buf), writePerm) - // @ unfold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:])) + // @ unfold sl.Bytes(buf[offset:], 0, len(buf[offset:])) binary.BigEndian.PutUint64(buf[offset:], uint64(i.IA)) - // @ fold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:])) + // @ fold sl.Bytes(buf[offset:], 0, len(buf[offset:])) // @ sl.CombineRange_Bytes(buf, offset, len(buf), writePerm) offset += addr.IABytes // @ ghost newSlice := buf[offset:offset+scmpRawInterfaceLen] // @ sl.SplitRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm) - // @ unfold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)) + // @ unfold sl.Bytes(newSlice, 0, len(newSlice)) binary.BigEndian.PutUint64(buf[offset:offset+scmpRawInterfaceLen], i.Ingress) - // @ fold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)) + // @ fold sl.Bytes(newSlice, 0, len(newSlice)) // @ 
sl.CombineRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm) offset += scmpRawInterfaceLen // @ ghost newSlice = buf[offset:offset+scmpRawInterfaceLen] // @ sl.SplitRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm) - // @ unfold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)) + // @ unfold sl.Bytes(newSlice, 0, len(newSlice)) binary.BigEndian.PutUint64(buf[offset:offset+scmpRawInterfaceLen], i.Egress) - // @ fold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)) + // @ fold sl.Bytes(newSlice, 0, len(newSlice)) // @ sl.CombineRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm) // @ sl.CombineRange_Bytes(underlyingBufRes, 0, len(buf), writePerm) // @ b.RestoreMem(underlyingBufRes) @@ -298,7 +298,7 @@ func (i *SCMPInternalConnectivityDown) SerializeTo(b gopacket.SerializeBuffer, o // @ requires pb != nil // @ preserves pb.Mem() -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func decodeSCMPInternalConnectivityDown(data []byte, pb gopacket.PacketBuilder) (err error) { @@ -344,10 +344,10 @@ func (*SCMPEcho) NextLayerType() gopacket.LayerType { // DecodeFromBytes decodes the given bytes into this layer. // @ requires df != nil // @ requires i.NonInitMem() -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves df.Mem() // @ ensures res == nil ==> i.Mem(data) -// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data))) +// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data))) // @ ensures res != nil ==> res.ErrorMem() // @ decreases func (i *SCMPEcho) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res error) { @@ -362,44 +362,44 @@ func (i *SCMPEcho) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res // @ requires offset == 0 // @ preserves acc(&i.Identifier) // @ requires len(data) >= 4 - // @ requires sl.AbsSlice_Bytes(data, 0, len(data)) - // @ ensures sl.AbsSlice_Bytes(data, 2, len(data)) - // @ ensures sl.AbsSlice_Bytes(data, 0, 2) + // @ requires sl.Bytes(data, 0, len(data)) + // @ ensures sl.Bytes(data, 2, len(data)) + // @ ensures sl.Bytes(data, 0, 2) // @ decreases // @ outline ( // @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm) - // @ unfold sl.AbsSlice_Bytes(data, 0, 2) + // @ unfold sl.Bytes(data, 0, 2) i.Identifier = binary.BigEndian.Uint16(data[:2]) - // @ fold sl.AbsSlice_Bytes(data, 0, 2) + // @ fold sl.Bytes(data, 0, 2) // @ ) offset += 2 // @ requires offset == 2 // @ preserves acc(&i.SeqNumber) // @ requires len(data) >= 4 - // @ requires sl.AbsSlice_Bytes(data, 2, len(data)) - // @ ensures sl.AbsSlice_Bytes(data, 2, 4) - // @ ensures sl.AbsSlice_Bytes(data, 4, len(data)) + // @ requires sl.Bytes(data, 2, len(data)) + // @ ensures sl.Bytes(data, 2, 4) + // @ ensures sl.Bytes(data, 4, len(data)) // @ decreases // @ outline ( // @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm) - // @ unfold sl.AbsSlice_Bytes(data, 2, 4) + // @ unfold sl.Bytes(data, 2, 4) // @ assert forall i int :: { &data[offset:offset+2][i] } 0 <= i && i < 2 ==> &data[offset + i] == &data[offset : offset+2][i] i.SeqNumber = binary.BigEndian.Uint16(data[offset : offset+2]) - // @ fold sl.AbsSlice_Bytes(data, 2, 4) + // @ fold sl.Bytes(data, 2, 4) // @ ) offset += 2 // @ requires offset == 4 // @ requires len(data) >= 4 // @ requires acc(&i.BaseLayer) - // @ requires sl.AbsSlice_Bytes(data, 0, 2) - // @ requires 
sl.AbsSlice_Bytes(data, 2, 4) - // @ requires sl.AbsSlice_Bytes(data, 4, len(data)) + // @ requires sl.Bytes(data, 0, 2) + // @ requires sl.Bytes(data, 2, 4) + // @ requires sl.Bytes(data, 4, len(data)) // @ ensures acc(i.BaseLayer.Mem(data, 4)) // @ decreases // @ outline ( // @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm) - // @ unfold sl.AbsSlice_Bytes(data, 0, 4) - // @ unfold sl.AbsSlice_Bytes(data, 4, len(data)) + // @ unfold sl.Bytes(data, 0, 4) + // @ unfold sl.Bytes(data, 4, len(data)) // @ assert forall i int :: { &data[offset:][i] } 0 <= i && i < len(data) - offset ==> &data[offset:][i] == &data[offset + i] i.BaseLayer = BaseLayer{ Contents: data[:offset], @@ -407,8 +407,8 @@ func (i *SCMPEcho) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res } // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[offset+l] == &i.Payload[l] // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> acc(&i.Payload[l]) - // @ fold sl.AbsSlice_Bytes(i.Contents, 0, len(i.Contents)) - // @ fold sl.AbsSlice_Bytes(i.Payload, 0, len(i.Payload)) + // @ fold sl.Bytes(i.Contents, 0, len(i.Contents)) + // @ fold sl.Bytes(i.Payload, 0, len(i.Payload)) // @ fold i.BaseLayer.Mem(data, 4) // @ ) return nil @@ -441,9 +441,9 @@ func (i *SCMPEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.Seriali // @ outline ( // @ b.ExchangePred() // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) - // @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2) + // @ unfold sl.Bytes(underlyingBufRes, 0, 2) binary.BigEndian.PutUint16(buf[:2], i.Identifier) - // @ fold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2) + // @ fold sl.Bytes(underlyingBufRes, 0, 2) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ b.RestoreMem(underlyingBufRes) // @ ) @@ -459,10 +459,10 @@ func (i *SCMPEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.Seriali // @ b.ExchangePred() // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) - // @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4) + // @ unfold sl.Bytes(underlyingBufRes, 2, 4) // @ assert forall i int :: { &buf[offset:offset+2][i] } 0 <= i && i < 2 ==> &buf[offset:offset+2][i] == &buf[offset + i] binary.BigEndian.PutUint16(buf[offset:offset+2], i.SeqNumber) - // @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4) + // @ fold sl.Bytes(underlyingBufRes, 2, 4) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ b.RestoreMem(underlyingBufRes) @@ -472,7 +472,7 @@ func (i *SCMPEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.Seriali // @ requires pb != nil // @ preserves pb.Mem() -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func decodeSCMPEcho(data []byte, pb gopacket.PacketBuilder) (err error) { @@ -514,10 +514,10 @@ func (*SCMPParameterProblem) NextLayerType() gopacket.LayerType { // DecodeFromBytes decodes the given bytes into this layer. 
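// Bytes 0-1 are reserved on the wire; bytes 2-3 carry the pointer into the offending packet, which is the only field read below.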
// @ requires df != nil // @ requires i.NonInitMem() -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves df.Mem() // @ ensures res == nil ==> i.Mem(data) -// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data))) +// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data))) // @ ensures res != nil ==> res.ErrorMem() // @ decreases func (i *SCMPParameterProblem) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res error) { @@ -530,33 +530,33 @@ func (i *SCMPParameterProblem) DecodeFromBytes(data []byte, df gopacket.DecodeFe // @ defer fold i.Mem(data) // @ preserves acc(&i.Pointer) // @ requires len(data) >= 4 - // @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) + // @ preserves sl.Bytes(data, 0, len(data)) // @ decreases // @ outline ( // @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm) // @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm) - // @ unfold sl.AbsSlice_Bytes(data, 2, 4) + // @ unfold sl.Bytes(data, 2, 4) // @ assert forall i int :: { &data[2:4][i] } 0 <= i && i < 2 ==> &data[2:4][i] == &data[2 + i] i.Pointer = binary.BigEndian.Uint16(data[2:4]) - // @ fold sl.AbsSlice_Bytes(data, 2, 4) + // @ fold sl.Bytes(data, 2, 4) // @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm) // @ sl.CombineAtIndex_Bytes(data, 0, len(data), 4, writePerm) // @ ) // @ requires len(data) >= 4 // @ requires acc(&i.BaseLayer) // @ ensures i.BaseLayer.Mem(data, 4) - // @ requires sl.AbsSlice_Bytes(data, 0, len(data)) + // @ requires sl.Bytes(data, 0, len(data)) // @ decreases // @ outline ( - // @ unfold sl.AbsSlice_Bytes(data, 0, len(data)) + // @ unfold sl.Bytes(data, 0, len(data)) // @ assert forall i int :: { &data[4:][i] } 0 <= i && i < len(data) ==> &data[4:][i] == &data[4 + i] i.BaseLayer = BaseLayer{ Contents: data[:4], Payload: data[4:], } // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[4+l] == &i.Payload[l] - // @ fold sl.AbsSlice_Bytes(i.Contents, 0, len(i.Contents)) - // @ fold sl.AbsSlice_Bytes(i.Payload, 0, len(i.Payload)) + // @ fold sl.Bytes(i.Contents, 0, len(i.Contents)) + // @ fold sl.Bytes(i.Payload, 0, len(i.Payload)) // @ fold i.BaseLayer.Mem(data, 4) // @ ) return nil @@ -587,9 +587,9 @@ func (i *SCMPParameterProblem) SerializeTo(b gopacket.SerializeBuffer, opts gopa // @ outline ( // @ b.ExchangePred() // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) - // @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2) + // @ unfold sl.Bytes(underlyingBufRes, 0, 2) binary.BigEndian.PutUint16(buf[0:2], uint16(0)) //Reserved - // @ fold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2) + // @ fold sl.Bytes(underlyingBufRes, 0, 2) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ b.RestoreMem(underlyingBufRes) // @ ) @@ -603,10 +603,10 @@ func (i *SCMPParameterProblem) SerializeTo(b gopacket.SerializeBuffer, opts gopa // @ b.ExchangePred() // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) - // @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4) + // @ unfold sl.Bytes(underlyingBufRes, 2, 4) // @ assert forall i int :: { &buf[2:4][i] } 0 <= i && i < 2 ==> &buf[2:4][i] == &buf[2 + i] binary.BigEndian.PutUint16(buf[2:4], i.Pointer) - // @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4) + // @ fold sl.Bytes(underlyingBufRes, 2, 4) // @ 
sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ b.RestoreMem(underlyingBufRes) @@ -616,7 +616,7 @@ func (i *SCMPParameterProblem) SerializeTo(b gopacket.SerializeBuffer, opts gopa // @ requires pb != nil // @ preserves pb.Mem() -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func decodeSCMPParameterProblem(data []byte, pb gopacket.PacketBuilder) (err error) { @@ -672,7 +672,7 @@ func (*SCMPTraceroute) NextLayerType() gopacket.LayerType { // DecodeFromBytes decodes the given bytes into this layer. // @ requires df != nil // @ requires i.NonInitMem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ preserves df.Mem() // @ ensures res == nil ==> i.Mem(data) // @ ensures res != nil ==> i.NonInitMem() @@ -690,60 +690,60 @@ func (i *SCMPTraceroute) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback // @ requires offset == 0 // @ preserves acc(&i.Identifier) // @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen - // @ requires acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) - // @ ensures acc(sl.AbsSlice_Bytes(data, 0, 2), R40) - // @ ensures acc(sl.AbsSlice_Bytes(data, 2, len(data)), R40) + // @ requires acc(sl.Bytes(data, 0, len(data)), R40) + // @ ensures acc(sl.Bytes(data, 0, 2), R40) + // @ ensures acc(sl.Bytes(data, 2, len(data)), R40) // @ decreases // @ outline ( // @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, R40) - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, 2), R40) + // @ unfold acc(sl.Bytes(data, 0, 2), R40) i.Identifier = binary.BigEndian.Uint16(data[offset : offset+2]) - // @ fold acc(sl.AbsSlice_Bytes(data, 0, 2), R40) + // @ fold acc(sl.Bytes(data, 0, 2), R40) // @ ) offset += 2 // @ requires offset == 2 // @ preserves acc(&i.Sequence) // @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen - // @ requires acc(sl.AbsSlice_Bytes(data, 2, len(data)), R40) - // @ ensures acc(sl.AbsSlice_Bytes(data, 2, 2+2), R40) - // @ ensures acc(sl.AbsSlice_Bytes(data, 2+2, len(data)), R40) + // @ requires acc(sl.Bytes(data, 2, len(data)), R40) + // @ ensures acc(sl.Bytes(data, 2, 2+2), R40) + // @ ensures acc(sl.Bytes(data, 2+2, len(data)), R40) // @ decreases // @ outline ( // @ sl.SplitByIndex_Bytes(data, 2, len(data), 2+2, R40) - // @ unfold acc(sl.AbsSlice_Bytes(data, 2, 2+2), R40) + // @ unfold acc(sl.Bytes(data, 2, 2+2), R40) // @ assert forall i int :: { &data[offset:offset+2][i] } 0 <= i && i < 2 ==> &data[offset + i] == &data[offset : offset+2][i] i.Sequence = binary.BigEndian.Uint16(data[offset : offset+2]) - // @ fold acc(sl.AbsSlice_Bytes(data, 2, 2+2), R40) + // @ fold acc(sl.Bytes(data, 2, 2+2), R40) // @ ) offset += 2 // @ requires offset == 2 + 2 // @ preserves acc(&i.IA) // @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen - // @ requires acc(sl.AbsSlice_Bytes(data, 2+2, len(data)), R40) - // @ ensures acc(sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes), R40) - // @ ensures acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, len(data)), R40) + // @ requires acc(sl.Bytes(data, 2+2, len(data)), R40) + // @ ensures acc(sl.Bytes(data, 2+2, 2+2+addr.IABytes), R40) + // @ ensures acc(sl.Bytes(data, 2+2+addr.IABytes, len(data)), R40) // @ decreases // @ outline ( // @ sl.SplitByIndex_Bytes(data, 2+2, len(data), 2+2+addr.IABytes, R40) - // 
@ unfold acc(sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes), R40) + // @ unfold acc(sl.Bytes(data, 2+2, 2+2+addr.IABytes), R40) // @ assert forall i int :: { &data[offset:offset+addr.IABytes][i] } 0 <= i && i < addr.IABytes ==> &data[offset + i] == &data[offset : offset+addr.IABytes][i] i.IA = addr.IA(binary.BigEndian.Uint64(data[offset : offset+addr.IABytes])) - // @ fold acc(sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes), R40) + // @ fold acc(sl.Bytes(data, 2+2, 2+2+addr.IABytes), R40) // @ ) offset += addr.IABytes // @ requires offset == 2 + 2 + addr.IABytes // @ preserves acc(&i.Interface) // @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen - // @ requires acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, len(data)), R40) - // @ ensures acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) - // @ ensures acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes+scmpRawInterfaceLen, len(data)), R40) + // @ requires acc(sl.Bytes(data, 2+2+addr.IABytes, len(data)), R40) + // @ ensures acc(sl.Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) + // @ ensures acc(sl.Bytes(data, 2+2+addr.IABytes+scmpRawInterfaceLen, len(data)), R40) // @ decreases // @ outline ( // @ sl.SplitByIndex_Bytes(data, 2+2+addr.IABytes, len(data), 2+2+addr.IABytes+scmpRawInterfaceLen, R40) - // @ unfold acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) + // @ unfold acc(sl.Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) // @ assert forall i int :: { &data[offset:offset+scmpRawInterfaceLen][i] } 0 <= i && i < scmpRawInterfaceLen ==> &data[offset + i] == &data[offset : offset+addr.IABytes][i] i.Interface = binary.BigEndian.Uint64(data[offset : offset+scmpRawInterfaceLen]) - // @ fold acc(sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) + // @ fold acc(sl.Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) // @ ) offset += scmpRawInterfaceLen // @ sl.CombineAtIndex_Bytes(data, 0, 2+2, 2, R40) @@ -786,9 +786,9 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S // @ outline ( // @ b.ExchangePred() // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) - // @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2) + // @ unfold sl.Bytes(underlyingBufRes, 0, 2) binary.BigEndian.PutUint16(buf[:2], i.Identifier) - // @ fold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2) + // @ fold sl.Bytes(underlyingBufRes, 0, 2) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ b.RestoreMem(underlyingBufRes) // @ ) @@ -804,10 +804,10 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S // @ b.ExchangePred() // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 2+2, writePerm) - // @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2, 2+2) + // @ unfold sl.Bytes(underlyingBufRes, 2, 2+2) // @ assert forall i int :: { &buf[offset:offset+2][i] } 0 <= i && i < 2 ==> &buf[offset:offset+2][i] == &buf[offset + i] binary.BigEndian.PutUint16(buf[offset:offset+2], i.Sequence) - // @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2, 2+2) + // @ fold sl.Bytes(underlyingBufRes, 2, 2+2) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 2+2, writePerm) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // 
@ b.RestoreMem(underlyingBufRes) @@ -824,10 +824,10 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S // @ b.ExchangePred() // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2+2, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2+2, len(underlyingBufRes), 2+2+addr.IABytes, writePerm) - // @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2+2, 2+2+addr.IABytes) + // @ unfold sl.Bytes(underlyingBufRes, 2+2, 2+2+addr.IABytes) // @ assert forall i int :: { &buf[offset:offset+addr.IABytes][i] } 0 <= i && i < addr.IABytes ==> &buf[offset:offset+addr.IABytes][i] == &buf[offset + i] binary.BigEndian.PutUint64(buf[offset:offset+addr.IABytes], uint64(i.IA)) - // @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2+2, 2+2+addr.IABytes) + // @ fold sl.Bytes(underlyingBufRes, 2+2, 2+2+addr.IABytes) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2+2, len(underlyingBufRes), 2+2+addr.IABytes, writePerm) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2+2, writePerm) // @ b.RestoreMem(underlyingBufRes) @@ -844,10 +844,10 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S // @ b.ExchangePred() // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2+2+addr.IABytes, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2+2+addr.IABytes, len(underlyingBufRes), 2+2+addr.IABytes+scmpRawInterfaceLen, writePerm) - // @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen) + // @ unfold sl.Bytes(underlyingBufRes, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen) // @ assert forall i int :: { &buf[offset:offset+scmpRawInterfaceLen][i] } 0 <= i && i < scmpRawInterfaceLen ==> &buf[offset:offset+scmpRawInterfaceLen][i] == &buf[offset + i] binary.BigEndian.PutUint64(buf[offset:offset+scmpRawInterfaceLen], i.Interface) - // @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen) + // @ fold sl.Bytes(underlyingBufRes, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2+2+addr.IABytes, len(underlyingBufRes), 2+2+addr.IABytes+scmpRawInterfaceLen, writePerm) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2+2+addr.IABytes, writePerm) // @ b.RestoreMem(underlyingBufRes) @@ -857,7 +857,7 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S // @ requires pb != nil // @ preserves pb.Mem() -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func decodeSCMPTraceroute(data []byte, pb gopacket.PacketBuilder) (err error) { @@ -901,10 +901,10 @@ func (*SCMPDestinationUnreachable) NextLayerType() gopacket.LayerType { // DecodeFromBytes decodes the given bytes into this layer. 
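// The contract that follows implements the ownership-exchange idiom used
// throughout slayers: on success the layer predicate i.Mem(data) absorbs the
// byte-slice permission, on failure both i.NonInitMem() and sl.Bytes flow
// back to the caller. The same shape in a minimal sketch (toy type, not part
// of slayers):
//
//	// @ requires t.NonInitMem() && sl.Bytes(data, 0, len(data))
//	// @ ensures  res == nil ==> t.Mem(data)
//	// @ ensures  res != nil ==> t.NonInitMem() && sl.Bytes(data, 0, len(data))
//	func (t *toyLayer) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res error)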
// @ requires df != nil // @ requires i.NonInitMem() -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves df.Mem() // @ ensures res == nil ==> i.Mem(data) -// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data))) +// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data))) // @ ensures res != nil ==> res.ErrorMem() // @ decreases func (i *SCMPDestinationUnreachable) DecodeFromBytes(data []byte, @@ -918,15 +918,15 @@ func (i *SCMPDestinationUnreachable) DecodeFromBytes(data []byte, // @ unfold i.NonInitMem() // @ defer fold i.Mem(data) // @ defer fold i.BaseLayer.Mem(data, minLength) - // @ unfold sl.AbsSlice_Bytes(data, 0, len(data)) + // @ unfold sl.Bytes(data, 0, len(data)) // @ assert forall i int :: { &data[minLength:][i] } 0 <= i && i < len(data) - minLength ==> &data[minLength:][i] == &data[minLength + i] i.BaseLayer = BaseLayer{ Contents: data[:minLength], Payload: data[minLength:], } // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[minLength:][l] == &i.Payload[l] - // @ fold sl.AbsSlice_Bytes(i.Contents, 0, len(i.Contents)) - // @ fold sl.AbsSlice_Bytes(i.Payload, 0, len(i.Payload)) + // @ fold sl.Bytes(i.Contents, 0, len(i.Contents)) + // @ fold sl.Bytes(i.Payload, 0, len(i.Payload)) return nil } @@ -948,16 +948,16 @@ func (i *SCMPDestinationUnreachable) SerializeTo(b gopacket.SerializeBuffer, opt // @ assert buf === underlyingBufRes[:4] // @ b.ExchangePred() // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm) - // @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 0, 4) + // @ unfold sl.Bytes(underlyingBufRes, 0, 4) copy(buf, make([]byte, 4) /*@, writePerm@*/) - // @ fold sl.AbsSlice_Bytes(underlyingBufRes, 0, 4) + // @ fold sl.Bytes(underlyingBufRes, 0, 4) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm) // @ b.RestoreMem(underlyingBufRes) return nil } // @ requires pb != nil -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves pb.Mem() // @ ensures err != nil ==> err.ErrorMem() // @ decreases @@ -1000,11 +1000,11 @@ func (*SCMPPacketTooBig) NextLayerType() gopacket.LayerType { // DecodeFromBytes decodes the given bytes into this layer. 
// @ requires df != nil
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ requires i.NonInitMem()
 // @ preserves df.Mem()
 // @ ensures res == nil ==> i.Mem(data)
-// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data)))
+// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data)))
 // @ ensures res != nil ==> res.ErrorMem()
 // @ decreases
 func (i *SCMPPacketTooBig) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res error) {
@@ -1017,33 +1017,33 @@ func (i *SCMPPacketTooBig) DecodeFromBytes(data []byte, df gopacket.DecodeFeedba
 	// @ defer fold i.Mem(data)
 	// @ preserves acc(&i.MTU)
 	// @ requires len(data) >= 4
-	// @ preserves sl.AbsSlice_Bytes(data, 0, len(data))
+	// @ preserves sl.Bytes(data, 0, len(data))
 	// @ decreases
 	// @ outline (
 	// @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm)
 	// @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(data, 2, 4)
+	// @ unfold sl.Bytes(data, 2, 4)
 	// @ assert forall i int :: { &data[2:4][i] } 0 <= i && i < 2 ==> &data[2:4][i] == &data[2 + i]
 	i.MTU = binary.BigEndian.Uint16(data[2:4])
-	// @ fold sl.AbsSlice_Bytes(data, 2, 4)
+	// @ fold sl.Bytes(data, 2, 4)
 	// @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm)
 	// @ sl.CombineAtIndex_Bytes(data, 0, len(data), 4, writePerm)
 	// @ )
 	// @ requires len(data) >= 4
 	// @ requires acc(&i.BaseLayer)
-	// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+	// @ requires sl.Bytes(data, 0, len(data))
 	// @ ensures i.BaseLayer.Mem(data, 4)
 	// @ decreases
 	// @ outline (
-	// @ unfold sl.AbsSlice_Bytes(data, 0, len(data))
+	// @ unfold sl.Bytes(data, 0, len(data))
 	// @ assert forall i int :: { &data[4:][i] } 0 <= i && i < len(data) - 4 ==> &data[4:][i] == &data[4 + i]
 	i.BaseLayer = BaseLayer{
 		Contents: data[:4],
 		Payload:  data[4:],
 	}
 	// @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[4+l] == &i.Payload[l]
-	// @ fold sl.AbsSlice_Bytes(i.Contents, 0, len(i.Contents))
-	// @ fold sl.AbsSlice_Bytes(i.Payload, 0, len(i.Payload))
+	// @ fold sl.Bytes(i.Contents, 0, len(i.Contents))
+	// @ fold sl.Bytes(i.Payload, 0, len(i.Payload))
 	// @ fold i.BaseLayer.Mem(data, 4)
 	// @ )
 	return nil
@@ -1074,9 +1074,9 @@ func (i *SCMPPacketTooBig) SerializeTo(b gopacket.SerializeBuffer, opts gopacket
 	// @ outline (
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ unfold sl.Bytes(underlyingBufRes, 0, 2)
 	binary.BigEndian.PutUint16(buf[0:2], uint16(0)) //Reserved
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ fold sl.Bytes(underlyingBufRes, 0, 2)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
 	// @ )
@@ -1090,10 +1090,10 @@ func (i *SCMPPacketTooBig) SerializeTo(b gopacket.SerializeBuffer, opts gopacket
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4)
+	// @ unfold sl.Bytes(underlyingBufRes, 2, 4)
 	// @ assert forall i int :: { &buf[2:4][i] } 0 <= i && i < 2 ==> &buf[2:4][i] == &buf[2 + i]
 	binary.BigEndian.PutUint16(buf[2:4], i.MTU)
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4)
+	// @ fold sl.Bytes(underlyingBufRes, 2, 4)
 	// @ 
sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ b.RestoreMem(underlyingBufRes) @@ -1103,7 +1103,7 @@ func (i *SCMPPacketTooBig) SerializeTo(b gopacket.SerializeBuffer, opts gopacket // @ requires pb != nil // @ preserves pb.Mem() -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func decodeSCMPPacketTooBig(data []byte, pb gopacket.PacketBuilder) (err error) { diff --git a/pkg/slayers/scmp_typecode.go b/pkg/slayers/scmp_typecode.go index f0fe0fb17..b7d45666a 100644 --- a/pkg/slayers/scmp_typecode.go +++ b/pkg/slayers/scmp_typecode.go @@ -20,7 +20,7 @@ package slayers import ( "encoding/binary" "fmt" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" //@ . "github.com/scionproto/scion/verification/utils/definitions" ) @@ -133,11 +133,11 @@ func (a SCMPTypeCode) String() string { // SerializeTo writes the SCMPTypeCode value to the buffer. // @ requires len(bytes) >= 2 -// @ preserves slices.AbsSlice_Bytes(bytes, 0, 2) +// @ preserves sl.Bytes(bytes, 0, 2) // @ decreases func (a SCMPTypeCode) SerializeTo(bytes []byte) { - //@ unfold slices.AbsSlice_Bytes(bytes, 0, 2) - //@ defer fold slices.AbsSlice_Bytes(bytes, 0, 2) + //@ unfold sl.Bytes(bytes, 0, 2) + //@ defer fold sl.Bytes(bytes, 0, 2) binary.BigEndian.PutUint16(bytes, uint16(a)) } diff --git a/private/topology/linktype.go b/private/topology/linktype.go index 4948c9c93..d8b47579f 100644 --- a/private/topology/linktype.go +++ b/private/topology/linktype.go @@ -22,7 +22,7 @@ import ( "github.com/scionproto/scion/pkg/private/serrors" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) // LinkType describes inter-AS links. @@ -44,7 +44,7 @@ const ( Peer LinkType = 4 ) -//@ decreases +// @ decreases func (l LinkType) String() string { if l == Unset { return "unset" @@ -53,57 +53,57 @@ func (l LinkType) String() string { if err != nil { return err.Error() } - //@ unfold slices.AbsSlice_Bytes(s, 0, len(s)) + //@ unfold sl.Bytes(s, 0, len(s)) return string(s) } // LinkTypeFromString returns the numerical link type associated with a string description. If the // string is not recognized, an Unset link type is returned. The matching is case-insensitive. 
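// LinkTypeFromString allocates a fresh byte slice and folds it into sl.Bytes
// before handing it to UnmarshalText. The allocate-then-fold idiom in
// isolation (toy helper, not part of this package):
//
//	// @ requires 0 <= n
//	// @ ensures sl.Bytes(res, 0, len(res)) && len(res) == n
//	// @ decreases
//	func freshBuffer(n int) (res []byte) {
//		buf := make([]byte, n) // make yields acc(&buf[i]) for every cell
//		// @ fold sl.Bytes(buf, 0, len(buf))
//		return buf
//	}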
-//@ decreases +// @ decreases func LinkTypeFromString(s string) (res LinkType) { var l /*@@@*/ LinkType tmp := []byte(s) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) if err := l.UnmarshalText(tmp); err != nil { return Unset } return l } -//@ ensures (l == Core || l == Parent || l == Child || l == Peer) == (err == nil) -//@ ensures err == nil ==> slices.AbsSlice_Bytes(res, 0, len(res)) -//@ ensures err != nil ==> err.ErrorMem() -//@ decreases +// @ ensures (l == Core || l == Parent || l == Child || l == Peer) == (err == nil) +// @ ensures err == nil ==> sl.Bytes(res, 0, len(res)) +// @ ensures err != nil ==> err.ErrorMem() +// @ decreases func (l LinkType) MarshalText() (res []byte, err error) { switch l { case Core: tmp := []byte("core") - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) return tmp, nil case Parent: tmp := []byte("parent") - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) return tmp, nil case Child: tmp := []byte("child") - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) return tmp, nil case Peer: tmp := []byte("peer") - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) return tmp, nil default: return nil, serrors.New("invalid link type") } } -//@ preserves acc(l) -//@ preserves acc(slices.AbsSlice_Bytes(data, 0, len(data)), R15) -//@ ensures err != nil ==> err.ErrorMem() -//@ decreases +// @ preserves acc(l) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R15) +// @ ensures err != nil ==> err.ErrorMem() +// @ decreases func (l *LinkType) UnmarshalText(data []byte) (err error) { - //@ unfold acc(slices.AbsSlice_Bytes(data, 0, len(data)), R15) - //@ ghost defer fold acc(slices.AbsSlice_Bytes(data, 0, len(data)), R15) + //@ unfold acc(sl.Bytes(data, 0, len(data)), R15) + //@ ghost defer fold acc(sl.Bytes(data, 0, len(data)), R15) switch strings.ToLower(string(data)) { case "core": *l = Core diff --git a/private/underlay/conn/conn.go b/private/underlay/conn/conn.go index 06d706d2a..eab1a0a9c 100644 --- a/private/underlay/conn/conn.go +++ b/private/underlay/conn/conn.go @@ -33,7 +33,7 @@ import ( "github.com/scionproto/scion/pkg/private/serrors" "github.com/scionproto/scion/private/underlay/sockctrl" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) // Messages is a list of ipX.Messages. It is necessary to hide the type alias @@ -45,7 +45,7 @@ type Conn interface { //@ pred Mem() // (VerifiedSCION) Reads a message to b. Returns the number of read bytes. 
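// Note the permission split in the contracts below: the read methods take the
// buffer predicate at full (write) permission, since reading from the network
// fills b, while the write methods take only a fraction (R10/R15), i.e.
// read-only access to b. Schematically (not the actual interface):
//
//	preserves sl.Bytes(b, 0, len(b))            // reader may mutate b
//	preserves acc(sl.Bytes(b, 0, len(b)), R10)  // writer only reads b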
//@ requires acc(Mem(), _) - //@ preserves slices.AbsSlice_Bytes(b, 0, len(b)) + //@ preserves sl.Bytes(b, 0, len(b)) //@ ensures err == nil ==> 0 <= n && n <= len(b) //@ ensures err == nil ==> acc(addr.Mem(), _) //@ ensures err != nil ==> err.ErrorMem() @@ -56,13 +56,13 @@ type Conn interface { //@ ensures err != nil ==> err.ErrorMem() ReadBatch(m Messages) (n int, err error) //@ requires acc(Mem(), _) - //@ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R10) + //@ preserves acc(sl.Bytes(b, 0, len(b)), R10) //@ ensures err == nil ==> 0 <= n && n <= len(b) //@ ensures err != nil ==> err.ErrorMem() Write(b []byte) (n int, err error) //@ requires acc(u.Mem(), _) //@ requires acc(Mem(), _) - //@ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R10) + //@ preserves acc(sl.Bytes(b, 0, len(b)), R10) //@ ensures err == nil ==> 0 <= n && n <= len(b) //@ ensures err != nil ==> err.ErrorMem() WriteTo(b []byte, u *net.UDPAddr) (n int, err error) @@ -129,7 +129,7 @@ func New(listen, remote *net.UDPAddr, cfg *Config) (res Conn, e error) { assert remote != nil ==> a == remote assert remote == nil ==> a == listen unfold acc(a.Mem(), R15) - unfold acc(slices.AbsSlice_Bytes(a.IP, 0, len(a.IP)), R15) + unfold acc(sl.Bytes(a.IP, 0, len(a.IP)), R15) assert forall i int :: { &a.IP[i] } 0 <= i && i < len(a.IP) ==> acc(&a.IP[i], R15) @*/ if a.IP.To4( /*@ false @*/ ) != nil { @@ -395,7 +395,7 @@ func (cc *connUDPBase) initConnUDP(network string, laddr, raddr *net.UDPAddr, cf } // @ preserves acc(c.Mem(), _) -// @ preserves slices.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves sl.Bytes(b, 0, len(b)) // @ preserves unfolding acc(c.Mem(), _) in c.conn == underlyingConn // @ ensures err == nil ==> 0 <= n && n <= len(b) // @ ensures err == nil ==> acc(addr.Mem(), _) @@ -406,7 +406,7 @@ func (c *connUDPBase) ReadFrom(b []byte /*@, ghost underlyingConn *net.UDPConn @ } // @ preserves acc(c.Mem(), _) -// @ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15) +// @ preserves acc(sl.Bytes(b, 0, len(b)), R15) // @ preserves unfolding acc(c.Mem(), _) in c.conn == underlyingConn // @ ensures err == nil ==> 0 <= n && n <= len(b) // @ ensures err != nil ==> err.ErrorMem() @@ -418,7 +418,7 @@ func (c *connUDPBase) Write(b []byte /*@, ghost underlyingConn *net.UDPConn @*/) // @ requires acc(dst.Mem(), _) // @ preserves acc(c.Mem(), _) // @ preserves unfolding acc(c.Mem(), _) in c.conn == underlyingConn -// @ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15) +// @ preserves acc(sl.Bytes(b, 0, len(b)), R15) // @ ensures err == nil ==> 0 <= n && n <= len(b) // @ ensures err != nil ==> err.ErrorMem() func (c *connUDPBase) WriteTo(b []byte, dst *net.UDPAddr /*@, ghost underlyingConn *net.UDPConn @*/) (n int, err error) { @@ -475,8 +475,8 @@ func NewReadMessages(n int) (res Messages) { for i := range m /*@ with i0 @*/ { // Allocate a single-element, to avoid allocations when setting the buffer. m[i].Buffers = make([][]byte, 1) - //@ fold slices.AbsSlice_Bytes(m[i].Buffers[0], 0, len(m[i].Buffers[0])) - //@ fold slices.AbsSlice_Bytes(m[i].OOB, 0, len(m[i].OOB)) + //@ fold sl.Bytes(m[i].Buffers[0], 0, len(m[i].Buffers[0])) + //@ fold sl.Bytes(m[i].OOB, 0, len(m[i].OOB)) //@ fold m[i].Mem() } return m diff --git a/private/underlay/conn/conn_spec.gobra b/private/underlay/conn/conn_spec.gobra index 298e98721..92fcef78f 100644 --- a/private/underlay/conn/conn_spec.gobra +++ b/private/underlay/conn/conn_spec.gobra @@ -20,7 +20,7 @@ import ( "net" . 
"github.com/scionproto/scion/verification/utils/definitions" - "github.com/scionproto/scion/verification/utils/slices" + sl "github.com/scionproto/scion/verification/utils/slices" "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" @@ -66,7 +66,7 @@ pred (c *Config) Mem() { *connUDPIPv4 implements Conn requires acc(c.Mem(), _) -preserves slices.AbsSlice_Bytes(b, 0, len(b)) +preserves sl.Bytes(b, 0, len(b)) ensures err == nil ==> 0 <= n && n <= len(b) ensures err == nil ==> acc(addr.Mem(), _) ensures err != nil ==> err.ErrorMem() @@ -83,7 +83,7 @@ func (c *connUDPIPv4) ReadFrom(b []byte) (n int, addr *net.UDPAddr, err error) { } preserves acc(c.Mem(), _) -preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15) +preserves acc(sl.Bytes(b, 0, len(b)), R15) ensures err == nil ==> 0 <= n && n <= len(b) ensures err != nil ==> err.ErrorMem() func (c *connUDPIPv4) Write(b []byte) (n int, err error) { @@ -101,7 +101,7 @@ func (c *connUDPIPv4) Write(b []byte) (n int, err error) { requires acc(dst.Mem(), _) preserves acc(c.Mem(), _) -preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15) +preserves acc(sl.Bytes(b, 0, len(b)), R15) ensures err == nil ==> 0 <= n && n <= len(b) ensures err != nil ==> err.ErrorMem() func (c *connUDPIPv4) WriteTo(b []byte, dst *net.UDPAddr) (n int, err error) { @@ -151,7 +151,7 @@ func (c *connUDPIPv4) Close() (err error) { *connUDPIPv6 implements Conn preserves acc(c.Mem(), _) -preserves slices.AbsSlice_Bytes(b, 0, len(b)) +preserves sl.Bytes(b, 0, len(b)) ensures err == nil ==> 0 <= n && n <= len(b) ensures err == nil ==> acc(addr.Mem(), _) ensures err != nil ==> err.ErrorMem() @@ -169,7 +169,7 @@ func (c *connUDPIPv6) ReadFrom(b []byte) (n int, addr *net.UDPAddr, err error) { } preserves acc(c.Mem(), _) -preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15) +preserves acc(sl.Bytes(b, 0, len(b)), R15) ensures err == nil ==> 0 <= n && n <= len(b) ensures err != nil ==> err.ErrorMem() func (c *connUDPIPv6) Write(b []byte) (n int, err error) { @@ -187,7 +187,7 @@ func (c *connUDPIPv6) Write(b []byte) (n int, err error) { requires acc(dst.Mem(), _) preserves acc(c.Mem(), _) -preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15) +preserves acc(sl.Bytes(b, 0, len(b)), R15) ensures err == nil ==> 0 <= n && n <= len(b) ensures err != nil ==> err.ErrorMem() func (c *connUDPIPv6) WriteTo(b []byte, dst *net.UDPAddr) (n int, err error) { diff --git a/router/bfd_spec.gobra b/router/bfd_spec.gobra index 367fe90d4..84f2e4aba 100644 --- a/router/bfd_spec.gobra +++ b/router/bfd_spec.gobra @@ -32,6 +32,6 @@ pred (b *bfdSend) Mem() { acc(b.scn) && acc(b.ohp) && b.mac.Mem() && - sl.AbsSlice_Bytes(b.macBuffer, 0, path.MACBufferSize) && + sl.Bytes(b.macBuffer, 0, path.MACBufferSize) && b.buffer.Mem() } \ No newline at end of file diff --git a/router/dataplane.go b/router/dataplane.go index f77997d7e..da01b5d9f 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -103,7 +103,7 @@ type bfdSession interface { // @ requires acc(Mem(), _) // @ requires msg.Mem(ub) // (VerifiedSCION) an implementation must copy the fields it needs from msg - // @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) + // @ preserves sl.Bytes(ub, 0, len(ub)) // @ ensures msg.NonInitMem() // @ decreases 0 if sync.IgnoreBlockingForTermination() ReceiveMessage(msg *layers.BFD /*@ , ghost ub []byte @*/) @@ -144,7 +144,7 @@ type BatchConn interface { ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place @*/) (n int, err error) // @ requires acc(addr.Mem(), _) // @ 
requires acc(Mem(), _) - // @ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R10) + // @ preserves acc(sl.Bytes(b, 0, len(b)), R10) // @ ensures err == nil ==> 0 <= n && n <= len(b) // @ ensures err != nil ==> err.ErrorMem() WriteTo(b []byte, addr *net.UDPAddr) (n int, err error) @@ -274,7 +274,7 @@ func (d *DataPlane) SetIA(ia addr.IA) (e error) { // @ requires !d.IsRunning() // @ requires !d.KeyIsSet() // @ requires len(key) > 0 -// @ requires sl.AbsSlice_Bytes(key, 0, len(key)) +// @ requires sl.Bytes(key, 0, len(key)) // @ preserves d.mtx.LockP() // @ preserves d.mtx.LockInv() == MutexInvariant! // @ ensures acc(d.Mem(), OutMutexPerm) @@ -311,9 +311,9 @@ func (d *DataPlane) SetKey(key []byte) (res error) { } // @ d.key = &key verScionTemp := - // @ requires acc(&key, _) && acc(sl.AbsSlice_Bytes(key, 0, len(key)), _) + // @ requires acc(&key, _) && acc(sl.Bytes(key, 0, len(key)), _) // @ requires scrypto.ValidKeyForHash(key) - // @ ensures acc(&key, _) && acc(sl.AbsSlice_Bytes(key, 0, len(key)), _) + // @ ensures acc(&key, _) && acc(sl.Bytes(key, 0, len(key)), _) // @ ensures h != nil && h.Mem() // @ decreases func /*@ f @*/ () (h hash.Hash) { @@ -835,12 +835,12 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // to be able to perform this unfold. // @ unfold msgs[i0].Mem() msg := msgs[i0] - // @ ensures sl.AbsSlice_Bytes(tmp, 0, len(tmp)) + // @ ensures sl.Bytes(tmp, 0, len(tmp)) // @ decreases // @ outline( tmp := make([]byte, bufSize) // @ assert forall i int :: { &tmp[i] } 0 <= i && i < len(tmp) ==> acc(&tmp[i]) - // @ fold sl.AbsSlice_Bytes(tmp, 0, len(tmp)) + // @ fold sl.Bytes(tmp, 0, len(tmp)) // @ ) // @ assert msgs[i0] === msg msg.Buffers[0] = tmp @@ -853,7 +853,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ outline ( writeMsgs := make(underlayconn.Messages, 1) writeMsgs[0].Buffers = make([][]byte, 1) - // @ fold sl.AbsSlice_Bytes(writeMsgs[0].OOB, 0, len(writeMsgs[0].OOB)) + // @ fold sl.Bytes(writeMsgs[0].OOB, 0, len(writeMsgs[0].OOB)) // @ sl.NilAcc_Bytes() // @ fold writeMsgInv(writeMsgs) // @ ) @@ -1007,8 +1007,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf == ioValSeq[i0] // @ assert path.ifsToIO_ifs(processor.getIngressID()) == ioIngressID // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm) - // @ assert sl.AbsSlice_Bytes(tmpBuf, 0, p.N) - // @ assert sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)) + // @ assert sl.Bytes(tmpBuf, 0, p.N) + // @ assert sl.Bytes(tmpBuf, 0, len(tmpBuf)) result, err /*@ , addrAliasesPkt, newAbsPkt @*/ := processor.processPkt(tmpBuf, srcAddr /*@, ioLock, ioSharedArg, dp @*/) // @ fold scmpErr.Mem() @@ -1018,7 +1018,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta case errors.As(err, &scmpErr): // @ unfold d.validResult(result, addrAliasesPkt) // @ ghost if addrAliasesPkt && result.OutAddr != nil { - // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15) + // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.Bytes(tmpBuf, 0, len(tmpBuf)), R15) // @ } // @ unfold scmpErr.Mem() if !scmpErr.TypeCode.InfoMsg() { @@ -1032,11 +1032,11 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta default: // @ unfold d.validResult(result, addrAliasesPkt) // @ ghost if addrAliasesPkt { - // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), 
R15) + // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.Bytes(tmpBuf, 0, len(tmpBuf)), R15) // @ } // @ sl.CombineRange_Bytes(p.Buffers[0], 0, p.N, writePerm) // @ assert acc(m) - // @ assert sl.AbsSlice_Bytes(m.OOB, 0, len(m.OOB)) + // @ assert sl.Bytes(m.OOB, 0, len(m.OOB)) // @ assert (m.Addr != nil ==> acc(m.Addr.Mem(), _)) // @ assert 0 <= m.N // @ msgs[:pkts][i0].IsActive = false @@ -1051,7 +1051,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta if result.OutConn == nil { // e.g. BFD case no message is forwarded // @ unfold d.validResult(result, addrAliasesPkt) // @ ghost if addrAliasesPkt { - // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15) + // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.Bytes(tmpBuf, 0, len(tmpBuf)), R15) // @ } // @ sl.CombineRange_Bytes(p.Buffers[0], 0, p.N, writePerm) // @ msgs[:pkts][i0].IsActive = false @@ -1103,7 +1103,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ ghost ioLock.Unlock() // @ unfold acc(writeMsgs[0].Mem(), R50) // @ ghost if addrAliasesPkt && result.OutAddr != nil { - // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15) + // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.Bytes(tmpBuf, 0, len(tmpBuf)), R15) // @ } // @ sl.CombineRange_Bytes(p.Buffers[0], 0, p.N, writePerm) // @ msgs[:pkts][i0].IsActive = false @@ -1388,7 +1388,7 @@ func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcess epicInput: make([]byte, libepic.MACBufferSize), }, } - // @ fold sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) + // @ fold sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ fold slayers.PathPoolMem(p.scionLayer.pathPool, p.scionLayer.pathPoolRaw) p.scionLayer.RecyclePaths() // @ fold p.scionLayer.NonInitMem() @@ -1427,7 +1427,7 @@ func (p *scionPacketProcessor) reset() (err error) { } // @ requires p.sInit() -// @ requires sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)) +// @ requires sl.Bytes(rawPkt, 0, len(rawPkt)) // @ requires acc(srcAddr.Mem(), _) // @ requires let d := p.sInitD() in // @ acc(d.Mem(), _) && @@ -1440,13 +1440,13 @@ func (p *scionPacketProcessor) reset() (err error) { // @ ensures p.sInitD() == old(p.sInitD()) // @ ensures p.getIngressID() == old(p.getIngressID()) // @ ensures p.sInitD().validResult(respr, addrAliasesPkt) -// @ ensures acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), 1 - R15) +// @ ensures acc(sl.Bytes(rawPkt, 0, len(rawPkt)), 1 - R15) // @ ensures addrAliasesPkt ==> ( // @ respr.OutAddr != nil && -// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), R15))) -// @ ensures !addrAliasesPkt ==> acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), R15) +// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.Bytes(rawPkt, 0, len(rawPkt)), R15))) +// @ ensures !addrAliasesPkt ==> acc(sl.Bytes(rawPkt, 0, len(rawPkt)), R15) // @ ensures respr.OutPkt !== rawPkt && respr.OutPkt != nil ==> -// @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec // @ requires dp.Valid() @@ -1520,7 +1520,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, assert processed[0] ==> p.hbhLayer.Mem(ubHbhLayer) assert processed[1] ==> p.e2eLayer.Mem(ubE2eLayer) @*/ - // @ assert acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), HalfPerm) + // @ assert 
acc(sl.Bytes(ub, 0, len(ub)), HalfPerm) pld /*@ , start, end @*/ := p.lastLayer.LayerPayload( /*@ ub @*/ ) // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) // @ sl.NilAcc_Bytes() @@ -1570,7 +1570,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ o := offsets[lastLayerIdx] // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) // @ } - // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) + // @ assert sl.Bytes(p.rawPkt, 0, len(p.rawPkt)) // @ unfold acc(p.d.Mem(), _) // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) // @ assert !(reveal slayers.IsSupportedPkt(p.rawPkt)) @@ -1584,7 +1584,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ o := offsets[lastLayerIdx] // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) // @ } - // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) + // @ assert sl.Bytes(p.rawPkt, 0, len(p.rawPkt)) v1, v2 /*@ , addrAliasesPkt, newAbsPkt @*/ := p.processSCION( /*@ p.rawPkt, ub == nil, llStart, llEnd, ioLock, ioSharedArg, dp @*/ ) // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, v2 == nil, hasHbhLayer, hasE2eLayer) // @ fold p.sInit() @@ -1609,7 +1609,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ requires acc(&p.ingressID, R20) // @ requires acc(p.d.Mem(), _) // @ requires p.bfdLayer.NonInitMem() -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves sl.Bytes(data, 0, len(data)) // @ ensures acc(&p.d, R20) // @ ensures acc(&p.ingressID, R20) // @ ensures p.bfdLayer.NonInitMem() @@ -1644,11 +1644,11 @@ func (p *scionPacketProcessor) processInterBFD(oh *onehop.Path, data []byte) (er // @ requires acc(&p.srcAddr, R20) && acc(p.srcAddr.Mem(), _) // @ requires p.bfdLayer.NonInitMem() // @ requires acc(p.d.Mem(), _) -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ ensures acc(&p.d, R20) // @ ensures acc(&p.srcAddr, R20) // @ ensures p.bfdLayer.NonInitMem() -// @ ensures sl.AbsSlice_Bytes(data, 0, len(data)) +// @ ensures sl.Bytes(data, 0, len(data)) // @ ensures res != nil ==> res.ErrorMem() // @ decreases 0 if sync.IgnoreBlockingForTermination() func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { @@ -1717,7 +1717,7 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ requires acc(&p.rawPkt, R1) && ub === p.rawPkt // @ requires acc(&p.path) // @ requires p.scionLayer.Mem(ub) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) // @ requires acc(&p.segmentChange) && !p.segmentChange // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) // @ preserves acc(&p.lastLayer, R10) @@ -1731,7 +1731,7 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ preserves acc(&p.hopField) // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) -// @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) +// @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) // @ ensures acc(&p.segmentChange) // @ ensures acc(&p.ingressID, R20) @@ -1740,14 +1740,14 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ ensures acc(&p.rawPkt, R1) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() -// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), 1 
- R15) +// @ ensures acc(sl.Bytes(ub, 0, len(ub)), 1 - R15) // @ ensures p.d.validResult(respr, addrAliasesPkt) // @ ensures addrAliasesPkt ==> ( // @ respr.OutAddr != nil && -// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15))) -// @ ensures !addrAliasesPkt ==> acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) +// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.Bytes(ub, 0, len(ub)), R15))) +// @ ensures !addrAliasesPkt ==> acc(sl.Bytes(ub, 0, len(ub)), R15) // @ ensures respr.OutPkt !== ub && respr.OutPkt != nil ==> -// @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec // @ requires p.d.DpAgreesWithSpec(dp) @@ -1917,8 +1917,8 @@ func (p *scionPacketProcessor) packSCMP( // @ requires acc(&p.path, R20) // @ requires p.path === p.scionLayer.GetPath(ub) // @ requires acc(&p.hopField) && acc(&p.infoField) -// @ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1) -// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1) +// @ requires acc(sl.Bytes(ub, 0, len(ub)), R1) +// @ ensures acc(sl.Bytes(ub, 0, len(ub)), R1) // @ ensures acc(&p.d, R50) // @ ensures acc(p.scionLayer.Mem(ub), R6) // @ ensures acc(&p.path, R20) @@ -1994,7 +1994,7 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec // @ ensures reserr != nil && respr.OutPkt != nil ==> @@ -2037,7 +2037,7 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr // @ ensures acc(&p.ingressID, R21) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // @ ensures reserr == nil && p.infoField.ConsDir ==> ( // @ p.ingressID == 0 || p.hopField.ConsIngress == p.ingressID) @@ -2086,12 +2086,12 @@ func (p *scionPacketProcessor) validateIngressID( /*@ ghost oldPkt io.IO_pkt2 @* // @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec -// @ requires acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R20) +// @ requires acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R20) // @ requires slayers.ValidPktMetaHdr(ubScionL) && p.scionLayer.EqAbsHeader(ubScionL) -// @ ensures acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R20) +// @ ensures acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R20) // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ubScionL) // @ ensures reserr == nil ==> p.DstIsLocalIngressID(ubScionL) // @ ensures reserr == nil ==> p.LastHopLen(ubScionL) @@ -2197,7 +2197,7 @@ func (p *scionPacketProcessor) invalidDstIA() (processResult, error) { // @ p.path.GetCurrINF(ubPath) <= p.path.GetNumINF(ubPath) // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) // @ requires acc(&p.srcAddr, R20) && 
acc(p.srcAddr.Mem(), _) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R4) +// @ preserves acc(sl.Bytes(ub, 0, len(ub)), R4) // @ ensures acc(&p.path, R15) // @ ensures acc(p.scionLayer.Mem(ub), R4) // @ ensures acc(&p.ingressID, R21) @@ -2255,7 +2255,7 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ // @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures reserr == nil ==> respr === processResult{} -// @ ensures reserr != nil ==> sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ ensures reserr != nil ==> sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec // @ requires dp.Valid() @@ -2370,7 +2370,7 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh // @ requires acc(p.scionLayer.Mem(ub), R19) // @ requires p.path === p.scionLayer.GetPath(ub) // @ requires acc(&p.hopField, R20) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) // @ requires acc(&p.ingressID, R21) // preconditions for IO: // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) @@ -2381,7 +2381,7 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh // @ requires p.EqAbsInfoField(absPkt(ub)) // @ ensures acc(&p.ingressID, R21) // @ ensures acc(&p.hopField, R20) -// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ ensures sl.Bytes(ub, 0, len(ub)) // @ ensures acc(&p.infoField) // @ ensures acc(&p.path, R20) // @ ensures acc(p.scionLayer.Mem(ub), R19) @@ -2482,16 +2482,16 @@ func (p *scionPacketProcessor) currentHopPointer( /*@ ghost ubScionL []byte @*/ // @ requires acc(&p.hopField, R20) // @ preserves acc(&p.mac, R20) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R20) -// @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) +// @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) // @ ensures acc(&p.infoField, R20) // @ ensures acc(&p.hopField, R20) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures len(p.cachedMac) == path.MACBufferSize -// @ ensures sl.AbsSlice_Bytes(p.cachedMac, 0, len(p.cachedMac)) +// @ ensures sl.Bytes(p.cachedMac, 0, len(p.cachedMac)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec // @ requires len(oldPkt.CurrSeg.Future) > 0 @@ -2503,8 +2503,8 @@ func (p *scionPacketProcessor) currentHopPointer( /*@ ghost ubScionL []byte @*/ // @ decreases func (p *scionPacketProcessor) verifyCurrentMAC( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { fullMac := path.FullMAC(p.mac, p.infoField, p.hopField, p.macBuffers.scionInput) - // @ fold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R21) - // @ defer unfold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R21) + // @ fold acc(sl.Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R21) + // @ defer unfold acc(sl.Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R21) // @ sl.SplitRange_Bytes(fullMac, 0, path.MacLen, R21) // @ ghost defer sl.CombineRange_Bytes(fullMac, 0, path.MacLen, R21) if 
subtle.ConstantTimeCompare(p.hopField.Mac[:path.MacLen], fullMac[:path.MacLen]) == 0 { @@ -2538,17 +2538,17 @@ func (p *scionPacketProcessor) verifyCurrentMAC( /*@ ghost oldPkt io.IO_pkt2, gh // @ requires acc(&p.d, R15) // @ requires acc(p.d.Mem(), _) // @ requires p.d.getValSvc() != nil -// @ requires acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) +// @ requires acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R15) // @ preserves acc(p.scionLayer.Mem(ubScionL), R10) // @ ensures acc(&p.d, R15) // @ ensures p.d.validResult(respr, addrAliasesUb) -// @ ensures !addrAliasesUb ==> acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) +// @ ensures !addrAliasesUb ==> acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R15) // @ ensures !addrAliasesUb && resaddr != nil ==> acc(resaddr.Mem(), _) // @ ensures addrAliasesUb ==> resaddr != nil // @ ensures addrAliasesUb ==> acc(resaddr.Mem(), R15) -// @ ensures addrAliasesUb ==> (acc(resaddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15)) +// @ ensures addrAliasesUb ==> (acc(resaddr.Mem(), R15) --* acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R15)) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> !addrAliasesUb // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec @@ -2563,7 +2563,7 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( switch { case errors.Is(err, noSVCBackend): // @ ghost if addrAliases { - // @ apply acc(a.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) + // @ apply acc(a.Mem(), R15) --* acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R15) // @ } // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") r, err := p.packSCMP( @@ -2580,12 +2580,12 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( // @ requires acc(&p.path, R20) // @ requires p.scionLayer.Mem(ub) // @ requires p.path === p.scionLayer.GetPath(ub) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) // @ requires acc(&p.infoField) // @ requires acc(&p.hopField, R20) // @ ensures acc(&p.infoField) // @ ensures acc(&p.hopField, R20) -// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ ensures sl.Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() @@ -2657,11 +2657,11 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ requires acc(&p.path, R20) // @ requires p.scionLayer.Mem(ub) // @ requires p.path == p.scionLayer.GetPath(ub) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) // @ preserves acc(&p.segmentChange) // @ preserves acc(&p.hopField) // @ preserves acc(&p.infoField) -// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ ensures sl.Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr == nil ==> p.scionLayer.UBPath(ub) === old(p.scionLayer.UBPath(ub)) @@ -2744,7 +2744,7 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr process // @ requires acc(&p.infoField, R5) && acc(&p.hopField, R5) // @ requires p.path.GetCurrINF(ubPath) <= p.path.GetNumINF(ubPath) // @ requires p.path.GetCurrHF(ubPath) <= p.path.GetNumHops(ubPath) -// @ preserves 
acc(sl.AbsSlice_Bytes(ubPath, 0, len(ubPath)), R5)
+// @ preserves acc(sl.Bytes(ubPath, 0, len(ubPath)), R5)
 // @ ensures acc(&p.path, R20)
 // @ ensures acc(p.path.Mem(ubPath), R5)
 // @ ensures acc(&p.infoField, R5) && acc(&p.hopField, R5)
@@ -2801,7 +2801,7 @@ func (p *scionPacketProcessor) egressInterface( /*@ ghost oldPkt io.IO_pkt2 @*/
 // @ ensures acc(&p.d, R20)
 // @ ensures p.d.validResult(respr, false)
 // @ ensures respr.OutPkt != nil ==>
-// @ 	reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
+// @ 	reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
 // @ requires len(oldPkt.CurrSeg.Future) > 0
@@ -2845,7 +2845,7 @@ func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/
 // @ requires acc(p.scionLayer.Mem(ub), R10)
 // @ requires p.path === p.scionLayer.GetPath(ub)
 // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _)
-// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub))
+// @ requires sl.Bytes(ub, 0, len(ub))
 // @ requires acc(&p.ingressID, R21)
 // @ requires acc(&p.hopField)
 // @ preserves acc(&p.lastLayer, R19)
@@ -2857,13 +2857,13 @@ func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/
 // @ preserves acc(&p.infoField, R20)
 // @ ensures acc(&p.hopField)
 // @ ensures acc(&p.ingressID, R21)
-// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub))
+// @ ensures sl.Bytes(ub, 0, len(ub))
 // @ ensures acc(&p.path, R20)
 // @ ensures acc(p.scionLayer.Mem(ub), R10)
 // @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _)
 // @ ensures p.d.validResult(respr, false)
 // @ ensures respr.OutPkt != nil ==>
-// @ 	reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
+// @ 	reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
 // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
@@ -2952,7 +2952,7 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) {
 // @ requires acc(p.scionLayer.Mem(ub), R13)
 // @ requires p.path === p.scionLayer.GetPath(ub)
 // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _)
-// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub))
+// @ requires sl.Bytes(ub, 0, len(ub))
 // @ requires acc(&p.infoField, R20)
 // @ requires acc(&p.hopField)
 // @ preserves acc(&p.lastLayer, R19)
@@ -2964,13 +2964,13 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) {
 // @ preserves acc(&p.ingressID, R21)
 // @ ensures acc(&p.infoField, R20)
 // @ ensures acc(&p.hopField)
-// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub))
+// @ ensures sl.Bytes(ub, 0, len(ub))
 // @ ensures acc(&p.path, R20)
 // @ ensures acc(p.scionLayer.Mem(ub), R13)
 // @ ensures acc(&p.d, R20)
 // @ ensures p.d.validResult(respr, false)
 // @ ensures respr.OutPkt != nil ==>
-// @ 	reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt))
+// @ 	reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt))
 // @ ensures reserr != nil ==> reserr.ErrorMem()
 // contracts for IO-spec
 // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
@@ -3056,13 +3056,13 @@ func (p *scionPacketProcessor) egressRouterAlertFlag() (res *bool) {
 // @ requires acc(&p.lastLayer, R20)
 // @ requires p.lastLayer != nil && acc(p.lastLayer.Mem(ubLastLayer), R15)
 // @ requires acc(&p.d, R21) && acc(p.d.Mem(), _)
-// @ preserves acc(sl.AbsSlice_Bytes(ubLastLayer, 0, len(ubLastLayer)), R1)
+// @ preserves acc(sl.Bytes(ubLastLayer, 0, len(ubLastLayer)), R1)
 // @ 
ensures acc(&p.lastLayer, R20) // @ ensures acc(p.lastLayer.Mem(ubLastLayer), R15) // @ ensures acc(&p.d, R21) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec // @ ensures reserr != nil && respr.OutPkt != nil ==> @@ -3124,7 +3124,7 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr == nil ==> // @ int(p.scionLayer.GetPayloadLen(ubScionL)) == len(p.scionLayer.GetPayload(ubScionL)) // @ ensures reserr != nil ==> reserr.ErrorMem() @@ -3158,7 +3158,7 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ requires acc(&p.path, R10) // @ requires p.scionLayer.Mem(ub) // @ requires p.path == p.scionLayer.GetPath(ub) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) // @ requires acc(&p.ingressID, R20) // @ requires acc(&p.segmentChange) && !p.segmentChange // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) @@ -3172,21 +3172,21 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ preserves acc(&p.hopField) // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) -// @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) +// @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) // @ ensures acc(&p.segmentChange) // @ ensures acc(&p.ingressID, R20) // @ ensures acc(&p.d, R5) // @ ensures acc(&p.path, R10) // @ ensures acc(&p.rawPkt, R1) -// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), 1 - R15) +// @ ensures acc(sl.Bytes(ub, 0, len(ub)), 1 - R15) // @ ensures p.d.validResult(respr, addrAliasesPkt) // @ ensures addrAliasesPkt ==> ( // @ respr.OutAddr != nil && -// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15))) -// @ ensures !addrAliasesPkt ==> acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) +// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.Bytes(ub, 0, len(ub)), R15))) +// @ ensures !addrAliasesPkt ==> acc(sl.Bytes(ub, 0, len(ub)), R15) // @ ensures respr.OutPkt !== ub && respr.OutPkt != nil ==> -// @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() // @ ensures reserr != nil ==> reserr.ErrorMem() @@ -3397,25 +3397,25 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ requires acc(&p.ingressID, R15) // @ requires acc(&p.d, R15) && acc(p.d.Mem(), _) && p.d.WellConfigured() // @ requires p.d.getValSvc() != nil -// @ requires sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) +// @ requires sl.Bytes(p.rawPkt, 0, len(p.rawPkt)) // @ preserves acc(&p.mac, R10) // @ preserves p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) -// @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) +// @ 
preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.buffer, R10) && p.buffer != nil && p.buffer.Mem() // @ ensures acc(&p.rawPkt, R15) // @ ensures p.scionLayer.Mem(p.rawPkt) // @ ensures acc(&p.ingressID, R15) // @ ensures acc(&p.d, R15) // @ ensures p.d.validResult(respr, addrAliasesPkt) -// @ ensures acc(sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)), 1 - R15) +// @ ensures acc(sl.Bytes(p.rawPkt, 0, len(p.rawPkt)), 1 - R15) // @ ensures addrAliasesPkt ==> ( // @ respr.OutAddr != nil && // @ let rawPkt := p.rawPkt in -// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), R15))) -// @ ensures !addrAliasesPkt ==> acc(sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)), R15) +// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.Bytes(rawPkt, 0, len(rawPkt)), R15))) +// @ ensures !addrAliasesPkt ==> acc(sl.Bytes(p.rawPkt, 0, len(p.rawPkt)), R15) // @ ensures respr.OutPkt !== p.rawPkt && respr.OutPkt != nil ==> -// @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec // @ requires p.scionLayer.EqPathType(p.rawPkt) @@ -3488,16 +3488,16 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ preserves acc(&ohp.Info, R15) && acc(&ohp.FirstHop, R15) // @ preserves acc(&p.macBuffers.scionInput, R15) // @ preserves acc(&p.mac, R15) && p.mac != nil && p.mac.Mem() - // @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) + // @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ decreases // @ outline ( mac /*@@@*/ := path.MAC(p.mac, ohp.Info, ohp.FirstHop, p.macBuffers.scionInput) // (VerifiedSCION) introduced separate copy to avoid exposing quantified permissions outside the scope of this outline block. 
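	// Both MAC buffers are folded into sl.Bytes (at fraction R20) around the
	// comparison below, presumably to satisfy the slice predicate expected by
	// the ConstantTimeCompare stub.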
macCopy := mac - // @ fold acc(sl.AbsSlice_Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R20) - // @ fold acc(sl.AbsSlice_Bytes(mac[:], 0, len(mac)), R20) + // @ fold acc(sl.Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R20) + // @ fold acc(sl.Bytes(mac[:], 0, len(mac)), R20) compRes := subtle.ConstantTimeCompare(ohp.FirstHop.Mac[:], mac[:]) == 0 - // @ unfold acc(sl.AbsSlice_Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R20) + // @ unfold acc(sl.Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R20) // @ ) if compRes { // @ defer fold p.scionLayer.Mem(ubScionL) @@ -3593,7 +3593,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / a, err /*@ , addrAliases @*/ := p.d.resolveLocalDst(&p.scionLayer /* s */ /*@ , ubScionL @*/) if err != nil { // @ ghost if addrAliases { - // @ apply acc(a.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) + // @ apply acc(a.Mem(), R15) --* acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R15) // @ } // @ fold p.d.validResult(processResult{}, false) return processResult{}, err /*@ , false, absReturnErr(processResult{}) @*/ @@ -3607,13 +3607,13 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ requires acc(d.Mem(), _) // @ requires d.getValSvc() != nil -// @ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) +// @ requires acc(sl.Bytes(ub, 0, len(ub)), R15) // @ preserves acc(s.Mem(ub), R14) -// @ ensures !addrAliasesUb ==> acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) +// @ ensures !addrAliasesUb ==> acc(sl.Bytes(ub, 0, len(ub)), R15) // @ ensures !addrAliasesUb && resaddr != nil ==> acc(resaddr.Mem(), _) // @ ensures addrAliasesUb ==> resaddr != nil // @ ensures addrAliasesUb ==> acc(resaddr.Mem(), R15) -// @ ensures addrAliasesUb ==> (acc(resaddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15)) +// @ ensures addrAliasesUb ==> (acc(resaddr.Mem(), R15) --* acc(sl.Bytes(ub, 0, len(ub)), R15)) // @ ensures reserr != nil ==> reserr.ErrorMem() // (VerifiedSCION) the type of 's' was changed from slayers.SCION to *slayers.SCION. 
This makes // specs a lot easier and, makes the implementation faster as well by avoiding passing large data-structures @@ -3623,7 +3623,7 @@ func (d *DataPlane) resolveLocalDst(s *slayers.SCION /*@, ghost ub []byte @*/) ( // @ ghost start, end := s.ExtractAcc(ub) // @ assert s.RawDstAddr === ub[start:end] // @ sl.SplitRange_Bytes(ub, start, end, R15) - // @ assert acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) + // @ assert acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) dst, err := s.DstAddr() // @ apply acc(s, R16) --* acc(s.Mem(ub), R15) if err != nil { @@ -3638,20 +3638,20 @@ func (d *DataPlane) resolveLocalDst(s *slayers.SCION /*@, ghost ub []byte @*/) ( // @ d.getSvcMem() a, ok := d.svc.Any(v.Base()) if !ok { - // @ apply acc(dst.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R15) + // @ apply acc(dst.Mem(), R15) --* acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R15) // @ sl.CombineRange_Bytes(ub, start, end, R15) // @ establishNoSVCBackend() return nil, noSVCBackend /*@ , false @*/ } - // @ apply acc(dst.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R15) + // @ apply acc(dst.Mem(), R15) --* acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R15) // @ sl.CombineRange_Bytes(ub, start, end, R15) return a, nil /*@ , false @*/ case *net.IPAddr: tmp := addEndhostPort(v) - // @ package acc(tmp.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) { + // @ package acc(tmp.Mem(), R15) --* acc(sl.Bytes(ub, 0, len(ub)), R15) { // @ apply acc(tmp.Mem(), R15) --* acc(v.Mem(), R15) // @ assert acc(dst.Mem(), R15) - // @ apply acc(dst.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R15) + // @ apply acc(dst.Mem(), R15) --* acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R15) // @ sl.CombineRange_Bytes(ub, start, end, R15) // @ } return tmp, nil /*@ , true @*/ @@ -3668,14 +3668,14 @@ func addEndhostPort(dst *net.IPAddr) (res *net.UDPAddr) { // @ unfold acc(dst.Mem(), R15) tmp := &net.UDPAddr{IP: dst.IP, Port: topology.EndhostPort} // @ assert forall i int :: { &tmp.IP[i] } 0 <= i && i < len(tmp.IP) ==> acc(&tmp.IP[i], R15) - // @ fold acc(sl.AbsSlice_Bytes(tmp.IP, 0, len(tmp.IP)), R15) + // @ fold acc(sl.Bytes(tmp.IP, 0, len(tmp.IP)), R15) // @ fold acc(tmp.Mem(), R15) // @ package (acc(tmp.Mem(), R15) --* acc(dst.Mem(), R15)) { // @ assert acc(dst, R15) // @ assert acc(tmp, R50) // @ assert dst.IP === tmp.IP // @ unfold acc(tmp.Mem(), R15) - // @ unfold acc(sl.AbsSlice_Bytes(tmp.IP, 0, len(tmp.IP)), R15) + // @ unfold acc(sl.Bytes(tmp.IP, 0, len(tmp.IP)), R15) // @ assert forall i int :: { &tmp.IP[i] } 0 <= i && i < len(tmp.IP) ==> acc(&tmp.IP[i], R15) // @ assert forall i int :: { &dst.IP[i] } 0 <= i && i < len(dst.IP) ==> acc(&dst.IP[i], R15) // @ fold acc(dst.Mem(), R15) @@ -3689,7 +3689,7 @@ func addEndhostPort(dst *net.IPAddr) (res *net.UDPAddr) { // @ requires acc(s.Mem(rawPkt), R00) // @ requires s.HasOneHopPath(rawPkt) // @ preserves buffer != nil && buffer.Mem() -// @ preserves sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)) +// @ preserves sl.Bytes(rawPkt, 0, len(rawPkt)) // @ ensures acc(s.Mem(rawPkt), R00) // @ ensures res != nil ==> res.ErrorMem() // @ decreases @@ -3709,16 +3709,16 @@ func updateSCIONLayer(rawPkt []byte, s *slayers.SCION, buffer gopacket.Serialize rawContents := buffer.Bytes() // @ s.InferSizeOHP(rawPkt) // @ assert len(rawContents) <= len(rawPkt) - // @ unfold sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)) - // @ unfold 
acc(sl.AbsSlice_Bytes(rawContents, 0, len(rawContents)), R20) + // @ unfold sl.Bytes(rawPkt, 0, len(rawPkt)) + // @ unfold acc(sl.Bytes(rawContents, 0, len(rawContents)), R20) // (VerifiedSCION) proving that the reslicing operation below is safe // was tricky and required enriching (non-modularly) the invariants of *onehop.Path // and *slayers.SCION. // @ assert forall i int :: { &rawPkt[:len(rawContents)][i] }{ &rawPkt[i] } 0 <= i && i < len(rawContents) ==> // @ &rawPkt[i] == &rawPkt[:len(rawContents)][i] copy(rawPkt[:len(rawContents)], rawContents /*@ , R20 @*/) - // @ fold sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)) - // @ fold acc(sl.AbsSlice_Bytes(rawContents, 0, len(rawContents)), R20) + // @ fold sl.Bytes(rawPkt, 0, len(rawPkt)) + // @ fold acc(sl.Bytes(rawContents, 0, len(rawContents)), R20) // @ buffer.RestoreMem(rawContents) return nil } @@ -3820,10 +3820,10 @@ func (b *bfdSend) Send(bfd *layers.BFD) error { // @ requires acc(&p.d, _) && acc(p.d.Mem(), _) // @ requires acc(p.scionLayer.Mem(ub), R4) // @ requires p.scionLayer.ValidPathMetaData(ub) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) // @ requires acc(&p.ingressID, R15) // @ ensures acc(p.scionLayer.Mem(ub), R4) -// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ ensures sl.Bytes(ub, 0, len(ub)) // @ ensures acc(&p.ingressID, R15) // @ decreases func (p *scionPacketProcessor) prepareSCMP( @@ -4042,7 +4042,7 @@ func (p *scionPacketProcessor) prepareSCMP( // Due to Viper's very strict injectivity constraints: // @ requires forall i, j int :: { &opts[i], &opts[j] } 0 <= i && i < j && j < len(opts) ==> // @ opts[i] !== opts[j] -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R39) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R39) // @ ensures forall i int :: { &opts[i] } 0 <= i && i < len(opts) ==> // @ (acc(&opts[i], R10) && opts[i] != nil) // @ ensures -1 <= idx && idx < len(opts) @@ -4087,7 +4087,7 @@ func decodeLayers(data []byte, base *slayers.SCION, // @ ghost oldStart := 0 // @ ghost oldEnd := len(data) - // @ invariant acc(sl.AbsSlice_Bytes(oldData, 0, len(oldData)), R39) + // @ invariant acc(sl.Bytes(oldData, 0, len(oldData)), R39) // @ invariant base.Mem(oldData) // @ invariant typeOf(base.GetPath(oldData)) == *scion.Raw ==> // @ base.EqAbsHeader(oldData) && base.ValidScionInitSpec(oldData) diff --git a/router/dataplane_spec.gobra b/router/dataplane_spec.gobra index 12b4c77af..5361ad9c2 100644 --- a/router/dataplane_spec.gobra +++ b/router/dataplane_spec.gobra @@ -68,7 +68,7 @@ pred (d *DataPlane) Mem() { (d.svc != nil ==> d.svc.Mem()) && (d.macFactory != nil ==> ( acc(d.key) && - acc(sl.AbsSlice_Bytes(*d.key, 0, len(*d.key)), _) && + acc(sl.Bytes(*d.key, 0, len(*d.key)), _) && scrypto.ValidKeyForHash(*d.key) && d.macFactory implements MacFactorySpec{d.key})) && (d.bfdSessions != nil ==> accBfdSession(d.bfdSessions)) && @@ -146,9 +146,9 @@ pred (p *scionPacketProcessor) initMem() { } // This is used as a signature, not as an assumed function. 
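// A minimal sketch of how this signature is consumed (the call below is
// hypothetical; the names are from this file): d.Mem() records the
// conjunct 'd.macFactory implements MacFactorySpec{d.key}', so a caller
// that has opened d.Mem() may rely on the signature when invoking the
// stored factory:
//
//   assert d.macFactory implements MacFactorySpec{d.key}
//   h := d.macFactory() // by the signature: h != nil && h.Mem()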
-requires acc(key, _) && acc(sl.AbsSlice_Bytes(*key, 0, len(*key)), _) +requires acc(key, _) && acc(sl.Bytes(*key, 0, len(*key)), _) requires scrypto.ValidKeyForHash(*key) -ensures acc(key, _) && acc(sl.AbsSlice_Bytes(*key, 0, len(*key)), _) +ensures acc(key, _) && acc(sl.Bytes(*key, 0, len(*key)), _) ensures res != nil && res.Mem() decreases func MacFactorySpec(ghost key *[]byte) (res hash.Hash) @@ -418,7 +418,7 @@ func (d *DataPlane) getMacFactoryMem() { ghost requires acc(d.Mem(), _) && d.getMacFactory() != nil ensures acc(&d.macFactory, _) && acc(&d.key, _) && acc(d.key, _) -ensures acc(sl.AbsSlice_Bytes(*d.key, 0, len(*d.key)), _) +ensures acc(sl.Bytes(*d.key, 0, len(*d.key)), _) ensures scrypto.ValidKeyForHash(*d.key) ensures d.macFactory implements MacFactorySpec{d.key} decreases @@ -523,8 +523,8 @@ pred writeMsgInv(writeMsgs underlayconn.Messages) { acc(&writeMsgs[0]) && len(writeMsgs[0].Buffers) == 1 && acc(&writeMsgs[0].Buffers[0]) && - // sl.AbsSlice_Bytes(writeMsgs[0].Buffers[0], 0, len(writeMsgs[0].Buffers[0])) && - sl.AbsSlice_Bytes(writeMsgs[0].OOB, 0, len(writeMsgs[0].OOB)) && + // sl.Bytes(writeMsgs[0].Buffers[0], 0, len(writeMsgs[0].Buffers[0])) && + sl.Bytes(writeMsgs[0].OOB, 0, len(writeMsgs[0].OOB)) && 0 <= writeMsgs[0].N } /** end of definitions used internally for the proof of Run **/ @@ -580,7 +580,7 @@ pred (s* scionPacketProcessor) sInit() { acc(&s.segmentChange) && acc(&s.cachedMac) && acc(&s.macBuffers) && - sl.AbsSlice_Bytes(s.macBuffers.scionInput, 0, len(s.macBuffers.scionInput)) && + sl.Bytes(s.macBuffers.scionInput, 0, len(s.macBuffers.scionInput)) && s.bfdLayer.NonInitMem() && acc(&s.srcAddr) && acc(&s.rawPkt) diff --git a/router/dataplane_spec_test.gobra b/router/dataplane_spec_test.gobra index a38be0c54..433b3bd86 100644 --- a/router/dataplane_spec_test.gobra +++ b/router/dataplane_spec_test.gobra @@ -82,7 +82,7 @@ func canModifyRunning(d *DataPlane) { requires macFactory != nil && acc(key) && - acc(sl.AbsSlice_Bytes(*key, 0, len(*key)), _) && + acc(sl.Bytes(*key, 0, len(*key)), _) && scrypto.ValidKeyForHash(*key) && macFactory implements MacFactorySpec{key} requires metrics != nil && metrics.Mem() diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index f0dc435af..60daa8c8c 100644 --- a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -28,7 +28,7 @@ import ( ) ghost -preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) +preserves acc(sl.Bytes(raw, 0, len(raw)), R55) ensures slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw) ==> absIO_val(raw, ingressID).isIO_val_Pkt2 && absIO_val(raw, ingressID).IO_val_Pkt2_2 == absPkt(raw) && @@ -44,9 +44,9 @@ func absIO_valLemma(raw []byte, ingressID uint16) { } ghost -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) requires slayers.ValidPktMetaHdr(raw) -ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures acc(sl.Bytes(raw, 0, len(raw)), R56) ensures slayers.ValidPktMetaHdr(raw) ensures len(absPkt(raw).CurrSeg.Future) > 0 decreases @@ -55,7 +55,7 @@ func absPktFutureLemma(raw []byte) { headerOffset := slayers.GetAddressOffset(raw) headerOffsetWithMetaLen := headerOffset + scion.MetaLen assert forall k int :: {&raw[headerOffset:headerOffset+scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> &raw[headerOffset:headerOffset+scion.MetaLen][k] == &raw[headerOffset + k] - hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + hdr := (unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in 
binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) metaHdr := scion.DecodedFrom(hdr) currInfIdx := int(metaHdr.CurrINF) @@ -116,7 +116,7 @@ opaque requires acc(p.scionLayer.Mem(ub), R50) requires acc(&p.d, R55) && acc(p.d.Mem(), _) requires acc(&p.ingressID, R55) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) requires slayers.ValidPktMetaHdr(ub) decreases pure func (p *scionPacketProcessor) LastHopLen(ub []byte) bool { @@ -131,7 +131,7 @@ ghost requires acc(p.scionLayer.Mem(ub), R50) requires acc(&p.d, R55) && acc(p.d.Mem(), _) requires acc(&p.ingressID, R55) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) requires slayers.ValidPktMetaHdr(ub) requires p.DstIsLocalIngressID(ub) requires p.LastHopLen(ub) @@ -141,7 +141,7 @@ requires (unfolding acc(p.scionLayer.Mem(ub), R50) in ensures acc(p.scionLayer.Mem(ub), R50) ensures acc(&p.d, R55) && acc(p.d.Mem(), _) ensures acc(&p.ingressID, R55) -ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +ensures acc(sl.Bytes(ub, 0, len(ub)), R56) ensures slayers.ValidPktMetaHdr(ub) ensures p.ingressID != 0 ensures len(absPkt(ub).CurrSeg.Future) == 1 @@ -208,17 +208,17 @@ func (p *scionPacketProcessor) IngressIDNotZeroLemma(pkt io.IO_pkt2, egressID ui ghost requires 0 <= start && start <= end && end <= len(ub) requires acc(p.scionLayer.Mem(ub), R55) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) -requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) +requires acc(sl.Bytes(ub, 0, len(ub)), R50) +requires acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) requires p.path === p.scionLayer.GetPath(ub) requires start == p.scionLayer.PathStartIdx(ub) requires end == p.scionLayer.PathEndIdx(ub) requires p.scionLayer.EqAbsHeader(ub) requires p.scionLayer.ValidScionInitSpec(ub) -ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) +ensures acc(sl.Bytes(ub, 0, len(ub)), R50) ensures acc(p.scionLayer.Mem(ub), R55) -ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) +ensures acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) ensures start == p.scionLayer.PathStartIdx(ub) ensures end == p.scionLayer.PathEndIdx(ub) @@ -228,8 +228,8 @@ ensures p.path.SegsInBounds(ub[start:end]) ensures p.scionLayer.ValidHeaderOffset(ub, len(ub)) decreases func (p* scionPacketProcessor) EstablishEqAbsHeader(ub []byte, start int, end int) { - unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) - unfold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) + unfold acc(sl.Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) unfold acc(p.scionLayer.Mem(ub), R56) unfold acc(p.path.Mem(ub[start:end]), R56) reveal p.scionLayer.EqAbsHeader(ub) @@ -238,24 +238,24 @@ func (p* scionPacketProcessor) EstablishEqAbsHeader(ub []byte, start int, end in assert p.path.EqAbsHeader(ub[start:end]) fold acc(p.path.Mem(ub[start:end]), R56) fold acc(p.scionLayer.Mem(ub), R56) - fold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) - fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + fold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) + fold acc(sl.Bytes(ub, 0, len(ub)), R56) } ghost requires 0 <= start && start <= end && end <= len(ub) requires acc(p.scionLayer.Mem(ub), R55) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), 
R50) -requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) +requires acc(sl.Bytes(ub, 0, len(ub)), R50) +requires acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) requires p.path === p.scionLayer.GetPath(ub) requires slayers.ValidPktMetaHdr(ub) requires start == p.scionLayer.PathStartIdx(ub) requires end == p.scionLayer.PathEndIdx(ub) requires p.scionLayer.EqAbsHeader(ub) -ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) +ensures acc(sl.Bytes(ub, 0, len(ub)), R50) ensures acc(p.scionLayer.Mem(ub), R55) -ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) +ensures acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) ensures slayers.ValidPktMetaHdr(ub) ensures p.scionLayer.EqAbsHeader(ub) @@ -267,8 +267,8 @@ ensures p.scionLayer.ValidHeaderOffset(ub, len(ub)) ensures absPkt(ub) == p.path.absPkt(ub[start:end]) decreases func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end int) { - unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) - unfold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) + unfold acc(sl.Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) reveal slayers.ValidPktMetaHdr(ub) reveal p.scionLayer.EqAbsHeader(ub) assert reveal scion.validPktMetaHdr(ub[start:end]) @@ -282,8 +282,8 @@ func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end hdr2 := binary.BigEndian.Uint32(ub[start:end][:scion.MetaLen]) assert hdr1 == hdr2 hdr := hdr1 - fold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) - fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + fold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) + fold acc(sl.Bytes(ub, 0, len(ub)), R56) headerOffsetWithMetaLen := start + scion.MetaLen metaHdr := scion.DecodedFrom(hdr) currInfIdx := int(metaHdr.CurrINF) @@ -306,8 +306,8 @@ func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end ghost requires 0 <= start && start <= end && end <= len(ub) requires acc(p.scionLayer.Mem(ub), R55) -requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) -requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) +requires acc(sl.Bytes(ub, 0, len(ub)), R50) +requires acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) requires p.path === p.scionLayer.GetPath(ub) requires scion.validPktMetaHdr(ub[start:end]) @@ -315,9 +315,9 @@ requires start == p.scionLayer.PathStartIdx(ub) requires end == p.scionLayer.PathEndIdx(ub) requires p.path.EqAbsHeader(ub[start:end]) requires p.scionLayer.ValidHeaderOffset(ub, len(ub)) -ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R50) +ensures acc(sl.Bytes(ub, 0, len(ub)), R50) ensures acc(p.scionLayer.Mem(ub), R55) -ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R50) +ensures acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) ensures slayers.ValidPktMetaHdr(ub) ensures start == p.scionLayer.PathStartIdx(ub) @@ -327,8 +327,8 @@ ensures p.scionLayer.EqAbsHeader(ub) ensures absPkt(ub) == p.path.absPkt(ub[start:end]) decreases func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end int){ - unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) - unfold acc(sl.AbsSlice_Bytes(ub[start:end], 0, 
len(ub[start:end])), R56) + unfold acc(sl.Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) unfold acc(p.scionLayer.Mem(ub), R56) unfold acc(p.scionLayer.Path.Mem(ub[start:end]), R56) reveal p.scionLayer.ValidHeaderOffset(ub, len(ub)) @@ -343,8 +343,8 @@ func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end hdr2 := binary.BigEndian.Uint32(ub[start:end][:scion.MetaLen]) assert hdr1 == hdr2 hdr := hdr1 - fold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) - fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) + fold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) + fold acc(sl.Bytes(ub, 0, len(ub)), R56) metaHdr := scion.DecodedFrom(hdr) currInfIdx := int(metaHdr.CurrINF) diff --git a/router/io-spec.gobra b/router/io-spec.gobra index a633f6668..fb37adf58 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -32,14 +32,14 @@ import ( ghost opaque -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) requires slayers.ValidPktMetaHdr(raw) decreases pure func absPkt(raw []byte) (res io.IO_pkt2) { return let _ := reveal slayers.ValidPktMetaHdr(raw) in let headerOffset := slayers.GetAddressOffset(raw) in let headerOffsetWithMetaLen := headerOffset + scion.MetaLen in - let hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + let hdr := (unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) in let metaHdr := scion.DecodedFrom(hdr) in let currInfIdx := int(metaHdr.CurrINF) in @@ -60,7 +60,7 @@ pure func absPkt(raw []byte) (res io.IO_pkt2) { } ghost -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) ensures val.isIO_val_Unsupported ensures val.IO_val_Unsupported_1 == path.ifsToIO_ifs(ingressID) decreases @@ -73,7 +73,7 @@ pure func absIO_val_Unsupported(raw []byte, ingressID uint16) (val io.IO_val) { ghost opaque -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) ensures val.isIO_val_Pkt2 || val.isIO_val_Unsupported decreases pure func absIO_val(raw []byte, ingressID uint16) (val io.IO_val) { @@ -84,7 +84,7 @@ pure func absIO_val(raw []byte, ingressID uint16) (val io.IO_val) { ghost requires respr.OutPkt != nil ==> - acc(sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)), R56) + acc(sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)), R56) decreases pure func absReturnErr(respr processResult) (val io.IO_val) { return respr.OutPkt == nil ? io.IO_val_Unit{} : diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra index cfa9eb66e..06ed67245 100644 --- a/router/widen-lemma.gobra +++ b/router/widen-lemma.gobra @@ -30,10 +30,10 @@ import ( // is absIO_valWidenLemma. Everything else can be seen as an implementation detail. 
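// A minimal sketch of a typical call site (the surrounding code is
// hypothetical; length stands for the number of bytes of raw that were
// actually read):
//
//   absIO_valWidenLemma(raw, ingressID, length)
//   assert absIO_val(raw[:length], ingressID).isIO_val_Pkt2 ==>
//     absIO_val(raw[:length], ingressID) == absIO_val(raw, ingressID)
//
// That is, restricting a buffer to the prefix that holds the packet does
// not change the packet's abstraction.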
ghost requires 0 <= length && length <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) -requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) -ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) -ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) +requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R49) +ensures acc(sl.Bytes(raw, 0, len(raw)), R49) +ensures acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R49) ensures absIO_val(raw[:length], ingressID).isIO_val_Pkt2 ==> absIO_val(raw[:length], ingressID) == absIO_val(raw, ingressID) decreases @@ -62,52 +62,52 @@ func absIO_valWidenLemma(raw []byte, ingressID uint16, length int) { ghost requires 0 <= length && length <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +requires acc(sl.Bytes(raw, 0, len(raw)), R51) +requires acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) requires slayers.ValidPktMetaHdr(raw[:length]) -ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures acc(sl.Bytes(raw, 0, len(raw)), R51) +ensures acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) ensures slayers.ValidPktMetaHdr(raw) decreases func ValidPktMetaHdrWidenLemma(raw []byte, length int) { - unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) - unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R56) reveal slayers.ValidPktMetaHdr(raw[:length]) ret1 := reveal slayers.ValidPktMetaHdr(raw) ret2 := reveal slayers.ValidPktMetaHdr(raw[:length]) assert ret1 == ret2 - fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) - fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R56) } ghost requires 0 <= length && length <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +requires acc(sl.Bytes(raw, 0, len(raw)), R51) +requires acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) requires slayers.IsSupportedPkt(raw[:length]) -ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) -ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures acc(sl.Bytes(raw, 0, len(raw)), R51) +ensures acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) ensures slayers.IsSupportedPkt(raw) decreases func IsSupportedPktWidenLemma(raw []byte, length int) { - unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) - unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R56) reveal slayers.IsSupportedPkt(raw[:length]) ret1 := reveal slayers.IsSupportedPkt(raw) ret2 := reveal slayers.IsSupportedPkt(raw[:length]) assert ret1 == ret2 - fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) - fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R56) } ghost requires 0 <= length && length <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) -requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) 
+requires acc(sl.Bytes(raw, 0, len(raw)), R50) +requires acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R50) requires slayers.ValidPktMetaHdr(raw) requires slayers.ValidPktMetaHdr(raw[:length]) -ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) -ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) +ensures acc(sl.Bytes(raw, 0, len(raw)), R50) +ensures acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R50) ensures slayers.ValidPktMetaHdr(raw) ensures slayers.ValidPktMetaHdr(raw[:length]) ensures absPkt(raw) == absPkt(raw[:length]) @@ -116,8 +116,8 @@ func absPktWidenLemma(raw []byte, length int) { reveal slayers.ValidPktMetaHdr(raw) reveal slayers.ValidPktMetaHdr(raw[:length]) - unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) - unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) + unfold acc(sl.Bytes(raw, 0, len(raw)), R51) + unfold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) headerOffset1 := slayers.GetAddressOffset(raw) headerOffset2 := slayers.GetAddressOffset(raw[:length]) assert headerOffset1 == headerOffset2 @@ -127,8 +127,8 @@ func absPktWidenLemma(raw []byte, length int) { hdr2 := binary.BigEndian.Uint32(raw[:length][headerOffset:headerOffset+scion.MetaLen]) assert hdr1 == hdr2 hdr := hdr1 - fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) - fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) + fold acc(sl.Bytes(raw, 0, len(raw)), R51) + fold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) metaHdr := scion.DecodedFrom(hdr) currInfIdx := int(metaHdr.CurrINF) diff --git a/verification/dependencies/bytes/bytes.gobra b/verification/dependencies/bytes/bytes.gobra index 1ef8ae9ef..dbfcbc260 100644 --- a/verification/dependencies/bytes/bytes.gobra +++ b/verification/dependencies/bytes/bytes.gobra @@ -18,8 +18,8 @@ import sl "github.com/scionproto/scion/verification/utils/slices" // are the same length and contain the same bytes. // A nil argument is equivalent to an empty slice. trusted -requires acc(sl.AbsSlice_Bytes(a, 0, len(a)), _) -requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), _) +requires acc(sl.Bytes(a, 0, len(a)), _) +requires acc(sl.Bytes(b, 0, len(b)), _) decreases pure func Equal(a, b []byte) bool { return string(a) == string(b) diff --git a/verification/dependencies/crypto/aes/cipher.gobra b/verification/dependencies/crypto/aes/cipher.gobra index 98eef7516..e9bd84255 100644 --- a/verification/dependencies/crypto/aes/cipher.gobra +++ b/verification/dependencies/crypto/aes/cipher.gobra @@ -19,7 +19,7 @@ const BlockSize = 16 // The key argument should be the AES key, // either 16, 24, or 32 bytes to select // AES-128, AES-192, or AES-256. -preserves acc(slices.AbsSlice_Bytes(key, 0, len(key)), R50) +preserves acc(slices.Bytes(key, 0, len(key)), R50) ensures err == nil ==> len(key) == 16 || len(key) == 24 || len(key) == 32 ensures err == nil ==> diff --git a/verification/dependencies/crypto/cipher/cbc.gobra b/verification/dependencies/crypto/cipher/cbc.gobra index 0dcf5511d..81eaae634 100644 --- a/verification/dependencies/crypto/cipher/cbc.gobra +++ b/verification/dependencies/crypto/cipher/cbc.gobra @@ -15,14 +15,14 @@ package cipher -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // NewCBCEncrypter returns a BlockMode which encrypts in cipher block chaining // mode, using the given Block. The length of iv must be the same as the // Block's block size. 
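// A minimal sketch of how this spec composes with aes.NewCipher from the
// sibling stub (hypothetical client code; key and iv are assumed to
// satisfy the preconditions, and NewCipher is assumed to yield a block
// with b.Mem() on success):
//
//   block, err := aes.NewCipher(key) // on success, len(key) is 16, 24, or 32
//   if err == nil {
//     mode := cipher.NewCBCEncrypter(block, iv) // needs len(iv) == block.BlockSize()
//     // here: mode != nil && mode.Mem(), and mode.BlockSize() is unchanged
//   }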
requires b != nil && b.Mem() requires len(iv) == b.BlockSize() -preserves acc(slices.AbsSlice_Bytes(iv, 0, len(iv)), _) +preserves acc(sl.Bytes(iv, 0, len(iv)), _) ensures result != nil && result.Mem() ensures result.BlockSize() == old(b.BlockSize()) decreases _ diff --git a/verification/dependencies/crypto/cipher/cipher.gobra b/verification/dependencies/crypto/cipher/cipher.gobra index 76c9d6364..1fa8e5ab4 100644 --- a/verification/dependencies/crypto/cipher/cipher.gobra +++ b/verification/dependencies/crypto/cipher/cipher.gobra @@ -14,7 +14,7 @@ package cipher import . "github.com/scionproto/scion/verification/utils/definitions" -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // A Block represents an implementation of block cipher // using a given key. It provides the capability to encrypt @@ -33,16 +33,16 @@ type Block interface { // Encrypt encrypts the first block in src into dst. // Dst and src must overlap entirely or not at all. preserves Mem() - preserves slices.AbsSlice_Bytes(dst, 0, len(dst)) - preserves acc(slices.AbsSlice_Bytes(src, 0, len(src)), R10) + preserves sl.Bytes(dst, 0, len(dst)) + preserves acc(sl.Bytes(src, 0, len(src)), R10) decreases Encrypt(dst, src []byte) // Decrypt decrypts the first block in src into dst. // Dst and src must overlap entirely or not at all. preserves Mem() - preserves slices.AbsSlice_Bytes(dst, 0, len(dst)) - preserves acc(slices.AbsSlice_Bytes(src, 0, len(src)), R10) + preserves sl.Bytes(dst, 0, len(dst)) + preserves acc(sl.Bytes(src, 0, len(src)), R10) decreases Decrypt(dst, src []byte) } @@ -63,8 +63,8 @@ type Stream interface { // maintains state and does not reset at each XORKeyStream call. requires len(src) <= len(dst) preserves Mem() - preserves slices.AbsSlice_Bytes(dst, 0, len(dst)) - preserves acc(slices.AbsSlice_Bytes(src, 0, len(src)), R10) + preserves sl.Bytes(dst, 0, len(dst)) + preserves acc(sl.Bytes(src, 0, len(src)), R10) decreases XORKeyStream(dst, src []byte) } @@ -94,9 +94,9 @@ type BlockMode interface { // maintains state and does not reset at each CryptBlocks call. requires len(src) <= len(dst) preserves Mem() - preserves acc(slices.AbsSlice_Bytes(dst, 0, len(dst)), 1 - R10) - preserves dst !== src ==> acc(slices.AbsSlice_Bytes(dst, 0, len(dst)), R10) - preserves acc(slices.AbsSlice_Bytes(src, 0, len(src)), R10) + preserves acc(sl.Bytes(dst, 0, len(dst)), 1 - R10) + preserves dst !== src ==> acc(sl.Bytes(dst, 0, len(dst)), R10) + preserves acc(sl.Bytes(src, 0, len(src)), R10) ensures BlockSize() == old(BlockSize()) decreases CryptBlocks(dst, src []byte) diff --git a/verification/dependencies/crypto/subtle/constant_time.gobra b/verification/dependencies/crypto/subtle/constant_time.gobra index acdcf1c3f..8ecebd1c8 100644 --- a/verification/dependencies/crypto/subtle/constant_time.gobra +++ b/verification/dependencies/crypto/subtle/constant_time.gobra @@ -9,16 +9,16 @@ package subtle -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // ConstantTimeCompare returns 1 if the two slices, x and y, have equal contents // and 0 otherwise. The time taken is a function of the length of the slices and // is independent of the contents. 
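// A minimal sketch of the intended use (mirroring the one-hop-path MAC
// check earlier in this patch; the ghost folds around the call are
// elided):
//
//   compRes := subtle.ConstantTimeCompare(ohp.FirstHop.Mac[:], mac[:]) == 0
//
// Per the spec below, slices of different lengths always yield 0; the
// content-equality postconditions are currently commented out.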
-requires acc(slices.AbsSlice_Bytes(x, 0, len(x)), _) -requires acc(slices.AbsSlice_Bytes(y, 0, len(y)), _) +requires acc(sl.Bytes(x, 0, len(x)), _) +requires acc(sl.Bytes(y, 0, len(y)), _) // postconditions hidden for now: -// ensures unfolding slices.AbsSlice_Bytes(x, 0, len(x)) in (unfolding slices.AbsSlice_Bytes(y, 0, len(y)) in len(x) == len(y) ==> (forall i int :: 0 <= i && i < len(x) ==> x[i] == y[i]) ==> res == 1) -// ensures unfolding slices.AbsSlice_Bytes(x, 0, len(x)) in (unfolding slices.AbsSlice_Bytes(y, 0, len(y)) in len(x) == len(y) ==> !(forall i int :: 0 <= i && i < len(x) ==> x[i] == y[i]) ==> res == 0) +// ensures unfolding sl.Bytes(x, 0, len(x)) in (unfolding sl.Bytes(y, 0, len(y)) in len(x) == len(y) ==> (forall i int :: 0 <= i && i < len(x) ==> x[i] == y[i]) ==> res == 1) +// ensures unfolding sl.Bytes(x, 0, len(x)) in (unfolding sl.Bytes(y, 0, len(y)) in len(x) == len(y) ==> !(forall i int :: 0 <= i && i < len(x) ==> x[i] == y[i]) ==> res == 0) ensures len(x) != len(y) ==> res == 0 decreases _ pure func ConstantTimeCompare(x, y []byte) (res int) diff --git a/verification/dependencies/github.com/google/gopacket/decode.gobra b/verification/dependencies/github.com/google/gopacket/decode.gobra index 0d11ef310..37d5e0469 100644 --- a/verification/dependencies/github.com/google/gopacket/decode.gobra +++ b/verification/dependencies/github.com/google/gopacket/decode.gobra @@ -8,7 +8,7 @@ package gopacket -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" type DecodeFeedback interface { pred Mem() @@ -90,7 +90,7 @@ type Decoder interface { pred Mem() requires acc(LayerTypesMem(), _) - requires slices.AbsSlice_Bytes(b, 0, len(b)) + requires sl.Bytes(b, 0, len(b)) preserves Mem() preserves p.Mem() decreases diff --git a/verification/dependencies/github.com/google/gopacket/flows.gobra b/verification/dependencies/github.com/google/gopacket/flows.gobra index 0aeac2e4b..585b46cdb 100644 --- a/verification/dependencies/github.com/google/gopacket/flows.gobra +++ b/verification/dependencies/github.com/google/gopacket/flows.gobra @@ -8,7 +8,7 @@ package gopacket -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" const MaxEndpointSize = 16 @@ -29,13 +29,13 @@ type Flow struct { src, dst [MaxEndpointSize]byte } -preserves acc(slices.AbsSlice_Bytes(src, 0, len(src)), 1/10000) && acc(slices.AbsSlice_Bytes(dst, 0, len(dst)), 1/10000) +preserves acc(sl.Bytes(src, 0, len(src)), 1/10000) && acc(sl.Bytes(dst, 0, len(dst)), 1/10000) requires len(src) <= MaxEndpointSize && len(dst) <= MaxEndpointSize ensures f.slen == len(src) ensures f.dlen == len(dst) -ensures unfolding acc(slices.AbsSlice_Bytes(src, 0, len(src)), 1/10000) in +ensures unfolding acc(sl.Bytes(src, 0, len(src)), 1/10000) in forall i int :: { &src[i] } 0 <= i && i < len(src) ==> f.src[i] == src[i] -ensures unfolding acc(slices.AbsSlice_Bytes(dst, 0, len(dst)), 1/10000) in +ensures unfolding acc(sl.Bytes(dst, 0, len(dst)), 1/10000) in forall i int :: { &dst[i] } 0 <= i && i < len(dst) ==> f.dst[i] == dst[i] ensures f.typ == t decreases diff --git a/verification/dependencies/github.com/google/gopacket/layers/base.gobra b/verification/dependencies/github.com/google/gopacket/layers/base.gobra index 3c9ce26f6..edf930228 100644 --- a/verification/dependencies/github.com/google/gopacket/layers/base.gobra +++ b/verification/dependencies/github.com/google/gopacket/layers/base.gobra 
@@ -8,7 +8,7 @@ package layers -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" type BaseLayer struct { Contents []byte @@ -28,29 +28,29 @@ pred (b *BaseLayer) PayloadMem() { } requires b.LayerMem() -ensures slices.AbsSlice_Bytes(res, 0, len(res)) -ensures slices.AbsSlice_Bytes(res, 0, len(res)) --* b.LayerMem() +ensures sl.Bytes(res, 0, len(res)) +ensures sl.Bytes(res, 0, len(res)) --* b.LayerMem() decreases func (b *BaseLayer) LayerContents() (res []byte) { unfold b.LayerMem() res = b.Contents - fold slices.AbsSlice_Bytes(res, 0, len(res)) - package slices.AbsSlice_Bytes(res, 0, len(res)) --* b.LayerMem() { - unfold slices.AbsSlice_Bytes(res, 0, len(res)) + fold sl.Bytes(res, 0, len(res)) + package sl.Bytes(res, 0, len(res)) --* b.LayerMem() { + unfold sl.Bytes(res, 0, len(res)) fold b.LayerMem() } } requires b.PayloadMem() -ensures slices.AbsSlice_Bytes(res, 0, len(res)) -ensures slices.AbsSlice_Bytes(res, 0, len(res)) --* b.PayloadMem() +ensures sl.Bytes(res, 0, len(res)) +ensures sl.Bytes(res, 0, len(res)) --* b.PayloadMem() decreases func (b *BaseLayer) LayerPayload() (res []byte) { unfold b.PayloadMem() res = b.Payload - fold slices.AbsSlice_Bytes(res, 0, len(res)) - package slices.AbsSlice_Bytes(res, 0, len(res)) --* b.PayloadMem() { - unfold slices.AbsSlice_Bytes(res, 0, len(res)) + fold sl.Bytes(res, 0, len(res)) + package sl.Bytes(res, 0, len(res)) --* b.PayloadMem() { + unfold sl.Bytes(res, 0, len(res)) fold b.PayloadMem() } } diff --git a/verification/dependencies/github.com/google/gopacket/layers/bfd.gobra b/verification/dependencies/github.com/google/gopacket/layers/bfd.gobra index 605680e3d..743d748e4 100644 --- a/verification/dependencies/github.com/google/gopacket/layers/bfd.gobra +++ b/verification/dependencies/github.com/google/gopacket/layers/bfd.gobra @@ -11,7 +11,7 @@ package layers import ( "github.com/google/gopacket" - "github.com/scionproto/scion/verification/utils/slices" + sl "github.com/scionproto/scion/verification/utils/slices" ) type BFDVersion uint8 @@ -132,7 +132,7 @@ type BFDAuthHeader struct { } pred (b *BFDAuthHeader) Mem() { - acc(b) && slices.AbsSlice_Bytes(b.Data, 0, len(b.Data)) + acc(b) && sl.Bytes(b.Data, 0, len(b.Data)) } preserves acc(h, 1/10000) @@ -185,7 +185,7 @@ pure func (d *BFD) LayerType() gopacket.LayerType requires d.NonInitMem() preserves df.Mem() -preserves slices.AbsSlice_Bytes(data, 0, len(data)) +preserves sl.Bytes(data, 0, len(data)) ensures err == nil ==> d.Mem(data) ensures err != nil ==> err.ErrorMem() && d.NonInitMem() decreases diff --git a/verification/dependencies/github.com/google/gopacket/layertype.gobra b/verification/dependencies/github.com/google/gopacket/layertype.gobra index 11f4b5851..018d721dc 100644 --- a/verification/dependencies/github.com/google/gopacket/layertype.gobra +++ b/verification/dependencies/github.com/google/gopacket/layertype.gobra @@ -19,7 +19,7 @@ initEnsures forall t LayerType :: { Registered(t) } t < 0 ==> !Registered(t) initEnsures forall t LayerType :: { Registered(t) } 3 < t ==> !Registered(t) package gopacket -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" /** Types **/ type LayerType int64 @@ -185,7 +185,7 @@ func OverrideLayerType(num int, meta LayerTypeMetadata) (res LayerType) { } preserves c.Mem() -preserves slices.AbsSlice_Bytes(data, 0, len(data)) +preserves sl.Bytes(data, 0, len(data)) ensures err != nil ==> 
err.ErrorMem() decreases func (t LayerType) Decode(data []byte, c PacketBuilder) (err error) diff --git a/verification/dependencies/github.com/google/gopacket/packet.gobra b/verification/dependencies/github.com/google/gopacket/packet.gobra index 1790dd5e2..b410e4715 100644 --- a/verification/dependencies/github.com/google/gopacket/packet.gobra +++ b/verification/dependencies/github.com/google/gopacket/packet.gobra @@ -10,7 +10,7 @@ package gopacket import ( "time" - "github.com/scionproto/scion/verification/utils/slices" + sl "github.com/scionproto/scion/verification/utils/slices" ) type CaptureInfo struct { @@ -105,8 +105,8 @@ type Packet interface { requires Mem() // (VerifiedSCION) not sure if we need this - // ensures slices.AbsSlice_Bytes(res, 0, len(res)) - // ensures slices.AbsSlice_Bytes(res, 0, len(res)) --* Mem() + // ensures sl.Bytes(res, 0, len(res)) + // ensures sl.Bytes(res, 0, len(res)) --* Mem() decreases Data() (res []byte) diff --git a/verification/dependencies/github.com/google/gopacket/parser.gobra b/verification/dependencies/github.com/google/gopacket/parser.gobra index 978b0ee2a..df1f93d41 100644 --- a/verification/dependencies/github.com/google/gopacket/parser.gobra +++ b/verification/dependencies/github.com/google/gopacket/parser.gobra @@ -9,7 +9,7 @@ package gopacket import ( - "github.com/scionproto/scion/verification/utils/slices" + sl "github.com/scionproto/scion/verification/utils/slices" . "github.com/scionproto/scion/verification/utils/definitions" ) @@ -17,7 +17,7 @@ import ( // values into specified slice. Returns either first encountered // unsupported LayerType value or decoding error. In case of success, // returns (LayerTypeZero, nil). -preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R13) +preserves acc(sl.Bytes(b, 0, len(b)), R13) preserves acc(ltypes, R13) && acc(*ltypes, R13) ensures err != nil ==> err.ErrorMem() func decodingLayerFunc_spec(b []byte, ltypes *[]LayerType) (l LayerType, err error) @@ -28,7 +28,7 @@ type DecodingLayer interface { requires NonInitMem() requires df != nil - preserves acc(slices.AbsSlice_Bytes(data, 0, len(data)), R40) + preserves acc(sl.Bytes(data, 0, len(data)), R40) preserves df.Mem() ensures res == nil ==> Mem(data) ensures res != nil ==> (NonInitMem() && res.ErrorMem()) diff --git a/verification/dependencies/github.com/google/gopacket/writer.gobra b/verification/dependencies/github.com/google/gopacket/writer.gobra index f20973514..7cee58d03 100644 --- a/verification/dependencies/github.com/google/gopacket/writer.gobra +++ b/verification/dependencies/github.com/google/gopacket/writer.gobra @@ -9,7 +9,7 @@ package gopacket import . 
"github.com/scionproto/scion/verification/utils/definitions" -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" type SerializableLayer interface { pred Mem(ubuf []byte) @@ -17,7 +17,7 @@ type SerializableLayer interface { requires !opts.FixLengths requires b != nil && b.Mem() requires Mem(ubuf) - preserves slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)) + preserves sl.Bytes(ubuf, 0, len(ubuf)) ensures err == nil ==> (Mem(ubuf) && b.Mem()) ensures err != nil ==> err.ErrorMem() decreases @@ -35,7 +35,7 @@ type SerializeOptions struct { type SerializeBuffer interface { pred Mem() - // morally, corresponds to slices.AbsSlice_Bytes(ub, 0, len(ub)) --* (Mem() && UBuf() === ub) + // morally, corresponds to sl.Bytes(ub, 0, len(ub)) --* (Mem() && UBuf() === ub) pred MemWithoutUBuf(ub []byte) ghost @@ -47,21 +47,21 @@ type SerializeBuffer interface { ghost requires Mem() ensures res === old(UBuf()) - ensures slices.AbsSlice_Bytes(res, 0, len(res)) + ensures sl.Bytes(res, 0, len(res)) ensures MemWithoutUBuf(res) decreases ExchangePred() (res []byte) ghost requires MemWithoutUBuf(ub) - requires slices.AbsSlice_Bytes(ub, 0, len(ub)) + requires sl.Bytes(ub, 0, len(ub)) ensures Mem() && UBuf() === ub decreases RestoreMem(ghost ub []byte) requires Mem() ensures res === old(UBuf()) - ensures slices.AbsSlice_Bytes(res, 0, len(res)) + ensures sl.Bytes(res, 0, len(res)) ensures MemWithoutUBuf(res) decreases Bytes() (res []byte) diff --git a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra b/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra index e09aca4e4..b985da7b2 100644 --- a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra +++ b/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra @@ -52,8 +52,8 @@ pred (m *Message) Mem() { acc(m) && len(m.Buffers) == 1 && acc(&m.Buffers[0]) && - sl.AbsSlice_Bytes(m.Buffers[0], 0, len(m.Buffers[0])) && - sl.AbsSlice_Bytes(m.OOB, 0, len(m.OOB)) && + sl.Bytes(m.Buffers[0], 0, len(m.Buffers[0])) && + sl.Bytes(m.OOB, 0, len(m.OOB)) && // typeOf(m.Addr) == type[*net.UDPAddr] && ((m.Addr != nil && m.IsActive && !m.WildcardPerm) ==> m.Addr.Mem()) && ((m.Addr != nil && m.IsActive && m.WildcardPerm) ==> acc(m.Addr.Mem(), _)) && diff --git a/verification/dependencies/golang.org/x/net/internal/socket/socket_test.gobra b/verification/dependencies/golang.org/x/net/internal/socket/socket_test.gobra index 21b652166..9008c38e0 100644 --- a/verification/dependencies/golang.org/x/net/internal/socket/socket_test.gobra +++ b/verification/dependencies/golang.org/x/net/internal/socket/socket_test.gobra @@ -16,12 +16,12 @@ package socket -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" func foldMem_test() { var m@ Message - fold slices.AbsSlice_Bytes(m.OOB, 0, len(m.OOB)) + fold sl.Bytes(m.OOB, 0, len(m.OOB)) m.Buffers = make([][]byte, 1) - fold slices.AbsSlice_Bytes(m.Buffers[0], 0, len(m.Buffers[0])) + fold sl.Bytes(m.Buffers[0], 0, len(m.Buffers[0])) fold m.Mem() } \ No newline at end of file diff --git a/verification/dependencies/net/ip.gobra b/verification/dependencies/net/ip.gobra index 2619530d9..5f552f876 100644 --- a/verification/dependencies/net/ip.gobra +++ b/verification/dependencies/net/ip.gobra @@ -10,7 +10,7 @@ package net import . 
"github.com/scionproto/scion/verification/utils/definitions" -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // IP address lengths (bytes). const ( @@ -133,8 +133,8 @@ func (ip *IP) UnmarshalText(text []byte) error // considered to be equal. // (VerifiedSCION) we consider this function to be morally pure pure -requires acc(slices.AbsSlice_Bytes(ip, 0, len(ip)), _) -requires acc(slices.AbsSlice_Bytes(x, 0, len(x)), _) +requires acc(sl.Bytes(ip, 0, len(ip)), _) +requires acc(sl.Bytes(x, 0, len(x)), _) decreases _ func (ip IP) Equal(x IP) bool diff --git a/verification/dependencies/net/net.gobra b/verification/dependencies/net/net.gobra index e2ff126be..9b3003c20 100644 --- a/verification/dependencies/net/net.gobra +++ b/verification/dependencies/net/net.gobra @@ -13,7 +13,7 @@ import ( "time" . "github.com/scionproto/scion/verification/utils/definitions" - "github.com/scionproto/scion/verification/utils/slices" + sl "github.com/scionproto/scion/verification/utils/slices" ) // Addr represents a network end point address. @@ -36,14 +36,14 @@ type Conn interface { // Read reads data from the connection. requires acc(Mem(), _) - preserves slices.AbsSlice_Bytes(b, 0, len(b)) + preserves sl.Bytes(b, 0, len(b)) ensures err == nil ==> 0 <= n && n <= len(b) ensures err != nil ==> err.ErrorMem() Read(b []byte) (n int, err error) // Write writes data to the connection. preserves acc(Mem(), _) - preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15) + preserves acc(sl.Bytes(b, 0, len(b)), R15) ensures err == nil ==> 0 <= n && n <= len(b) ensures err != nil ==> err.ErrorMem() Write(b []byte) (n int, err error) @@ -93,7 +93,7 @@ type PacketConn interface { pred Mem() preserves acc(Mem(), _) - preserves slices.AbsSlice_Bytes(p, 0, len(p)) + preserves sl.Bytes(p, 0, len(p)) ensures err == nil ==> 0 <= n && n <= len(p) ensures err == nil ==> acc(addr.Mem(), _) ensures err != nil ==> err.ErrorMem() @@ -101,7 +101,7 @@ type PacketConn interface { requires acc(addr.Mem(), _) preserves acc(Mem(), _) - preserves acc(slices.AbsSlice_Bytes(p, 0, len(p)), R15) + preserves acc(sl.Bytes(p, 0, len(p)), R15) ensures err == nil ==> 0 <= n && n <= len(p) ensures err != nil ==> err.ErrorMem() WriteTo(p []byte, addr Addr) (n int, err error) diff --git a/verification/dependencies/net/udpsock.gobra b/verification/dependencies/net/udpsock.gobra index 20d23a34e..d1a9f2b71 100644 --- a/verification/dependencies/net/udpsock.gobra +++ b/verification/dependencies/net/udpsock.gobra @@ -12,7 +12,7 @@ package net import "time" import . "github.com/scionproto/scion/verification/utils/definitions" -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // UDPAddr represents the address of a UDP end point. type UDPAddr struct { @@ -24,7 +24,7 @@ type UDPAddr struct { pred (a *UDPAddr) Mem() { // The second conjunct should be eventually replaced by a.IP.Mem(). // However, doing this at the moment requires changes in the VerifiedSCION codebase. - acc(a, R5) && slices.AbsSlice_Bytes(a.IP, 0, len(a.IP)) + acc(a, R5) && sl.Bytes(a.IP, 0, len(a.IP)) } (*UDPAddr) implements Addr { @@ -58,7 +58,7 @@ pred (u *UDPConn) Mem() { // ReadFromUDP acts like ReadFrom but returns a UDPAddr. 
preserves acc(c.Mem(), _)
-preserves slices.AbsSlice_Bytes(b, 0, len(b))
+preserves sl.Bytes(b, 0, len(b))
ensures err == nil ==> 0 <= n && n <= len(b)
ensures err == nil ==> acc(addr.Mem(), _)
ensures err != nil ==> err.ErrorMem()
@@ -66,7 +66,7 @@ func (c *UDPConn) ReadFromUDP(b []byte) (n int, addr *UDPAddr, err error)

// ReadFrom implements the PacketConn ReadFrom method.
preserves acc(c.Mem(), _)
-preserves slices.AbsSlice_Bytes(b, 0, len(b))
+preserves sl.Bytes(b, 0, len(b))
ensures err == nil ==> 0 <= n && n <= len(b)
ensures err == nil ==> acc(addr.Mem(), _)
ensures err != nil ==> err.ErrorMem()
@@ -82,7 +82,7 @@ func (c *UDPConn) WriteToUDP(b []byte, addr *UDPAddr) (int, error)

// WriteTo implements the PacketConn WriteTo method.
requires acc(addr.Mem(), _)
preserves acc(c.Mem(), _)
-preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15)
+preserves acc(sl.Bytes(b, 0, len(b)), R15)
ensures err == nil ==> 0 <= n && n <= len(b)
ensures err != nil ==> err.ErrorMem()
func (c *UDPConn) WriteTo(b []byte, addr Addr) (n int, err error)
@@ -136,7 +136,7 @@ decreases _
func (c *UDPConn) SetReadBuffer(bytes int) (err error)

preserves acc(c.Mem(), _)
-preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15)
+preserves acc(sl.Bytes(b, 0, len(b)), R15)
ensures err == nil ==> 0 <= n && n <= len(b)
ensures err != nil ==> err.ErrorMem()
func (c *UDPConn) Write(b []byte) (n int, err error)
\ No newline at end of file
diff --git a/verification/utils/slices/slices.gobra b/verification/utils/slices/slices.gobra
index 4ca932905..d44e840b0 100644
--- a/verification/utils/slices/slices.gobra
+++ b/verification/utils/slices/slices.gobra
@@ -23,7 +23,7 @@ package slices
// - For each type, there might be two different types of operations: those that keep track
// of contents (the name of the operation ends in "C"), and those that do not.
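// A minimal sketch of the idiomatic client-side workflow with the Bytes
// predicate defined below (clients import this package as sl; p stands
// for any positive permission amount, e.g. R15 or writePerm):
//
//   fold sl.Bytes(buf, 0, len(buf))           // wrap a freshly allocated buffer
//   sl.SplitRange_Bytes(buf, start, end, p)   // carve out buf[start:end]
//   // ... use sl.Bytes(buf[start:end], 0, end-start) ...
//   sl.CombineRange_Bytes(buf, start, end, p) // restore sl.Bytes(buf, 0, len(buf))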
-pred AbsSlice_Bytes(s []byte, start int, end int) { +pred Bytes(s []byte, start int, end int) { // start inclusive 0 <= start && start <= end && @@ -33,71 +33,61 @@ pred AbsSlice_Bytes(s []byte, start int, end int) { } pure -requires acc(AbsSlice_Bytes(s, start, end), _) +requires acc(Bytes(s, start, end), _) requires start <= i && i < end decreases func GetByte(s []byte, start int, end int, i int) byte { - return unfolding acc(AbsSlice_Bytes(s, start, end), _) in s[i] + return unfolding acc(Bytes(s, start, end), _) in s[i] } ghost requires 0 < p -requires acc(AbsSliceC_Bytes(s, 0, len(s), contents), p) -ensures acc(AbsSlice_Bytes(s, 0, len(s)), p) -decreases -func GetAbsSlice_Bytes(s []byte, contents seq[byte], p perm) { - unfold acc(AbsSliceC_Bytes(s, 0, len(s), contents), p) - fold acc(AbsSlice_Bytes(s, 0, len(s)), p) -} - -ghost -requires 0 < p -requires acc(AbsSlice_Bytes(s, start, end), p) +requires acc(Bytes(s, start, end), p) requires start <= idx && idx <= end -ensures acc(AbsSlice_Bytes(s, start, idx), p) -ensures acc(AbsSlice_Bytes(s, idx, end), p) +ensures acc(Bytes(s, start, idx), p) +ensures acc(Bytes(s, idx, end), p) decreases func SplitByIndex_Bytes(s []byte, start int, end int, idx int, p perm) { - unfold acc(AbsSlice_Bytes(s, start, end), p) - fold acc(AbsSlice_Bytes(s, start, idx), p) - fold acc(AbsSlice_Bytes(s, idx, end), p) + unfold acc(Bytes(s, start, end), p) + fold acc(Bytes(s, start, idx), p) + fold acc(Bytes(s, idx, end), p) } ghost requires 0 < p -requires acc(AbsSlice_Bytes(s, start, idx), p) -requires acc(AbsSlice_Bytes(s, idx, end), p) -ensures acc(AbsSlice_Bytes(s, start, end), p) +requires acc(Bytes(s, start, idx), p) +requires acc(Bytes(s, idx, end), p) +ensures acc(Bytes(s, start, end), p) decreases func CombineAtIndex_Bytes(s []byte, start int, end int, idx int, p perm) { - unfold acc(AbsSlice_Bytes(s, start, idx), p) - unfold acc(AbsSlice_Bytes(s, idx, end), p) - fold acc(AbsSlice_Bytes(s, start, end), p) + unfold acc(Bytes(s, start, idx), p) + unfold acc(Bytes(s, idx, end), p) + fold acc(Bytes(s, start, end), p) } ghost requires 0 < p -requires acc(AbsSlice_Bytes(s, start, end), p) +requires acc(Bytes(s, start, end), p) // the following precondition convinces Gobra that // the slice operation is well-formed -requires unfolding acc(AbsSlice_Bytes(s, start, end), p) in true -ensures acc(AbsSlice_Bytes(s[start:end], 0, len(s[start:end])), p) +requires unfolding acc(Bytes(s, start, end), p) in true +ensures acc(Bytes(s[start:end], 0, len(s[start:end])), p) decreases func Reslice_Bytes(s []byte, start int, end int, p perm) { - unfold acc(AbsSlice_Bytes(s, start, end), p) + unfold acc(Bytes(s, start, end), p) assert forall i int :: { &s[start:end][i] }{ &s[start + i] } 0 <= i && i < (end-start) ==> &s[start:end][i] == &s[start + i] - fold acc(AbsSlice_Bytes(s[start:end], 0, len(s[start:end])), p) + fold acc(Bytes(s[start:end], 0, len(s[start:end])), p) } ghost requires 0 < p requires 0 <= start && start <= end && end <= cap(s) requires len(s[start:end]) <= cap(s) -requires acc(AbsSlice_Bytes(s[start:end], 0, len(s[start:end])), p) -ensures acc(AbsSlice_Bytes(s, start, end), p) +requires acc(Bytes(s[start:end], 0, len(s[start:end])), p) +ensures acc(Bytes(s, start, end), p) decreases func Unslice_Bytes(s []byte, start int, end int, p perm) { - unfold acc(AbsSlice_Bytes(s[start:end], 0, len(s[start:end])), p) + unfold acc(Bytes(s[start:end], 0, len(s[start:end])), p) assert 0 <= start && start <= end && end <= cap(s) assert forall i int :: { &s[start:end][i] 
} 0 <= i && i < len(s[start:end]) ==> acc(&s[start:end][i], p) assert forall i int :: { &s[start:end][i] }{ &s[start + i] } 0 <= i && i < len(s[start:end]) ==> &s[start:end][i] == &s[start + i] @@ -113,16 +103,16 @@ func Unslice_Bytes(s []byte, start int, end int, p perm) { assert acc(&s[start + j], p) assert forall i int :: { &s[i] } start <= i && i <= start+j ==> acc(&s[i], p) } - fold acc(AbsSlice_Bytes(s, start, end), p) + fold acc(Bytes(s, start, end), p) } ghost requires 0 < p requires 0 <= start && start <= end && end <= len(s) -requires acc(AbsSlice_Bytes(s, 0, len(s)), p) -ensures acc(AbsSlice_Bytes(s[start:end], 0, end-start), p) -ensures acc(AbsSlice_Bytes(s, 0, start), p) -ensures acc(AbsSlice_Bytes(s, end, len(s)), p) +requires acc(Bytes(s, 0, len(s)), p) +ensures acc(Bytes(s[start:end], 0, end-start), p) +ensures acc(Bytes(s, 0, start), p) +ensures acc(Bytes(s, end, len(s)), p) decreases func SplitRange_Bytes(s []byte, start int, end int, p perm) { SplitByIndex_Bytes(s, 0, len(s), start, p) @@ -133,10 +123,10 @@ func SplitRange_Bytes(s []byte, start int, end int, p perm) { ghost requires 0 < p requires 0 <= start && start <= end && end <= len(s) -requires acc(AbsSlice_Bytes(s[start:end], 0, end-start), p) -requires acc(AbsSlice_Bytes(s, 0, start), p) -requires acc(AbsSlice_Bytes(s, end, len(s)), p) -ensures acc(AbsSlice_Bytes(s, 0, len(s)), p) +requires acc(Bytes(s[start:end], 0, end-start), p) +requires acc(Bytes(s, 0, start), p) +requires acc(Bytes(s, end, len(s)), p) +ensures acc(Bytes(s, 0, len(s)), p) decreases func CombineRange_Bytes(s []byte, start int, end int, p perm) { Unslice_Bytes(s, start, end, p) @@ -145,10 +135,10 @@ func CombineRange_Bytes(s []byte, start int, end int, p perm) { } ghost -ensures AbsSlice_Bytes(nil, 0, 0) +ensures Bytes(nil, 0, 0) decreases func NilAcc_Bytes() { - fold AbsSlice_Bytes(nil, 0, 0) + fold Bytes(nil, 0, 0) } /** Auxiliar definitions Any **/ @@ -164,81 +154,3 @@ pure func NewSeq_Any(size int) (res seq[any]) // ResliceC_Any /** End of Auxiliar definitions Any **/ - -/** Slices of Any without Contents **/ -pred AbsSlice_Any(s []any, start int, end int) { - // start inclusive - 0 <= start && - start <= end && - // end exclusive - end <= cap(s) && - forall i int :: { &s[i] } start <= i && i < end ==> acc(&s[i]) -} - -ghost -requires 0 < p -requires acc(AbsSlice_Any(s, start, end), p) -requires start <= idx && idx <= end -ensures acc(AbsSlice_Any(s, start, idx), p) -ensures acc(AbsSlice_Any(s, idx, end), p) -decreases -func SplitByIndex_Any(s []any, start int, end int, idx int, p perm) { - unfold acc(AbsSlice_Any(s, start, end), p) - fold acc(AbsSlice_Any(s, start, idx), p) - fold acc(AbsSlice_Any(s, idx, end), p) -} - -ghost -requires 0 < p -requires acc(AbsSlice_Any(s, start, idx), p) -requires acc(AbsSlice_Any(s, idx, end), p) -ensures acc(AbsSlice_Any(s, start, end), p) -decreases -func CombineAtIndex_Any(s []any, start int, end int, idx int, p perm) { - unfold acc(AbsSlice_Any(s, start, idx), p) - unfold acc(AbsSlice_Any(s, idx, end), p) - fold acc(AbsSlice_Any(s, start, end), p) -} - -ghost -requires 0 < p -requires acc(AbsSlice_Any(s, start, end), p) -// the following precondition convinces Gobra that -// the slice operation is well-formed -requires unfolding acc(AbsSlice_Any(s, start, end), p) in true -ensures acc(AbsSlice_Any(s[start:end], 0, len(s[start:end])), p) -decreases -func Reslice_Any(s []any, start int, end int, p perm) { - unfold acc(AbsSlice_Any(s, start, end), p) - assert forall i int :: { &s[start:end][i] }{ 
&s[start + i] } 0 <= i && i < (end-start) ==> &s[start:end][i] == &s[start + i] - fold acc(AbsSlice_Any(s[start:end], 0, len(s[start:end])), p) -} - -ghost -requires 0 < p -requires 0 <= start && start <= end && end <= cap(s) -requires len(s[start:end]) <= cap(s) -requires acc(AbsSlice_Any(s[start:end], 0, len(s[start:end])), p) -ensures acc(AbsSlice_Any(s, start, end), p) -decreases -func Unslice_Any(s []any, start int, end int, p perm) { - unfold acc(AbsSlice_Any(s[start:end], 0, len(s[start:end])), p) - assert 0 <= start && start <= end && end <= cap(s) - assert forall i int :: { &s[start:end][i] } 0 <= i && i < len(s[start:end]) ==> acc(&s[start:end][i], p) - assert forall i int :: { &s[start:end][i] }{ &s[start + i] } 0 <= i && i < len(s[start:end]) ==> &s[start:end][i] == &s[start + i] - - invariant 0 <= j && j <= len(s[start:end]) - invariant forall i int :: { &s[start:end][i] } j <= i && i < len(s[start:end]) ==> acc(&s[start:end][i], p) - invariant forall i int :: { &s[start:end][i] }{ &s[start + i] } 0 <= i && i < len(s[start:end]) ==> &s[start:end][i] == &s[start + i] - invariant forall i int :: { &s[i] } start <= i && i < start+j ==> acc(&s[i], p) - decreases len(s[start:end]) - j - for j := 0; j < len(s[start:end]); j++ { - assert forall i int :: { &s[i] } start <= i && i < start+j ==> acc(&s[i], p) - assert &s[start:end][j] == &s[start + j] - assert acc(&s[start + j], p) - assert forall i int :: { &s[i] } start <= i && i <= start+j ==> acc(&s[i], p) - } - fold acc(AbsSlice_Any(s, start, end), p) -} - -/** End of slices of Any without Contents **/ diff --git a/verification/utils/slices/slices_contents.gobra b/verification/utils/slices/slices_contents.gobra deleted file mode 100644 index 2cdf3a2c1..000000000 --- a/verification/utils/slices/slices_contents.gobra +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2023 ETH Zurich -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +gobra - -// TODO!! -// When there is support for closed functions, we should drop this -// file and describe the contents of the slice in terms of closed -// heap-dependent functions. - -package slices - -// This file contains predicates for describing the contents of slices and -// functions that manipulate the knowledge in those predicates. - -// How to extend this file: -// - if we need to support slices of non-supported types, we must repeat all definitions -// for that type. For this, we should be careful to avoid introducing cylical dependencies. -// The suffix of the predicate/function should be the type of the elems of the slices. -// - For each type, there might be two different types of operations: those that keep track -// of contents (the name of the operation ends in "C"), and those who do not. 
- -import "github.com/scionproto/scion/verification/utils/seqs" - -pred AbsSliceC_Bytes(s []byte, start int, end int, ghost contents seq[byte]) { - // start inclusive - 0 <= start && - start <= end && - // end exclusive - end <= cap(s) && - len(contents) == end - start && - // Maybe, it is worth having different versions with - // different triggers, or using a different trigger - // than the one that is inferred. - forall i int :: { &s[i] } start <= i && i < end ==> - (acc(&s[i]) && s[i] == contents[i - start]) -} - -ghost -requires 0 < p -requires 0 <= start && start <= end && end <= len(s) -requires acc(AbsSlice_Bytes(s, start, end), p) -ensures acc(AbsSliceC_Bytes(s, start, end, contents), p) -decreases -func GetAbsSliceC_Bytes(s []byte, start int, end int, p perm) (contents seq[byte]) { - contents = seqs.NewSeqByte(end - start) - unfold acc(AbsSlice_Bytes(s, start, end), p) - invariant forall k int :: { &s[k] } start <= k && k < end ==> acc(&s[k], p) - invariant start <= i && i <= end - invariant len(contents) == end - start - invariant forall k int :: { contents[k] } start <= k && k < i ==> contents[k - start] == s[k] - decreases end - i - for i := start; i < end; i += 1 { - contents[i - start] = s[i] - } - fold acc(AbsSliceC_Bytes(s, start, end, contents), p) - return contents -} - -ghost -requires 0 < p -requires AbsSliceC_Bytes(s, start, end, contents) -requires start < idx && idx < end -ensures AbsSliceC_Bytes(s, start, idx, contents[:idx-start]) && AbsSliceC_Bytes(s, idx, end, contents[idx-start:]) -decreases -func SplitByIndexC_Bytes(s []byte, start int, end int, idx int, contents seq[byte], p perm) { - unfold AbsSliceC_Bytes(s, start, end, contents) - assert len(contents) == end - start - fold AbsSliceC_Bytes(s, start, idx, contents[:idx-start]) - fold AbsSliceC_Bytes(s, idx, end, contents[idx-start:]) -} - -ghost -requires 0 < p -requires acc(AbsSliceC_Bytes(s, start, idx, contents1), p) -requires acc(AbsSliceC_Bytes(s, idx, end, contents2), p) -ensures acc(AbsSliceC_Bytes(s, start, end, contents1 ++ contents2), p) -decreases -func CombineAtIndexC_Bytes(s []byte, start int, end int, idx int, contents1 seq[byte], contents2 seq[byte], p perm) { - unfold acc(AbsSliceC_Bytes(s, start, idx, contents1), p) - unfold acc(AbsSliceC_Bytes(s, idx, end, contents2), p) - fold acc(AbsSliceC_Bytes(s, start, end, contents1 ++ contents2), p) -} - -ghost -requires AbsSliceC_Bytes(s, start, end, contents) -// the following precondition convinces Gobra that -// the slice operation is well-formed -requires unfolding AbsSliceC_Bytes(s, start, end, contents) in true -ensures AbsSliceC_Bytes(s[start:end], 0, len(s[start:end]), contents) -decreases -func ResliceC_Bytes(s []byte, start int, end int, contents seq[byte]) { - unfold AbsSliceC_Bytes(s, start, end, contents) - assert forall i int :: { &s[start:end][i] }{ &s[start + i] } 0 <= i && i < (end-start) ==> &s[start:end][i] == &s[start + i] - fold AbsSliceC_Bytes(s[start:end], 0, len(s[start:end]), contents) -} - -/** Slices of Any with Contents **/ - -// The elements in contents are required to be comparable -pred AbsSliceC_Any(s []any, start int, end int, ghost contents seq[any]) { - // start inclusive - 0 <= start && - start < end && - // end exclusive - end <= cap(s) && - len(contents) == end - start && - // Maybe, it is worth having different versions with - // different triggers, or using a different trigger - // than the one that is inferred. 
- forall i int :: { &s[i] } start <= i && i < end ==> (acc(&s[i]) && s[i] === contents[i - start]) -} - -ghost -requires AbsSliceC_Any(s, start, end, contents) -requires start < idx && idx < end -ensures AbsSliceC_Any(s, start, idx, contents[:idx-start]) && AbsSliceC_Any(s, idx, end, contents[idx-start:]) -decreases -func SplitByIndexC_Any(s []any, start int, end int, idx int, contents seq[any]) { - unfold AbsSliceC_Any(s, start, end, contents) - assert len(contents) == end - start - fold AbsSliceC_Any(s, start, idx, contents[:idx-start]) - fold AbsSliceC_Any(s, idx, end, contents[idx-start:]) -} - -/** End of slices of Any with Contents **/ diff --git a/verification/utils/slices/slices_test.gobra b/verification/utils/slices/slices_test.gobra index f78f42147..2d3a10827 100644 --- a/verification/utils/slices/slices_test.gobra +++ b/verification/utils/slices/slices_test.gobra @@ -19,31 +19,9 @@ package slices import "github.com/scionproto/scion/verification/utils/seqs" /** Bytes **/ -func AbsSliceC_Bytes_test() { - s := make([]byte, 10) - ghost contents := seqs.NewSeqByte(10) - fold AbsSliceC_Bytes(s, 0, 10, contents) - // assert false // fails -} -func AbsSlice_Bytes_test() { +func Bytes_test() { s := make([]byte, 10) - fold AbsSlice_Bytes(s, 0, 10) - // assert false // fails -} - -/** Any **/ -func AbsSliceC_Any_test() { - s := make([]any, 1) - var elem interface{} = int(1) - ghost contents := seq[any]{elem} - s[0] = elem - fold AbsSliceC_Any(s, 0, 1, contents) - // assert false // fails -} - -func AbsSlice_Any_test() { - s := make([]any, 10) - fold AbsSlice_Any(s, 0, 10) + fold Bytes(s, 0, 10) // assert false // fails -} +} \ No newline at end of file From f4ed38acf70f02d13410f29f5f31db6a59a92d5b Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Thu, 13 Jun 2024 15:18:28 +0200 Subject: [PATCH 43/57] Drop Assumption in SetInfoField (#350) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * proof setInfoField * fix verification errors * fix syntax error * fix verification errors * formatting * simplification attempt * Apply suggestions from code review Co-authored-by: João Pereira * refactoring * fix verification error * fixed LeftSegEquality() * formatting * Apply suggestions from code review Co-authored-by: João Pereira * feedback * renaming AbsSlice_Bytes to Bytes * adding documentation --------- Co-authored-by: João Pereira --- pkg/slayers/path/infofield.go | 34 +- pkg/slayers/path/infofield_spec.gobra | 36 +- .../path/scion/info_hop_setter_lemmas.gobra | 546 ++++++++++++++++++ pkg/slayers/path/scion/raw.go | 90 +-- pkg/slayers/path/scion/raw_spec.gobra | 92 ++- router/dataplane.go | 4 +- router/io-spec-lemmas.gobra | 14 +- router/io-spec.gobra | 4 +- router/widen-lemma.gobra | 4 +- verification/io/io-spec.gobra | 40 -- verification/io/io_spec_definitions.gobra | 127 ++++ 11 files changed, 810 insertions(+), 181 deletions(-) create mode 100644 pkg/slayers/path/scion/info_hop_setter_lemmas.gobra create mode 100644 verification/io/io_spec_definitions.gobra diff --git a/pkg/slayers/path/infofield.go b/pkg/slayers/path/infofield.go index 754b44846..f3488e768 100644 --- a/pkg/slayers/path/infofield.go +++ b/pkg/slayers/path/infofield.go @@ -62,16 +62,16 @@ type InfoField struct { // path.InfoLen. 
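Review note on the hunk below: the (de)serializer contracts are widened from the first InfoLen bytes to the whole buffer, and their postconditions are now stated against the opaque io.AbsInfoField abstraction. Caller-side, the decode direction then reads as in this sketch (illustrative only; path/sl aliases as used throughout the series):

	// given acc(inf), acc(sl.Bytes(raw, 0, len(raw)), R45), and len(raw) >= path.InfoLen:
	err := inf.DecodeFromBytes(raw)   // the contract guarantees err == nil
	//@ assert path.BytesToAbsInfoField(raw, 0) == inf.ToAbsInfoField()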
// @ requires len(raw) >= InfoLen // @ preserves acc(inf) -// @ preserves acc(slices.Bytes(raw, 0, InfoLen), R45) +// @ preserves acc(slices.Bytes(raw, 0, len(raw)), R45) // @ ensures err == nil -// @ ensures BytesToIntermediateAbsInfoField(raw, 0, 0, InfoLen) == -// @ inf.ToIntermediateAbsInfoField() +// @ ensures BytesToAbsInfoField(raw, 0) == +// @ inf.ToAbsInfoField() // @ decreases func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { if len(raw) < InfoLen { return serrors.New("InfoField raw too short", "expected", InfoLen, "actual", len(raw)) } - //@ unfold acc(slices.Bytes(raw, 0, InfoLen), R50) + //@ unfold acc(slices.Bytes(raw, 0, len(raw)), R50) inf.ConsDir = raw[0]&0x1 == 0x1 inf.Peer = raw[0]&0x2 == 0x2 //@ assert &raw[2:4][0] == &raw[2] && &raw[2:4][1] == &raw[3] @@ -79,7 +79,9 @@ func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { //@ assert &raw[4:8][0] == &raw[4] && &raw[4:8][1] == &raw[5] //@ assert &raw[4:8][2] == &raw[6] && &raw[4:8][3] == &raw[7] inf.Timestamp = binary.BigEndian.Uint32(raw[4:8]) - //@ fold acc(slices.Bytes(raw, 0, InfoLen), R50) + //@ fold acc(slices.Bytes(raw, 0, len(raw)), R50) + //@ assert reveal BytesToAbsInfoField(raw, 0) == + //@ inf.ToAbsInfoField() return nil } @@ -87,30 +89,30 @@ func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { // path.InfoLen. // @ requires len(b) >= InfoLen // @ preserves acc(inf, R10) -// @ preserves slices.Bytes(b, 0, InfoLen) +// @ preserves slices.Bytes(b, 0, len(b)) // @ ensures err == nil -// @ ensures inf.ToIntermediateAbsInfoField() == -// @ BytesToIntermediateAbsInfoField(b, 0, 0, InfoLen) +// @ ensures inf.ToAbsInfoField() == +// @ BytesToAbsInfoField(b, 0) // @ decreases func (inf *InfoField) SerializeTo(b []byte) (err error) { if len(b) < InfoLen { return serrors.New("buffer for InfoField too short", "expected", InfoLen, "actual", len(b)) } - //@ ghost targetAbsInfo := inf.ToIntermediateAbsInfoField() - //@ unfold slices.Bytes(b, 0, InfoLen) + //@ ghost targetAbsInfo := inf.ToAbsInfoField() + //@ unfold slices.Bytes(b, 0, len(b)) b[0] = 0 if inf.ConsDir { b[0] |= 0x1 } - //@ ghost tmpInfo1 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen) + //@ ghost tmpInfo1 := BytesToAbsInfoFieldHelper(b, 0) //@ bits.InfoFieldFirstByteSerializationLemmas() //@ assert tmpInfo1.ConsDir == targetAbsInfo.ConsDir //@ ghost firstByte := b[0] if inf.Peer { b[0] |= 0x2 } - //@ tmpInfo2 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen) + //@ tmpInfo2 := BytesToAbsInfoFieldHelper(b, 0) //@ assert tmpInfo2.Peer == (b[0] & 0x2 == 0x2) //@ assert tmpInfo2.ConsDir == (b[0] & 0x1 == 0x1) //@ assert tmpInfo2.Peer == targetAbsInfo.Peer @@ -119,14 +121,16 @@ func (inf *InfoField) SerializeTo(b []byte) (err error) { b[1] = 0 // reserved //@ assert &b[2:4][0] == &b[2] && &b[2:4][1] == &b[3] binary.BigEndian.PutUint16(b[2:4], inf.SegID) - //@ ghost tmpInfo3 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen) + //@ ghost tmpInfo3 := BytesToAbsInfoFieldHelper(b, 0) //@ assert tmpInfo3.UInfo == targetAbsInfo.UInfo //@ assert &b[4:8][0] == &b[4] && &b[4:8][1] == &b[5] //@ assert &b[4:8][2] == &b[6] && &b[4:8][3] == &b[7] binary.BigEndian.PutUint32(b[4:8], inf.Timestamp) - //@ ghost tmpInfo4 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen) + //@ ghost tmpInfo4 := BytesToAbsInfoFieldHelper(b, 0) //@ assert tmpInfo4.AInfo == targetAbsInfo.AInfo - //@ fold slices.Bytes(b, 0, InfoLen) + //@ fold slices.Bytes(b, 0, len(b)) + //@ assert inf.ToAbsInfoField() == + //@ reveal BytesToAbsInfoField(b, 0) 
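Review note: the tmpInfo1..tmpInfo4 snapshots above all instantiate one proof pattern — after each concrete write, re-abstract the buffer and relate only the field that this write settles to the target abstraction. Schematically (F is a placeholder for the field concerned, not real syntax):

	b[k] = ...                                         // concrete write
	//@ ghost tmp := BytesToAbsInfoFieldHelper(b, 0)   // re-abstract the buffer
	//@ assert tmp.F == targetAbsInfo.F                // only F is settled by this write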
return nil } diff --git a/pkg/slayers/path/infofield_spec.gobra b/pkg/slayers/path/infofield_spec.gobra index 3b52222d2..da554ab37 100644 --- a/pkg/slayers/path/infofield_spec.gobra +++ b/pkg/slayers/path/infofield_spec.gobra @@ -75,39 +75,29 @@ pure func AbsUinfo(raw []byte, currINF int, headerOffset int) set[io.IO_msgterm] AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[idx:idx+2])) } -// This type simplifies the infoField, making it easier -// to use than the IO_seg3 from the IO-spec. -type IntermediateAbsInfoField adt { - IntermediateAbsInfoField_ { - AInfo io.IO_ainfo - UInfo set[io.IO_msgterm] - ConsDir bool - Peer bool - } -} - ghost -requires 0 <= start && start <= middle -requires middle+InfoLen <= end && end <= len(raw) -requires acc(sl.Bytes(raw, start, end), _) +opaque +requires 0 <= middle +requires middle+InfoLen <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), _) decreases -pure func BytesToIntermediateAbsInfoField(raw [] byte, start int, middle int, end int) (IntermediateAbsInfoField) { - return unfolding acc(sl.Bytes(raw, start, end), _) in - BytesToIntermediateAbsInfoFieldHelper(raw, middle, end) +pure func BytesToAbsInfoField(raw [] byte, middle int) (io.AbsInfoField) { + return unfolding acc(sl.Bytes(raw, 0, len(raw)), _) in + BytesToAbsInfoFieldHelper(raw, middle) } ghost requires 0 <= middle -requires middle+InfoLen <= end && end <= len(raw) -requires forall i int :: { &raw[i] } middle <= i && i < end ==> +requires middle+InfoLen <= len(raw) +requires forall i int :: { &raw[i] } middle <= i && i < len(raw) ==> acc(&raw[i], _) decreases -pure func BytesToIntermediateAbsInfoFieldHelper(raw [] byte, middle int, end int) (IntermediateAbsInfoField) { +pure func BytesToAbsInfoFieldHelper(raw [] byte, middle int) (io.AbsInfoField) { return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> &raw[middle+2:middle+4][k] == &raw[middle+2 + k]) in let _ := Asserting(forall k int :: {&raw[middle+4:middle+8][k]} 0 <= k && k < 4 ==> &raw[middle+4:middle+8][k] == &raw[middle+4 + k]) in - IntermediateAbsInfoField(IntermediateAbsInfoField_{ + io.AbsInfoField(io.AbsInfoField_{ AInfo : io.IO_ainfo(binary.BigEndian.Uint32(raw[middle+4:middle+8])), UInfo : AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[middle+2:middle+4])), ConsDir : raw[middle] & 0x1 == 0x1, @@ -117,8 +107,8 @@ pure func BytesToIntermediateAbsInfoFieldHelper(raw [] byte, middle int, end int ghost decreases -pure func (inf InfoField) ToIntermediateAbsInfoField() (IntermediateAbsInfoField) { - return IntermediateAbsInfoField(IntermediateAbsInfoField_{ +pure func (inf InfoField) ToAbsInfoField() (io.AbsInfoField) { + return io.AbsInfoField(io.AbsInfoField_{ AInfo : io.IO_ainfo(inf.Timestamp), UInfo : AbsUInfoFromUint16(inf.SegID), ConsDir : inf.ConsDir, diff --git a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra new file mode 100644 index 000000000..2ec25d54a --- /dev/null +++ b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra @@ -0,0 +1,546 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +gobra
+
+package scion
+
+import (
+	"github.com/scionproto/scion/pkg/slayers/path"
+	. "verification/utils/definitions"
+	sl "verification/utils/slices"
+	"verification/io"
+)
+
+/*** This file contains helpful lemmas for proving SetInfoField and SetHopfield. ***/
+// Our abstract translation functions (CurrSeg, LeftSeg, RightSeg, MidSeg) are defined based on the
+// entire byte slice of the concrete packet. This approach makes proving updates to the bytes very difficult.
+// In this file, we introduce new translation functions that rely only on the hopfields byte slice and
+// the infofield of a segment. We prove that these new functions are equivalent to the original ones
+// and can be translated to each other. With these new functions, the proofs for SetInfoField and SetHopfield
+// are greatly simplified.
+
+
+// InfofieldByteSlice returns the byte slice of the infofield corresponding to the
+// specified currInfIdx argument. Although a packet can have only three infofields,
+// we use currInfIdx == 4 to represent the first infofield in our translation from
+// concrete packets to abstract packets. This requires the special case that
+// currInfIdx == 4 returns the same as currInfIdx == 0.
+ghost
+requires 0 <= currInfIdx
+requires path.InfoFieldOffset(currInfIdx, MetaLen) + path.InfoLen <= len(raw)
+decreases
+pure func InfofieldByteSlice(raw []byte, currInfIdx int) ([]byte) {
+	return let infOffset := currInfIdx == 4 ?
+		path.InfoFieldOffset(0, MetaLen) :
+		path.InfoFieldOffset(currInfIdx, MetaLen) in
+		raw[infOffset:infOffset+path.InfoLen]
+}
+
+// HopfieldsStartIdx returns the index of the first byte of the hopfields of a segment
+// specified by the currInfIdx argument. Although a packet can have only three segments,
+// we use currInfIdx == 4 to represent the first segment in our translation from
+// concrete packets to abstract packets. This requires the special case that
+// currInfIdx == 4 returns the same as currInfIdx == 0.
+ghost
+requires s.Valid()
+requires 0 <= currInfIdx
+decreases
+pure func HopfieldsStartIdx(currInfIdx int, s io.SegLens) int {
+	return let numInf := s.NumInfoFields() in
+		let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
+		(currInfIdx == 0 || currInfIdx == 4) ? infOffset :
+		currInfIdx == 1 ? infOffset+s.Seg1Len*path.HopLen :
+		infOffset+(s.Seg1Len+s.Seg2Len)*path.HopLen
+}
+
+// HopfieldsEndIdx returns the index of the last byte of the hopfields of a segment
+// specified by the currInfIdx argument. Although a packet can have only three segments,
+// we use currInfIdx == 4 to represent the first segment in our translation from
+// concrete packets to abstract packets. This requires the special case that
+// currInfIdx == 4 returns the same as currInfIdx == 0.
+ghost
+requires s.Valid()
+requires 0 <= currInfIdx
+decreases
+pure func HopfieldsEndIdx(currInfIdx int, s io.SegLens) int {
+	return let numInf := s.NumInfoFields() in
+		let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
+		(currInfIdx == 0 || currInfIdx == 4) ? infOffset+s.Seg1Len*path.HopLen :
+		currInfIdx == 1 ? infOffset+(s.Seg1Len+s.Seg2Len)*path.HopLen :
+		infOffset+(s.Seg1Len+s.Seg2Len+s.Seg3Len)*path.HopLen
+}
+
+// HopfieldsByteSlice returns the byte slice of the hopfields of a segment
+// specified by the currInfIdx argument. Although a packet can have only three segments,
+// we use currInfIdx == 4 to represent the first segment in our translation from
+// concrete packets to abstract packets. This requires the special case that
+// currInfIdx == 4 returns the same as currInfIdx == 0.
+ghost
+requires s.Valid()
+requires 0 <= currInfIdx
+requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw)
+decreases
+pure func HopfieldsByteSlice(raw []byte, currInfIdx int, s io.SegLens) ([]byte) {
+	return let numInf := s.NumInfoFields() in
+		let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
+		let start := HopfieldsStartIdx(currInfIdx, s) in
+		let end := HopfieldsEndIdx(currInfIdx, s) in
+		raw[start:end]
+}
+
+// SliceBytesIntoSegments splits the raw bytes of a packet into its hopfield segments.
+ghost
+requires 0 < p
+requires s.Valid()
+requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw)
+requires acc(sl.Bytes(raw, 0, len(raw)), p)
+ensures acc(sl.Bytes(raw[:HopfieldsStartIdx(0, s)], 0, HopfieldsStartIdx(0, s)), p)
+ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 0, s), 0, s.Seg1Len*path.HopLen), p)
+ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 1, s), 0, s.Seg2Len*path.HopLen), p)
+ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 2, s), 0, s.Seg3Len*path.HopLen), p)
+ensures acc(sl.Bytes(raw[HopfieldsEndIdx(2, s):], 0, len(raw[HopfieldsEndIdx(2, s):])), p)
+decreases
+func SliceBytesIntoSegments(raw []byte, s io.SegLens, p perm) {
+	sl.SplitByIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, s), p)
+	sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(0, s), len(raw), HopfieldsEndIdx(0, s), p)
+	sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(1, s), len(raw), HopfieldsEndIdx(1, s), p)
+	sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(2, s), len(raw), HopfieldsEndIdx(2, s), p)
+	sl.Reslice_Bytes(raw, 0, HopfieldsStartIdx(0, s), p)
+	sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, s), HopfieldsEndIdx(0, s), p)
+	sl.Reslice_Bytes(raw, HopfieldsStartIdx(1, s), HopfieldsEndIdx(1, s), p)
+	sl.Reslice_Bytes(raw, HopfieldsStartIdx(2, s), HopfieldsEndIdx(2, s), p)
+	sl.Reslice_Bytes(raw, HopfieldsEndIdx(2, s), len(raw), p)
+}
+
+// CombineBytesFromSegments combines the three hopfield segments of a packet into a single slice of bytes.
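Review note: SliceBytesIntoSegments above and the function below are designed as a matched pair that brackets an update, isolating the per-segment byte permissions while the packet is modified. SetInfoField later in this patch uses exactly this shape; a sketch with the names and permission amount used there:

	//@ SliceBytesIntoSegments(ubuf, segLens, R40)
	//@ // ... update bytes, framing each HopfieldsByteSlice(ubuf, i, segLens) ...
	//@ CombineBytesFromSegments(ubuf, segLens, R40)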
+ghost +requires 0 < p +requires s.Valid() +requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires acc(sl.Bytes(raw[:HopfieldsStartIdx(0, s)], 0, HopfieldsStartIdx(0, s)), p) +requires acc(sl.Bytes(HopfieldsByteSlice(raw, 0, s), 0, s.Seg1Len*path.HopLen), p) +requires acc(sl.Bytes(HopfieldsByteSlice(raw, 1, s), 0, s.Seg2Len*path.HopLen), p) +requires acc(sl.Bytes(HopfieldsByteSlice(raw, 2, s), 0, s.Seg3Len*path.HopLen), p) +requires acc(sl.Bytes(raw[HopfieldsEndIdx(2, s):], 0, len(raw[HopfieldsEndIdx(2, s):])), p) +ensures acc(sl.Bytes(raw, 0, len(raw)), p) +decreases +func CombineBytesFromSegments(raw []byte, s io.SegLens, p perm) { + sl.Unslice_Bytes(raw, HopfieldsEndIdx(2, s), len(raw), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(2, s), HopfieldsEndIdx(2, s), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(1, s), HopfieldsEndIdx(1, s), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, s), HopfieldsEndIdx(0, s), p) + sl.Unslice_Bytes(raw, 0, HopfieldsStartIdx(0, s), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(2, s), len(raw), HopfieldsEndIdx(2, s), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(1, s), len(raw), HopfieldsEndIdx(1, s), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(0, s), len(raw), HopfieldsEndIdx(0, s), p) + sl.CombineAtIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, s), p) +} + +// SliceBytesIntoInfoFields splits the raw bytes of a packet into its infofields +ghost +requires 0 < p +requires s.Valid() +requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires numInf == s.NumInfoFields() +requires acc(sl.Bytes(raw, 0, len(raw)), p) +ensures acc(sl.Bytes(raw[:MetaLen], 0, MetaLen), p) +ensures acc(sl.Bytes(InfofieldByteSlice(raw, 0), 0, path.InfoLen), p) +ensures 1 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 1), 0, path.InfoLen), p) +ensures 2 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 2), 0, path.InfoLen), p) +ensures acc(sl.Bytes(raw[HopfieldsStartIdx(0, s):], 0, len(raw[HopfieldsStartIdx(0, s):])), p) +decreases +func SliceBytesIntoInfoFields(raw []byte, numInf int, s io.SegLens, p perm) { + sl.SplitByIndex_Bytes(raw, 0, len(raw), MetaLen, p) + sl.SplitByIndex_Bytes(raw, MetaLen, len(raw), path.InfoFieldOffset(1, MetaLen), p) + sl.Reslice_Bytes(raw, 0, MetaLen, p) + sl.Reslice_Bytes(raw, MetaLen, path.InfoFieldOffset(1, MetaLen), p) + if(numInf > 1) { + sl.SplitByIndex_Bytes(raw, path.InfoFieldOffset(1, MetaLen), len(raw), + path.InfoFieldOffset(2, MetaLen), p) + sl.Reslice_Bytes(raw, path.InfoFieldOffset(1, MetaLen), + path.InfoFieldOffset(2, MetaLen), p) + } + if(numInf > 2) { + sl.SplitByIndex_Bytes(raw, path.InfoFieldOffset(2, MetaLen), len(raw), + HopfieldsStartIdx(0, s), p) + sl.Reslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, s), p) + } + sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, s), len(raw), p) +} + +// CombineBytesFromInfoFields combines the infofields of a packet into a single slice of bytes. 
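Review note: together with SliceBytesIntoInfoFields above, the lemma below closes the second bracket that SetInfoField needs. The overall ghost choreography there is, schematically (ubuf, segLens, idx as in that function):

	//@ SliceBytesIntoInfoFields(ubuf, s.NumINF, segLens, HalfPerm)
	//@ SliceBytesIntoSegments(ubuf, segLens, R40)
	//@ // ... serialize the new info field into InfofieldByteSlice(ubuf, idx) ...
	//@ CombineBytesFromSegments(ubuf, segLens, R40)
	//@ CombineBytesFromInfoFields(ubuf, s.NumINF, segLens, HalfPerm)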
+ghost +requires 0 < p +requires s.Valid() +requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires numInf == s.NumInfoFields() +requires acc(sl.Bytes(raw[:MetaLen], 0, MetaLen), p) +requires acc(sl.Bytes(InfofieldByteSlice(raw, 0), 0, path.InfoLen), p) +requires 1 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 1), 0, path.InfoLen), p) +requires 2 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 2), 0, path.InfoLen), p) +requires acc(sl.Bytes(raw[HopfieldsStartIdx(0, s):], 0, len(raw[HopfieldsStartIdx(0, s):])), p) +ensures acc(sl.Bytes(raw, 0, len(raw)), p) +decreases +func CombineBytesFromInfoFields(raw []byte, numInf int, s io.SegLens, p perm) { + sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, s), len(raw), p) + if(numInf > 2) { + sl.Unslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, s), p) + sl.CombineAtIndex_Bytes(raw, path.InfoFieldOffset(2, MetaLen), len(raw), + HopfieldsStartIdx(0, s), p) + } + if(numInf > 1) { + sl.Unslice_Bytes(raw, path.InfoFieldOffset(1, MetaLen), + path.InfoFieldOffset(2, MetaLen), p) + sl.CombineAtIndex_Bytes(raw, path.InfoFieldOffset(1, MetaLen), len(raw), + path.InfoFieldOffset(2, MetaLen), p) + } + sl.Unslice_Bytes(raw, MetaLen, path.InfoFieldOffset(1, MetaLen), p) + sl.Unslice_Bytes(raw, 0, MetaLen, p) + sl.CombineAtIndex_Bytes(raw, MetaLen, len(raw), path.InfoFieldOffset(1, MetaLen), p) + sl.CombineAtIndex_Bytes(raw, 0, len(raw), MetaLen, p) +} + +// CurrSegWithInfo returns the abstract representation of the current segment of a packet. +// Unlike CurrSeg, it relies solely on the hopfield byte slice and an infofield instead of +// the entire raw bytes of the packet. This approach simplifies the verification of changes +// within a segment after updates to the packet's raw bytes. +ghost +opaque +requires 0 < SegLen +requires 0 <= currHfIdx && currHfIdx <= SegLen +requires SegLen*path.HopLen == len(hopfields) +requires acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) +decreases +pure func CurrSegWithInfo(hopfields []byte, currHfIdx int, SegLen int, inf io.AbsInfoField) io.IO_seg3 { + return segment(hopfields, 0, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, inf.Peer, SegLen) +} + + +// LeftSegWithInfo returns the abstract representation of the next segment of a packet. +// Unlike LeftSeg, it relies solely on the hopfields byte slice and an infofield instead of +// the entire bytes of the packet. Whenever the return value is not none, LeftSegWithInfo +// requires permissions to the hopfields byte slice of the segment specified by currInfIdx. +ghost +opaque +requires s.Valid() +requires (currInfIdx == 1 && s.Seg2Len > 0) || + (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> + let start := HopfieldsStartIdx(currInfIdx, s) in + let end := HopfieldsEndIdx(currInfIdx, s) in + inf != none[io.AbsInfoField] && + len(hopfields) == end-start && + acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) +decreases +pure func LeftSegWithInfo( + hopfields []byte, + currInfIdx int, + s io.SegLens, + inf option[io.AbsInfoField]) option[io.IO_seg3] { + return (currInfIdx == 1 && s.Seg2Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, s.Seg2Len, get(inf))) : + (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, s.Seg3Len, get(inf))) : + none[io.IO_seg3] +} + +// RightSegWithInfo returns the abstract representation of the previous segment of a packet. +// Unlike RightSeg, it relies solely on the hopfields byte slice and an infofield instead of +// the entire bytes of the packet. 
Whenever the return value is not none, RightSegWithInfo +// requires permissions to the hopfields byte slice of the segment specified by currInfIdx. +ghost +opaque +requires s.Valid() +requires (currInfIdx == 0 && s.Seg2Len > 0) || + (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> + let start := HopfieldsStartIdx(currInfIdx, s) in + let end := HopfieldsEndIdx(currInfIdx, s) in + inf != none[io.AbsInfoField] && + len(hopfields) == end-start && + acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) +decreases +pure func RightSegWithInfo( + hopfields []byte, + currInfIdx int, + s io.SegLens, + inf option[io.AbsInfoField]) option[io.IO_seg3] { + return (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, s.Seg2Len, s.Seg2Len, get(inf))) : + (currInfIdx == 0 && s.Seg2Len > 0) ? + some(CurrSegWithInfo(hopfields, s.Seg1Len, s.Seg1Len, get(inf))) : + none[io.IO_seg3] +} + +// MidSegWithInfo returns the abstract representation of the last or first segment of a packet. +// Unlike MidSeg, it relies solely on the hopfields byte slice and an infofield instead of +// the entire bytes of the packet. Whenever the return value is not none, MidSegWithInfo +// requires permissions to the hopfields byte slice of the segment specified by currInfIdx. +ghost +opaque +requires s.Valid() +requires (s.Seg2Len > 0 && s.Seg3Len > 0 && + (currInfIdx == 2 || currInfIdx == 4)) ==> + let start := HopfieldsStartIdx(currInfIdx, s) in + let end := HopfieldsEndIdx(currInfIdx, s) in + inf != none[io.AbsInfoField] && + len(hopfields) == end-start && + acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) +decreases +pure func MidSegWithInfo( + hopfields []byte, + currInfIdx int, + s io.SegLens, + inf option[io.AbsInfoField]) option[io.IO_seg3] { + return (currInfIdx == 4 && s.Seg2Len > 0 && s.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, s.Seg1Len, s.Seg1Len, get(inf))) : + (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, s.Seg3Len, get(inf))) : + none[io.IO_seg3] +} + +// CurrSegEquality ensures that the two definitions of abstract segments, CurrSegWithInfo(..) +// and CurrSeg(..), represent the same abstract segment. 
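Review note: equality lemmas like the one below are applied on both sides of a byte update, with UpdateCurrSegInfo bridging the two abstract views of the current segment. SetInfoField instantiates the pattern roughly as follows (sketch; offset, hfIdxSeg, segLen, hopfields as defined there):

	//@ CurrSegEquality(ubuf, offset, currInfIdx, hfIdxSeg, segLen)       // old bytes
	ret := info.SerializeTo(s.Raw[infOffset : infOffset+path.InfoLen])
	//@ CurrSegEquality(ubuf, offset, currInfIdx, hfIdxSeg, segLen)       // new bytes
	//@ UpdateCurrSegInfo(hopfields, hfIdxSeg, segLen, oldInfo, newInfo)  // relate the two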
+ghost +requires path.InfoFieldOffset(currInfIdx, MetaLen) + path.InfoLen <= offset +requires 0 < SegLen +requires offset + path.HopLen * SegLen <= len(raw) +requires 0 <= currHfIdx && currHfIdx <= SegLen +requires 0 <= currInfIdx && currInfIdx < 3 +preserves acc(sl.Bytes(raw, 0, len(raw)), R50) +preserves acc(sl.Bytes(raw[offset:offset+SegLen*path.HopLen], 0, SegLen*path.HopLen), R50) +preserves acc(sl.Bytes(InfofieldByteSlice(raw, currInfIdx), 0, path.InfoLen), R50) +ensures let inf := path.BytesToAbsInfoField(InfofieldByteSlice(raw, currInfIdx), 0) in + CurrSegWithInfo(raw[offset:offset+SegLen*path.HopLen], currHfIdx, SegLen, inf) == + CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) +decreases +func CurrSegEquality(raw []byte, offset int, currInfIdx int, currHfIdx int, SegLen int) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + inf := path.BytesToAbsInfoField(infoBytes, 0) + infOffset := path.InfoFieldOffset(currInfIdx, MetaLen) + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) + assert reveal path.BytesToAbsInfoField(raw, infOffset) == + reveal path.BytesToAbsInfoField(infoBytes, 0) + reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) + reveal CurrSegWithInfo(raw[offset:offset+SegLen*path.HopLen], currHfIdx, SegLen, inf) + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) + widenSegment(raw, offset, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, + inf.Peer, SegLen, offset, offset+SegLen*path.HopLen) +} + +// UpdateCurrSegInfo proves that updating the infofield from inf1 to inf2 does not alter the hopfields +// of the current segment. +ghost +requires 0 < SegLen +requires 0 <= currHfIdx && currHfIdx <= SegLen +requires SegLen*path.HopLen == len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R50) +ensures CurrSegWithInfo(raw, currHfIdx, SegLen, inf1).UpdateCurrSeg(inf2) == + CurrSegWithInfo(raw, currHfIdx, SegLen, inf2) +decreases +func UpdateCurrSegInfo(raw []byte, currHfIdx int, SegLen int, + inf1 io.AbsInfoField, inf2 io.AbsInfoField) { + seg1 := reveal CurrSegWithInfo(raw, currHfIdx, SegLen, inf1) + seg2 := reveal CurrSegWithInfo(raw, currHfIdx, SegLen, inf2) +} + + +// LeftSegEqualitySpec defines the conditions that must hold for LeftSegWithInfo(..) +// and LeftSeg(..) to represent the same abstract segment. +ghost +requires s.Valid() +requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires 1 <= currInfIdx && currInfIdx < 4 +requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires (currInfIdx == 1 && s.Seg2Len > 0) || + (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +decreases +pure func LeftSegEqualitySpec(raw []byte, currInfIdx int, s io.SegLens) bool { + return (currInfIdx == 1 && s.Seg2Len > 0) || + (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ? 
+ let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in + LeftSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == + LeftSegWithInfo(hopBytes, currInfIdx, s, inf) : + LeftSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == + LeftSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) +} + +// LeftSegEquality ensures that the two definitions of abstract segments, LeftSegWithInfo(..) +// and LeftSeg(..), represent the same abstract segment. +// The left segment corresponds to different segments of the packet depending on the currInfIdx. +// To address this, we need to consider all possible cases of currInfIdx. This results in fairly +// complex preconditions and postconditions because, for every currInfIdx, we need an offset for +// its infofield and one for its hopfields. +ghost +requires s.Valid() +requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires 1 <= currInfIdx && currInfIdx < 4 +preserves acc(sl.Bytes(raw, 0, len(raw)), R49) +preserves (currInfIdx == 1 && s.Seg2Len > 0) || + (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +ensures LeftSegEqualitySpec(raw, currInfIdx, s) +decreases +func LeftSegEquality(raw []byte, currInfIdx int, s io.SegLens) { + reveal LeftSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) + if ((currInfIdx == 1 && s.Seg2Len > 0) || + (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0)) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, s) + segLen := currInfIdx == 1 ? s.Seg2Len : s.Seg3Len + reveal LeftSegWithInfo(hopBytes, currInfIdx, s, inf) + CurrSegEquality(raw, offset, currInfIdx, 0, segLen) + } else { + reveal LeftSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) + } +} + +// RightSegEqualitySpec defines the conditions that must hold for RightSegWithInfo(..) +// and RightSeg(..) to represent the same abstract segment. +ghost +requires s.Valid() +requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires -1 <= currInfIdx && currInfIdx < 2 +requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires (currInfIdx == 0 && s.Seg2Len > 0) || + (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +decreases +pure func RightSegEqualitySpec(raw []byte, currInfIdx int, s io.SegLens) bool { + return (currInfIdx == 0 && s.Seg2Len > 0) || + (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0) ? 
+ let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in + RightSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == + RightSegWithInfo(hopBytes, currInfIdx, s, inf) : + RightSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == + RightSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) +} + +// RightSegEquality ensures that the two definitions of abstract segments, RightSegWithInfo(..) +// and RightSeg(..), represent the same abstract segment. +// The right segment corresponds to different segments of the packet depending on the currInfIdx. +// To address this, we need to consider all possible cases of currInfIdx. This results in fairly +// complex preconditions and postconditions because, for every currInfIdx, we need an offset for +// its infofield and one for its hopfields. +ghost +requires s.Valid() +requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires -1 <= currInfIdx && currInfIdx < 2 +preserves acc(sl.Bytes(raw, 0, len(raw)), R49) +preserves (currInfIdx == 0 && s.Seg2Len > 0) || + (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +ensures RightSegEqualitySpec(raw, currInfIdx, s) +decreases +func RightSegEquality(raw []byte, currInfIdx int, s io.SegLens) { + reveal RightSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) + if ((currInfIdx == 0 && s.Seg2Len > 0) || + (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0)) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, s) + segLen := currInfIdx == 0 ? s.Seg1Len : s.Seg2Len + reveal RightSegWithInfo(hopBytes, currInfIdx, s, inf) + CurrSegEquality(raw, offset, currInfIdx, segLen, segLen) + } else { + reveal RightSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) + } +} + +// MidSegEqualitySpec defines the conditions that must hold for MidSegWithInfo(..) +// and MidSeg(..) to represent the same abstract segment. +ghost +requires s.Valid() +requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires 2 <= currInfIdx && currInfIdx < 5 +requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires (s.Seg2Len > 0 && s.Seg3Len > 0 && + (currInfIdx == 2 || currInfIdx == 4)) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +decreases +pure func MidSegEqualitySpec(raw []byte, currInfIdx int, s io.SegLens) bool { + return (s.Seg2Len > 0 && s.Seg3Len > 0 && + (currInfIdx == 2 || currInfIdx == 4)) ? 
+ let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in + MidSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == + MidSegWithInfo(hopBytes, currInfIdx, s, inf) : + MidSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == + MidSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) +} + +// MidSegEquality ensures that the two definitions of abstract segments, MidSegWithInfo(..) +// and MidSeg(..), represent the same abstract segment. +// The mid segment corresponds to different segments of the packet depending on the currInfIdx. +// To address this, we need to consider all possible cases of currInfIdx. This results in fairly +// complex preconditions and postconditions because, for every currInfIdx, we need an offset for +// its infofield and one for its hopfields. +ghost +requires s.Valid() +requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires 2 <= currInfIdx && currInfIdx < 5 +preserves acc(sl.Bytes(raw, 0, len(raw)), R49) +preserves (s.Seg2Len > 0 && s.Seg3Len > 0 && + (currInfIdx == 2 || currInfIdx == 4)) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +ensures MidSegEqualitySpec(raw, currInfIdx, s) +decreases +func MidSegEquality(raw []byte, currInfIdx int, s io.SegLens) { + reveal MidSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) + if (currInfIdx == 4 && s.Seg2Len > 0 && s.Seg3Len > 0) { + infoBytes := InfofieldByteSlice(raw, 0) + hopBytes := HopfieldsByteSlice(raw, 0, s) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, s) + reveal MidSegWithInfo(hopBytes, currInfIdx, s, inf) + CurrSegEquality(raw, offset, 0, s.Seg1Len, s.Seg1Len) + } else if (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, s) + reveal MidSegWithInfo(hopBytes, currInfIdx, s, inf) + CurrSegEquality(raw, offset, currInfIdx, 0, s.Seg3Len) + } else { + reveal MidSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) + } +} \ No newline at end of file diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index c2ace96b6..8490963f4 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -252,7 +252,7 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ oldSeg3Len := int(s.PathMeta.SegLen[2]) //@ oldSegLen := LengthOfCurrSeg(oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) //@ oldPrevSegLen := LengthOfPrevSeg(oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ oldOffset := HopFieldOffset(s.Base.NumINF, 0, 0) + //@ oldOffset := HopFieldOffset(s.Base.NumINF, oldPrevSegLen, 0) //@ fold acc(s.Base.Mem(), R56) if err := s.Base.IncPath(); err != nil { //@ fold s.NonInitMem() @@ -264,14 +264,13 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ sl.Reslice_Bytes(ubuf, MetaLen, len(ubuf), HalfPerm) //@ tail := ubuf[MetaLen:] //@ unfold acc(sl.Bytes(tail, 0, len(tail)), R50) - //@ oldoffsetWithHops := oldOffset + path.HopLen * oldPrevSegLen //@ oldHfIdxSeg := oldCurrHfIdx-oldPrevSegLen - //@ WidenCurrSeg(ubuf, oldoffsetWithHops 
+ MetaLen, oldCurrInfIdx, oldHfIdxSeg, + //@ WidenCurrSeg(ubuf, oldOffset + MetaLen, oldCurrInfIdx, oldHfIdxSeg, //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ LenCurrSeg(tail, oldoffsetWithHops, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) + //@ LenCurrSeg(tail, oldOffset, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) //@ oldAbsPkt := reveal s.absPkt(ubuf) //@ sl.SplitRange_Bytes(ubuf, 0, MetaLen, HalfPerm) //@ unfold acc(s.Base.Mem(), R2) @@ -295,8 +294,8 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ assert currHfIdx == oldCurrHfIdx + 1 //@ ghost if(currInfIdx == oldCurrInfIdx) { - //@ IncCurrSeg(tail, oldoffsetWithHops, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) - //@ WidenCurrSeg(ubuf, oldoffsetWithHops + MetaLen, oldCurrInfIdx, oldHfIdxSeg + 1, + //@ IncCurrSeg(tail, oldOffset, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) + //@ WidenCurrSeg(ubuf, oldOffset + MetaLen, oldCurrInfIdx, oldHfIdxSeg + 1, //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) @@ -305,7 +304,7 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ } else { //@ segLen := LengthOfCurrSeg(currHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) //@ prevSegLen := LengthOfPrevSeg(currHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ offsetWithHops := oldOffset + path.HopLen * prevSegLen + MetaLen + //@ offsetWithHops := HopFieldOffset(s.Base.NumINF, prevSegLen, MetaLen) //@ hfIdxSeg := currHfIdx-prevSegLen //@ XoverSegNotNone(tail, oldCurrInfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) //@ XoverCurrSeg(tail, oldCurrInfIdx + 1, oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) @@ -355,8 +354,10 @@ func (s *Raw) GetInfoField(idx int /*@, ghost ubuf []byte @*/) (ifield path.Info //@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R21) //@ unfold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) //@ unfold acc(sl.Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56) - //@ assert info.ToIntermediateAbsInfoField() == - //@ path.BytesToIntermediateAbsInfoField(ubuf, 0, infOffset, len(ubuf)) + //@ assert reveal path.BytesToAbsInfoField(ubuf, infOffset) == + //@ reveal path.BytesToAbsInfoField(ubuf[infOffset : infOffset+path.InfoLen], 0) + //@ assert info.ToAbsInfoField() == + //@ reveal path.BytesToAbsInfoField(ubuf, infOffset) //@ fold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) //@ fold acc(sl.Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56) //@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R21) @@ -397,18 +398,29 @@ func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoFie // @ ensures sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures r != nil ==> r.ErrorMem() // posts for IO: -// @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> +// @ ensures r == nil ==> // @ validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) // @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> // @ let oldPkt := old(s.absPkt(ubuf)) in -// @ let newPkt := AbsSetInfoField(oldPkt, info.ToIntermediateAbsInfoField()) in +// @ let newPkt := 
oldPkt.UpdateInfoField(info.ToAbsInfoField()) in // @ s.absPkt(ubuf) == newPkt // @ decreases +// @ #backend[exhaleMode(1)] func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @*/) (r error) { //@ share info - //@ ghost oldCurrINF := int(old(s.GetCurrINF(ubuf))) + //@ reveal validPktMetaHdr(ubuf) //@ unfold acc(s.Mem(ubuf), R50) //@ unfold acc(s.Base.Mem(), R50) + //@ currInfIdx := int(s.PathMeta.CurrINF) + //@ currHfIdx := int(s.PathMeta.CurrHF) + //@ seg1Len := int(s.PathMeta.SegLen[0]) + //@ seg2Len := int(s.PathMeta.SegLen[1]) + //@ seg3Len := int(s.PathMeta.SegLen[2]) + //@ segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + //@ prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + //@ offset := HopFieldOffset(s.Base.NumINF, prevSegLen, MetaLen) + //@ hopfieldOffset := MetaLen + s.NumINF*path.InfoLen + //@ segLens := io.CombineSegLens(seg1Len, seg2Len, seg3Len) if idx >= s.NumINF { err := serrors.New("InfoField index out of bounds", "max", s.NumINF-1, "actual", idx) //@ fold acc(s.Base.Mem(), R50) @@ -416,35 +428,41 @@ func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @ return err } infOffset := MetaLen + idx*path.InfoLen - //@ assert idx == oldCurrINF ==> reveal validPktMetaHdr(ubuf) - //@ assert idx == oldCurrINF ==> s.EqAbsHeader(ubuf) - - //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) - //@ ValidPktMetaHdrSublice(ubuf, len(s.Raw)) - //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) - //@ assert idx == oldCurrINF ==> RawBytesToBase(ubuf[:len(s.Raw)]).ValidCurrIdxsSpec() - //@ assert sl.Bytes(s.Raw, 0, len(s.Raw)) - //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) - //@ assert acc(sl.Bytes(s.Raw, 0, infOffset), HalfPerm) - //@ sl.Reslice_Bytes(s.Raw, 0, infOffset, HalfPerm/2) - //@ ValidPktMetaHdrSublice(s.Raw, infOffset) - //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) - //@ assert idx == oldCurrINF ==> RawBytesToBase(s.Raw[:infOffset]).ValidCurrIdxsSpec() + //@ SliceBytesIntoInfoFields(ubuf, s.NumINF, segLens, HalfPerm) + //@ SliceBytesIntoSegments(ubuf, segLens, R40) + //@ ValidPktMetaHdrSublice(ubuf, MetaLen) + //@ oldInfo := path.BytesToAbsInfoField(ubuf[infOffset : infOffset+path.InfoLen], 0) + //@ newInfo := info.ToAbsInfoField() + //@ hfIdxSeg := currHfIdx-prevSegLen + //@ hopfields := ubuf[offset:offset + segLen*path.HopLen] + //@ ghost if idx == currInfIdx { + //@ CurrSegEquality(ubuf, offset, currInfIdx, hfIdxSeg, segLen) + //@ LeftSegEquality(ubuf, currInfIdx+1, segLens) + //@ MidSegEquality(ubuf, currInfIdx+2, segLens) + //@ RightSegEquality(ubuf, currInfIdx-1, segLens) + //@ } + //@ reveal s.absPkt(ubuf) + //@ sl.SplitRange_Bytes(ubuf[:hopfieldOffset], infOffset, infOffset+path.InfoLen, R40) + //@ sl.SplitRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, HalfPerm-R40) ret := info.SerializeTo(s.Raw[infOffset : infOffset+path.InfoLen]) - //@ sl.CombineRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) - //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) - //@ ValidPktMetaHdrSublice(ubuf, infOffset) - - //@ sl.Unslice_Bytes(s.Raw, 0, infOffset, HalfPerm/2) - //@ sl.CombineRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) - //@ assert idx == oldCurrINF ==> RawBytesToBase(ubuf).ValidCurrIdxsSpec() - //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) + //@ sl.CombineRange_Bytes(ubuf[:hopfieldOffset], infOffset, infOffset+path.InfoLen, R40) + //@ sl.CombineRange_Bytes(ubuf, 
infOffset, infOffset+path.InfoLen, HalfPerm-R40) + //@ ValidPktMetaHdrSublice(ubuf, MetaLen) + //@ assert reveal validPktMetaHdr(ubuf) + //@ ghost if idx == currInfIdx { + //@ CurrSegEquality(ubuf, offset, currInfIdx, hfIdxSeg, segLen) + //@ UpdateCurrSegInfo(hopfields, hfIdxSeg, segLen, oldInfo, newInfo) + //@ LeftSegEquality(ubuf, currInfIdx+1, segLens) + //@ MidSegEquality(ubuf, currInfIdx+2, segLens) + //@ RightSegEquality(ubuf, currInfIdx-1, segLens) + //@ reveal s.absPkt(ubuf) + //@ } + //@ CombineBytesFromSegments(ubuf, segLens, R40) + //@ CombineBytesFromInfoFields(ubuf, s.NumINF, segLens, HalfPerm) //@ fold acc(s.Base.Mem(), R50) //@ fold acc(s.Mem(ubuf), R50) - //@ assert idx == oldCurrINF ==> reveal validPktMetaHdr(ubuf) - //@ TemporaryAssumeForIO(idx == oldCurrINF ==> s.absPkt(ubuf) == AbsSetInfoField(old(s.absPkt(ubuf)), info.ToIntermediateAbsInfoField())) return ret } diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index c17c5eb61..53914f8b5 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -452,7 +452,7 @@ pure func MidSeg( seg3Len int, headerOffset int) option[io.IO_seg3] { return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in - (currInfIdx == 4 && seg2Len > 0) ? + (currInfIdx == 4 && seg2Len > 0 && seg3Len > 0) ? some(CurrSeg(raw, offset, 0, seg1Len, seg1Len, headerOffset)) : ((currInfIdx == 2 && seg2Len > 0 && seg3Len > 0) ? some(CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currInfIdx, 0, seg3Len, headerOffset)) : @@ -475,17 +475,15 @@ pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { let segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in let prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in let numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) in - let offset := HopFieldOffset(numINF, 0, MetaLen) in + let offset := HopFieldOffset(numINF, prevSegLen, MetaLen) in io.IO_Packet2 { - CurrSeg : CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen), + CurrSeg : CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen), LeftSeg : LeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len , seg3Len, MetaLen), MidSeg : MidSeg(raw, currInfIdx + 2, seg1Len, seg2Len , seg3Len, MetaLen), RightSeg : RightSeg(raw, currInfIdx - 1, seg1Len, seg2Len , seg3Len, MetaLen), } } -// In the future, this should probably use AbsMetaHdr as -// the return type. ghost requires MetaLen <= len(raw) requires acc(sl.Bytes(raw, 0, len(raw)), R56) @@ -496,8 +494,6 @@ pure func RawBytesToMetaHdr(raw []byte) MetaHdr { DecodedFrom(hdr) } -// In the future, this should probably use AbsBase as -// the return type. 
ghost requires MetaLen <= len(raw) requires acc(sl.Bytes(raw, 0, len(raw)), R56) @@ -575,20 +571,6 @@ func (s *Raw) EstablishValidPktMetaHdr(ghost ub []byte) { fold acc(s.Mem(ub), R55) } -ghost -decreases -pure func AbsSetInfoField(oldPkt io.IO_pkt2, info path.IntermediateAbsInfoField) (newPkt io.IO_pkt2) { - return let newCurrSeg := io.IO_seg3_ { - info.AInfo, - info.UInfo, - info.ConsDir, - info.Peer, - oldPkt.CurrSeg.Past, - oldPkt.CurrSeg.Future, - oldPkt.CurrSeg.History} in - io.IO_Packet2{newCurrSeg, oldPkt.LeftSeg, oldPkt.MidSeg, oldPkt.RightSeg} -} - ghost requires oldPkt.LeftSeg != none[io.IO_seg2] requires len(oldPkt.CurrSeg.Future) > 0 @@ -649,8 +631,8 @@ pure func (s *Raw) CorrectlyDecodedInfWithIdx(ub []byte, idx int, info path.Info unfolding acc(s.Base.Mem(), _) in let infOffset := MetaLen + idx*path.InfoLen in infOffset+path.InfoLen <= len(ub) && - info.ToIntermediateAbsInfoField() == - path.BytesToIntermediateAbsInfoField(ub, 0, infOffset, len(ub)) + info.ToAbsInfoField() == + reveal path.BytesToAbsInfoField(ub, infOffset) } ghost @@ -664,8 +646,8 @@ pure func (s *Raw) CorrectlyDecodedInf(ub []byte, info path.InfoField) bool { unfolding acc(s.Base.Mem(), _) in let infOffset := MetaLen + int(s.Base.PathMeta.CurrINF)*path.InfoLen in infOffset+path.InfoLen <= len(ub) && - info.ToIntermediateAbsInfoField() == - path.BytesToIntermediateAbsInfoField(ub, 0, infOffset, len(ub)) + info.ToAbsInfoField() == + reveal path.BytesToAbsInfoField(ub, infOffset) } ghost @@ -716,9 +698,9 @@ func (s *Raw) LastHopLemma(ubuf []byte) { segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := HopFieldOffset(numINF, 0, MetaLen) + offset := HopFieldOffset(numINF, prevSegLen, MetaLen) pkt := reveal s.absPkt(ubuf) - assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) + assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) assert len(pkt.CurrSeg.Future) == 1 } @@ -742,9 +724,9 @@ func (s *Raw) XoverLemma(ubuf []byte) { segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := HopFieldOffset(numINF, 0, MetaLen) + offset := HopFieldOffset(numINF, prevSegLen, MetaLen) pkt := reveal s.absPkt(ubuf) - assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) + assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) assert pkt.LeftSeg == reveal LeftSeg(ubuf, currInfIdx + 1, seg1Len, seg2Len , seg3Len, MetaLen) assert len(pkt.CurrSeg.Future) == 1 assert pkt.LeftSeg != none[io.IO_seg2] @@ -762,7 +744,7 @@ pure func (s *Raw) EqAbsHopField(pkt io.IO_pkt2, hop io.IO_HF) bool { ghost opaque decreases -pure func (s *Raw) EqAbsInfoField(pkt io.IO_pkt2, info path.IntermediateAbsInfoField) bool { +pure func (s *Raw) EqAbsInfoField(pkt io.IO_pkt2, info io.AbsInfoField) bool { return let currseg := pkt.CurrSeg in info.AInfo == currseg.AInfo && info.UInfo == currseg.UInfo && @@ -780,7 +762,7 @@ preserves s.ValidCurrINF(ubuf) preserves s.ValidCurrHF(ubuf) preserves s.CorrectlyDecodedInf(ubuf, info) preserves s.CorrectlyDecodedHf(ubuf, hop) -ensures s.EqAbsInfoField(s.absPkt(ubuf), info.ToIntermediateAbsInfoField()) 
+ensures s.EqAbsInfoField(s.absPkt(ubuf), info.ToAbsInfoField()) ensures s.EqAbsHopField(s.absPkt(ubuf), hop.ToIO_HF()) decreases func (s *Raw) DecodingLemma(ubuf []byte, info path.InfoField, hop path.HopField) { @@ -794,19 +776,20 @@ func (s *Raw) DecodingLemma(ubuf []byte, info path.InfoField, hop path.HopField) segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := HopFieldOffset(numINF, 0, MetaLen) + offset := HopFieldOffset(numINF, prevSegLen, MetaLen) + hfIdxSeg := currHfIdx-prevSegLen reveal s.CorrectlyDecodedInf(ubuf, info) reveal s.CorrectlyDecodedHf(ubuf, hop) pkt := reveal s.absPkt(ubuf) - currseg := reveal CurrSeg(ubuf, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) - hopFields := hopFields(ubuf, offset + path.HopLen * prevSegLen, 0, segLen) - hopFieldsBytePositionsLemma(ubuf, offset + path.HopLen * prevSegLen, 0, segLen, R54) - reveal hopFieldsBytePositions(ubuf, offset + path.HopLen * prevSegLen, 0, segLen, hopFields) - assert currseg.Future[0] == hopFields[currHfIdx-prevSegLen] - assert hopFields[currHfIdx-prevSegLen] == - path.BytesToIO_HF(ubuf, 0, offset + path.HopLen * currHfIdx, len(ubuf)) - assert currseg.Future[0] == path.BytesToIO_HF(ubuf, 0, offset + path.HopLen * currHfIdx, len(ubuf)) - assert reveal s.EqAbsInfoField(s.absPkt(ubuf), info.ToIntermediateAbsInfoField()) + currseg := reveal CurrSeg(ubuf, offset, currInfIdx, hfIdxSeg, segLen, MetaLen) + hopFields := hopFields(ubuf, offset, 0, segLen) + hopFieldsBytePositionsLemma(ubuf, offset, 0, segLen, R54) + reveal hopFieldsBytePositions(ubuf, offset, 0, segLen, hopFields) + assert currseg.Future[0] == hopFields[hfIdxSeg] + assert hopFields[hfIdxSeg] == + path.BytesToIO_HF(ubuf, 0, offset + path.HopLen * hfIdxSeg, len(ubuf)) + assert currseg.Future[0] == path.BytesToIO_HF(ubuf, 0, offset + path.HopLen * hfIdxSeg, len(ubuf)) + assert reveal s.EqAbsInfoField(s.absPkt(ubuf), info.ToAbsInfoField()) assert reveal s.EqAbsHopField(s.absPkt(ubuf), hop.ToIO_HF()) } @@ -878,23 +861,23 @@ ensures let prevSegLen := LengthOfPrevSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) in let segLen := LengthOfCurrSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) in let numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) in - let offset := HopFieldOffset(numInf, 0, 0) in - CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen+1, segLen, 0) == + let offset := HopFieldOffset(numInf, prevSegLen, 0) in + CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen+1, segLen, 0) == get(LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0)) decreases func XoverCurrSeg(raw []byte, currInfIdx int, currHfIdx int, seg1Len int, seg2Len int, seg3Len int) { prevSegLen := LengthOfPrevSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) segLen := LengthOfCurrSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := HopFieldOffset(numInf, 0, 0) - currseg := reveal CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, 0, segLen, 0) + offset := HopFieldOffset(numInf, prevSegLen, 0) + currseg := reveal CurrSeg(raw, offset, currInfIdx, 0, segLen, 0) leftseg := reveal LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) assert currseg == get(leftseg) } ghost requires 0 < seg1Len -requires 0 <= seg2Len +requires 0 < seg2Len requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) requires 2 <= currInfIdx && 
currInfIdx < 4 @@ -910,10 +893,11 @@ func XoverLeftSeg(raw []byte, currInfIdx int, seg1Len int, seg2Len int, seg3Len ghost requires 0 < seg1Len -requires 0 <= seg2Len +requires 0 < seg2Len requires 0 <= seg3Len requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 1 +requires 0 == currInfIdx ==> 0 < seg3Len preserves acc(sl.Bytes(raw, 0, len(raw)), R56) ensures MidSeg(raw, currInfIdx+4, seg1Len, seg2Len, seg3Len, 0) == RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) @@ -939,8 +923,8 @@ ensures let prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in let segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in let numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) in - let offset := HopFieldOffset(numInf, 0, 0) in - let currseg := CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, 0) in + let offset := HopFieldOffset(numInf, prevSegLen, 0) in + let currseg := CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, 0) in len(currseg.Future) > 0 && get(RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0)) == absIncPathSeg(currseg) @@ -949,11 +933,11 @@ func XoverRightSeg(raw []byte, currInfIdx int, currHfIdx int, seg1Len int, seg2L prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := HopFieldOffset(numInf, 0, 0) - LenCurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, segLen - 1, segLen) - IncCurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, segLen - 1, segLen) - currseg := CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, segLen - 1, segLen, 0) - nextseg := CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, segLen, segLen, 0) + offset := HopFieldOffset(numInf, prevSegLen, 0) + LenCurrSeg(raw, offset, currInfIdx, segLen - 1, segLen) + IncCurrSeg(raw, offset, currInfIdx, segLen - 1, segLen) + currseg := CurrSeg(raw, offset, currInfIdx, segLen - 1, segLen, 0) + nextseg := CurrSeg(raw, offset, currInfIdx, segLen, segLen, 0) rightseg := reveal RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) assert absIncPathSeg(currseg) == nextseg assert nextseg == get(rightseg) diff --git a/router/dataplane.go b/router/dataplane.go index da01b5d9f..bab621d04 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1981,7 +1981,7 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // @ absPktFutureLemma(ub) // @ p.path.DecodingLemma(ubPath, p.infoField, p.hopField) // @ assert reveal p.path.EqAbsInfoField(p.path.absPkt(ubPath), - // @ p.infoField.ToIntermediateAbsInfoField()) + // @ p.infoField.ToAbsInfoField()) // @ assert reveal p.path.EqAbsHopField(p.path.absPkt(ubPath), // @ p.hopField.ToIO_HF()) // @ assert reveal p.EqAbsHopField(absPkt(ub)) @@ -2527,7 +2527,7 @@ func (p *scionPacketProcessor) verifyCurrentMAC( /*@ ghost oldPkt io.IO_pkt2, gh // @ reveal p.EqAbsInfoField(oldPkt) // @ reveal p.EqAbsHopField(oldPkt) // (VerifiedSCION) Assumptions for Cryptography: - // @ absInf := p.infoField.ToIntermediateAbsInfoField() + // @ absInf := p.infoField.ToAbsInfoField() // @ absHF := p.hopField.ToIO_HF() // @ AssumeForIO(dp.hf_valid(absInf.ConsDir, absInf.AInfo, absInf.UInfo, absHF)) // @ reveal AbsVerifyCurrentMACConstraint(oldPkt, dp) diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index 60daa8c8c..01377e461 100644 --- 
a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -66,9 +66,9 @@ func absPktFutureLemma(raw []byte) { segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) + offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) pkt := reveal absPkt(raw) - assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen) + assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen) assert len(pkt.CurrSeg.Future) > 0 } @@ -294,9 +294,9 @@ func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) + offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) - scion.WidenCurrSeg(ub, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) + scion.WidenCurrSeg(ub, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) scion.WidenLeftSeg(ub, currInfIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) scion.WidenMidSeg(ub, currInfIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) scion.WidenRightSeg(ub, currInfIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) @@ -355,9 +355,9 @@ func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) + offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) - scion.WidenCurrSeg(ub, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) + scion.WidenCurrSeg(ub, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) scion.WidenLeftSeg(ub, currInfIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) scion.WidenMidSeg(ub, currInfIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) scion.WidenRightSeg(ub, currInfIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) @@ -380,7 +380,7 @@ opaque requires acc(&p.infoField, R55) decreases pure func (p* scionPacketProcessor) EqAbsInfoField(pkt io.IO_pkt2) bool { - return let absInf := p.infoField.ToIntermediateAbsInfoField() in + return let absInf := p.infoField.ToAbsInfoField() in let currseg := pkt.CurrSeg in absInf.AInfo == currseg.AInfo && absInf.UInfo == currseg.UInfo && diff --git a/router/io-spec.gobra b/router/io-spec.gobra index fb37adf58..c891e8f2f 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -50,9 +50,9 @@ pure func absPkt(raw []byte) (res io.IO_pkt2) { let segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in let prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in 
let numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) in - let offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) in + let offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) in io.IO_Packet2 { - CurrSeg : scion.CurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen), + CurrSeg : scion.CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen), LeftSeg : scion.LeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), MidSeg : scion.MidSeg(raw, currInfIdx + 2, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), RightSeg : scion.RightSeg(raw, currInfIdx - 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra index 06ed67245..4abb48e83 100644 --- a/router/widen-lemma.gobra +++ b/router/widen-lemma.gobra @@ -139,9 +139,9 @@ func absPktWidenLemma(raw []byte, length int) { segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) - offset := scion.HopFieldOffset(numINF, 0, headerOffsetWithMetaLen) + offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) - scion.WidenCurrSeg(raw, offset + path.HopLen * prevSegLen, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, 0, length) + scion.WidenCurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, 0, length) scion.WidenLeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) scion.WidenMidSeg(raw, currInfIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) scion.WidenRightSeg(raw, currInfIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) diff --git a/verification/io/io-spec.gobra b/verification/io/io-spec.gobra index de71c2e17..c7e98c92d 100644 --- a/verification/io/io-spec.gobra +++ b/verification/io/io-spec.gobra @@ -51,46 +51,6 @@ requires CBio_IN_bio3s_enter(t, v) decreases pure func CBio_IN_bio3s_enter_T(t Place, v IO_val) Place -/*** Helper functions, not in Isabelle ***/ -// Establishes the traversed segment for packets which are not incremented (internal). -ghost -requires len(currseg.Future) > 0 -decreases -pure func establishGuardTraversedseg(currseg IO_seg3, direction bool) IO_seg3 { - return let uinfo := direction ? - upd_uinfo(currseg.UInfo, currseg.Future[0]) : - currseg.UInfo in - IO_seg3_ { - AInfo: currseg.AInfo, - UInfo: uinfo, - ConsDir: currseg.ConsDir, - Peer: currseg.Peer, - Past: currseg.Past, - Future: currseg.Future, - History: currseg.History, - } -} - -// Establishes the traversed segment for packets that are incremented (external). -ghost -requires len(currseg.Future) > 0 -decreases -pure func establishGuardTraversedsegInc(currseg IO_seg3, direction bool) IO_seg3 { - return let uinfo := direction ? 
- upd_uinfo(currseg.UInfo, currseg.Future[0]) : - currseg.UInfo in - IO_seg3_ { - AInfo: currseg.AInfo, - UInfo: uinfo, - ConsDir: currseg.ConsDir, - Peer: currseg.Peer, - Past: seq[IO_HF]{currseg.Future[0]} ++ currseg.Past, - Future: currseg.Future[1:], - History: seq[IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History, - } -} -/*** End of helper functions, not in Isabelle ***/ - // This corresponds to the condition of the if statement in the io-spec case for enter ghost requires v.isIO_Internal_val1 diff --git a/verification/io/io_spec_definitions.gobra b/verification/io/io_spec_definitions.gobra new file mode 100644 index 000000000..71eb1b5ed --- /dev/null +++ b/verification/io/io_spec_definitions.gobra @@ -0,0 +1,127 @@ +// Copyright 2024 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package io + +/*** This file contains helpful definitions that do not have a counterpart in the Isabelle formalization. ***/ + +// Establishes the traversed segment for packets which are not incremented (internal). +ghost +requires len(currseg.Future) > 0 +decreases +pure func establishGuardTraversedseg(currseg IO_seg3, direction bool) IO_seg3 { + return let uinfo := direction ? + upd_uinfo(currseg.UInfo, currseg.Future[0]) : + currseg.UInfo in + IO_seg3_ { + AInfo: currseg.AInfo, + UInfo: uinfo, + ConsDir: currseg.ConsDir, + Peer: currseg.Peer, + Past: currseg.Past, + Future: currseg.Future, + History: currseg.History, + } +} + +// Establishes the traversed segment for packets that are incremented (external). +ghost +requires len(currseg.Future) > 0 +decreases +pure func establishGuardTraversedsegInc(currseg IO_seg3, direction bool) IO_seg3 { + return let uinfo := direction ? + upd_uinfo(currseg.UInfo, currseg.Future[0]) : + currseg.UInfo in + IO_seg3_ { + AInfo: currseg.AInfo, + UInfo: uinfo, + ConsDir: currseg.ConsDir, + Peer: currseg.Peer, + Past: seq[IO_HF]{currseg.Future[0]} ++ currseg.Past, + Future: currseg.Future[1:], + History: seq[IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History, + } +} + +ghost +decreases +pure func (seg IO_seg3) UpdateCurrSeg( info AbsInfoField) IO_seg3 { + return IO_seg3_ { + info.AInfo, + info.UInfo, + info.ConsDir, + info.Peer, + seg.Past, + seg.Future, + seg.History, + } +} + +ghost +decreases +pure func (pkt IO_pkt2) UpdateInfoField(info AbsInfoField) IO_pkt2 { + return let newCurrSeg := pkt.CurrSeg.UpdateCurrSeg(info) in + IO_Packet2{newCurrSeg, pkt.LeftSeg, pkt.MidSeg, pkt.RightSeg} +} + +// This type simplifies the infoField, making it easier +// to use than the IO_seg3 from the IO-spec. +type AbsInfoField adt { + AbsInfoField_ { + AInfo IO_ainfo + UInfo set[IO_msgterm] + ConsDir bool + Peer bool + } +} + +// The segment lengths of a packet are frequently used together. +// This type combines them into a single structure to simplify +// their specification. 
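+// For illustration (example values, not part of the Isabelle formalization):
+// a path with three hops in its first segment and two in its second is captured
+// as CombineSegLens(3, 2, 0), i.e. SegLens_{3, 2, 0}; for this value Valid()
+// holds and NumInfoFields() == 2.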
+type SegLens adt { + SegLens_ { + Seg1Len int + Seg2Len int + Seg3Len int + } +} + +ghost +decreases +pure func (s SegLens) Valid() bool { + return s.Seg1Len > 0 && + s.Seg2Len >= 0 && + s.Seg3Len >= 0 +} + +ghost +requires seg1Len > 0 +requires seg2Len >= 0 +requires seg3Len >= 0 +decreases +pure func CombineSegLens(seg1Len int, seg2Len int, seg3Len int) SegLens { + return SegLens_ { + seg1Len, + seg2Len, + seg3Len, + } +} + +ghost +decreases +pure func (s SegLens) NumInfoFields() int { + return s.Seg3Len > 0 ? 3 : (s.Seg2Len > 0 ? 2 : (s.Seg1Len > 0 ? 1 : 0)) +} \ No newline at end of file From b10cb4cef813b5d0ab0979fb566a0149ba9eae3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Thu, 13 Jun 2024 15:11:30 -0400 Subject: [PATCH 44/57] small clean-up (#355) --- pkg/experimental/epic/epic.go | 10 ++- pkg/slayers/scion.go | 26 ++++---- private/underlay/conn/conn.go | 12 ++-- router/dataplane.go | 114 ++++++++++++++++------------------ 4 files changed, 76 insertions(+), 86 deletions(-) diff --git a/pkg/experimental/epic/epic.go b/pkg/experimental/epic/epic.go index 6c8cb443e..a183f361d 100644 --- a/pkg/experimental/epic/epic.go +++ b/pkg/experimental/epic/epic.go @@ -48,13 +48,11 @@ const ( var zeroInitVector /*@@@*/ [16]byte -/*@ // ghost init -func init() { - fold acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) - fold acc(postInitInvariant(), _) -} -@*/ +// @ func init() { +// @ fold acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) +// @ fold acc(postInitInvariant(), _) +// @ } // CreateTimestamp returns the epic timestamp, which encodes the current time (now) relative to the // input timestamp. The input timestamp must not be in the future (compared to the current time), diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index 0676e3961..8d2085403 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -835,20 +835,18 @@ func packAddr(hostAddr net.Addr /*@ , ghost wildcard bool @*/) (addrtyp AddrType // @ ensures 0 <= res // @ decreases func (s *SCION) AddrHdrLen( /*@ ghost ubuf []byte, ghost insideSlayers bool @*/ ) (res int) { - /*@ - ghost if !insideSlayers { - unfold acc(s.Mem(ubuf), R51) - defer fold acc(s.Mem(ubuf), R51) - unfold acc(s.HeaderMem(ubuf[CmnHdrLen:]), R51) - defer fold acc(s.HeaderMem(ubuf[CmnHdrLen:]), R51) - assert s.AddrHdrLenSpec(ubuf) == ( - unfolding acc(s.Mem(ubuf), R52) in - unfolding acc(s.HeaderMem(ubuf[CmnHdrLen:]), R52) in - 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length()) - assert s.AddrHdrLenSpec(ubuf) == - 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() - } - @*/ + // @ ghost if !insideSlayers { + // @ unfold acc(s.Mem(ubuf), R51) + // @ defer fold acc(s.Mem(ubuf), R51) + // @ unfold acc(s.HeaderMem(ubuf[CmnHdrLen:]), R51) + // @ defer fold acc(s.HeaderMem(ubuf[CmnHdrLen:]), R51) + // @ assert s.AddrHdrLenSpec(ubuf) == ( + // @ unfolding acc(s.Mem(ubuf), R52) in + // @ unfolding acc(s.HeaderMem(ubuf[CmnHdrLen:]), R52) in + // @ 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length()) + // @ assert s.AddrHdrLenSpec(ubuf) == + // @ 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() + // @ } return 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() } diff --git a/private/underlay/conn/conn.go b/private/underlay/conn/conn.go index eab1a0a9c..ad9611602 100644 --- a/private/underlay/conn/conn.go +++ b/private/underlay/conn/conn.go @@ -125,13 +125,11 @@ func New(listen, remote *net.UDPAddr, cfg *Config) (res Conn, e error) { if 
listen == nil && remote == nil { panic("either listen or remote must be set") } - /*@ - assert remote != nil ==> a == remote - assert remote == nil ==> a == listen - unfold acc(a.Mem(), R15) - unfold acc(sl.Bytes(a.IP, 0, len(a.IP)), R15) - assert forall i int :: { &a.IP[i] } 0 <= i && i < len(a.IP) ==> acc(&a.IP[i], R15) - @*/ + // @ assert remote != nil ==> a == remote + // @ assert remote == nil ==> a == listen + // @ unfold acc(a.Mem(), R15) + // @ unfold acc(sl.Bytes(a.IP, 0, len(a.IP)), R15) + // @ assert forall i int :: { &a.IP[i] } 0 <= i && i < len(a.IP) ==> acc(&a.IP[i], R15) if a.IP.To4( /*@ false @*/ ) != nil { return newConnUDPIPv4(listen, remote, cfg) } diff --git a/router/dataplane.go b/router/dataplane.go index bab621d04..e156ec590 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1487,39 +1487,37 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ fold p.sInitD().validResult(processResult{}, false) return processResult{}, err /*@, false, io.IO_val_Unit{} @*/ } - /*@ - ghost var ub []byte - ghost var ubScionLayer []byte = p.rawPkt - ghost var ubHbhLayer []byte - ghost var ubE2eLayer []byte - - ghost llStart := 0 - ghost llEnd := 0 - ghost mustCombineRanges := lastLayerIdx != -1 && !offsets[lastLayerIdx].isNil - ghost var o offsetPair - ghost if lastLayerIdx == -1 { - ub = p.rawPkt - } else { - if offsets[lastLayerIdx].isNil { - ub = nil - sl.NilAcc_Bytes() - } else { - o = offsets[lastLayerIdx] - ub = p.rawPkt[o.start:o.end] - llStart = o.start - llEnd = o.end - sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) - } - } - hasHbhLayer := processed[0] - oHbh := offsets[0] - ubHbhLayer = hasHbhLayer && !oHbh.isNil ? p.rawPkt[oHbh.start:oHbh.end] : ([]byte)(nil) - hasE2eLayer := processed[1] - oE2e := offsets[1] - ubE2eLayer = hasE2eLayer && !oE2e.isNil ? p.rawPkt[oE2e.start:oE2e.end] : ([]byte)(nil) - assert processed[0] ==> p.hbhLayer.Mem(ubHbhLayer) - assert processed[1] ==> p.e2eLayer.Mem(ubE2eLayer) - @*/ + // @ ghost var ub []byte + // @ ghost var ubScionLayer []byte = p.rawPkt + // @ ghost var ubHbhLayer []byte + // @ ghost var ubE2eLayer []byte + + // @ ghost llStart := 0 + // @ ghost llEnd := 0 + // @ ghost mustCombineRanges := lastLayerIdx != -1 && !offsets[lastLayerIdx].isNil + // @ ghost var o offsetPair + // @ ghost if lastLayerIdx == -1 { + // @ ub = p.rawPkt + // @ } else { + // @ if offsets[lastLayerIdx].isNil { + // @ ub = nil + // @ sl.NilAcc_Bytes() + // @ } else { + // @ o = offsets[lastLayerIdx] + // @ ub = p.rawPkt[o.start:o.end] + // @ llStart = o.start + // @ llEnd = o.end + // @ sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) + // @ } + // @ } + // @ hasHbhLayer := processed[0] + // @ oHbh := offsets[0] + // @ ubHbhLayer = hasHbhLayer && !oHbh.isNil ? p.rawPkt[oHbh.start:oHbh.end] : ([]byte)(nil) + // @ hasE2eLayer := processed[1] + // @ oE2e := offsets[1] + // @ ubE2eLayer = hasE2eLayer && !oE2e.isNil ? 
p.rawPkt[oE2e.start:oE2e.end] : ([]byte)(nil) + // @ assert processed[0] ==> p.hbhLayer.Mem(ubHbhLayer) + // @ assert processed[1] ==> p.e2eLayer.Mem(ubE2eLayer) // @ assert acc(sl.Bytes(ub, 0, len(ub)), HalfPerm) pld /*@ , start, end @*/ := p.lastLayer.LayerPayload( /*@ ub @*/ ) // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) @@ -2921,19 +2919,18 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh // @ assert slayers.ValidPktMetaHdr(ub) // @ assert reveal p.LastHopLen(ub) // @ assert p.scionLayer.EqAbsHeader(ub) - /*@ - ghost var ubLL []byte - ghost if &p.scionLayer === p.lastLayer { - ubLL = ub - } else if llIsNil { - ubLL = nil - sl.NilAcc_Bytes() - } else { - ubLL = ub[startLL:endLL] - sl.SplitRange_Bytes(ub, startLL, endLL, R1) - ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) - } - @*/ + + // @ ghost var ubLL []byte + // @ ghost if &p.scionLayer === p.lastLayer { + // @ ubLL = ub + // @ } else if llIsNil { + // @ ubLL = nil + // @ sl.NilAcc_Bytes() + // @ } else { + // @ ubLL = ub[startLL:endLL] + // @ sl.SplitRange_Bytes(ub, startLL, endLL, R1) + // @ ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) + // @ } return p.handleSCMPTraceRouteRequest(p.ingressID /*@, ubLL @*/) } @@ -3027,19 +3024,18 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub))) // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ TemporaryAssumeForIO(absPkt(ub) == old(absPkt(ub))) - /*@ - ghost var ubLL []byte - ghost if &p.scionLayer === p.lastLayer { - ubLL = ub - } else if llIsNil { - ubLL = nil - sl.NilAcc_Bytes() - } else { - ubLL = ub[startLL:endLL] - sl.SplitRange_Bytes(ub, startLL, endLL, R1) - ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) - } - @*/ + + // @ ghost var ubLL []byte + // @ ghost if &p.scionLayer === p.lastLayer { + // @ ubLL = ub + // @ } else if llIsNil { + // @ ubLL = nil + // @ sl.NilAcc_Bytes() + // @ } else { + // @ ubLL = ub[startLL:endLL] + // @ sl.SplitRange_Bytes(ub, startLL, endLL, R1) + // @ ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) + // @ } return p.handleSCMPTraceRouteRequest(egressID /*@, ubLL@*/) } From 89fd7b610f4b1c2840e3e979d7f3745ecb6d23d8 Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Fri, 14 Jun 2024 17:22:44 +0200 Subject: [PATCH 45/57] Simplification of segLens (#356) * simplification of SegLens * remove preconditions on CombineSegLens * fix verification error * renaming --- .../path/scion/info_hop_setter_lemmas.gobra | 362 +++++++++--------- pkg/slayers/path/scion/raw.go | 43 ++- pkg/slayers/path/scion/raw_spec.gobra | 255 ++++++------ pkg/slayers/path/scion/widen-lemma.gobra | 92 ++--- pkg/slayers/scion_spec.gobra | 9 +- router/io-spec-lemmas.gobra | 33 +- router/io-spec.gobra | 13 +- router/widen-lemma.gobra | 13 +- verification/io/io_spec_definitions.gobra | 23 +- 9 files changed, 412 insertions(+), 431 deletions(-) diff --git a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra index 2ec25d54a..3c00edd4a 100644 --- a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra +++ b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra @@ -54,15 +54,15 @@ pure func InfofieldByteSlice(raw []byte, currInfIdx int) ([]byte) { // concrete packets to abstract packets. This requires the special case that // currInfIdx == 4 returns the same as currInfIdx == 0. 
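// For illustration (example values, following the definition below): with
// segs == io.CombineSegLens(3, 2, 0) we get numInf == 2, so the hopfields of
// segment 0 start at infOffset == path.InfoFieldOffset(2, MetaLen), and those
// of segment 1 start at infOffset + 3*path.HopLen.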
ghost -requires s.Valid() +requires segs.Valid() requires 0 <= currInfIdx decreases -pure func HopfieldsStartIdx(currInfIdx int, s io.SegLens) int { - return let numInf := s.NumInfoFields() in +pure func HopfieldsStartIdx(currInfIdx int, segs io.SegLens) int { + return let numInf := segs.NumInfoFields() in let infOffset := path.InfoFieldOffset(numInf, MetaLen) in (currInfIdx == 0 || currInfIdx == 4) ? infOffset : - currInfIdx == 1 ? infOffset+s.Seg1Len*path.HopLen : - infOffset+(s.Seg1Len+s.Seg2Len)*path.HopLen + currInfIdx == 1 ? infOffset+segs.Seg1Len*path.HopLen : + infOffset+(segs.Seg1Len+segs.Seg2Len)*path.HopLen } // HopfieldsEndIdx returns the index of the last byte of the hopfields of a segment @@ -71,15 +71,15 @@ pure func HopfieldsStartIdx(currInfIdx int, s io.SegLens) int { // concrete packets to abstract packets. This requires the special case that // currInfIdx == 4 returns the same as currInfIdx == 0. ghost -requires s.Valid() +requires segs.Valid() requires 0 <= currInfIdx decreases -pure func HopfieldsEndIdx(currInfIdx int, s io.SegLens) int { - return let numInf := s.NumInfoFields() in +pure func HopfieldsEndIdx(currInfIdx int, segs io.SegLens) int { + return let numInf := segs.NumInfoFields() in let infOffset := path.InfoFieldOffset(numInf, MetaLen) in - (currInfIdx == 0 || currInfIdx == 4) ? infOffset+s.Seg1Len*path.HopLen : - currInfIdx == 1 ? infOffset+(s.Seg1Len+s.Seg2Len)*path.HopLen : - infOffset+(s.Seg1Len+s.Seg2Len+s.Seg3Len)*path.HopLen + (currInfIdx == 0 || currInfIdx == 4) ? infOffset+segs.Seg1Len*path.HopLen : + currInfIdx == 1 ? infOffset+(segs.Seg1Len+segs.Seg2Len)*path.HopLen : + infOffset+(segs.Seg1Len+segs.Seg2Len+segs.Seg3Len)*path.HopLen } // HopfieldsByteSlice returns the byte slice of the hopfields of a segment // (specified by currInfIdx). The hopfields of the current segment are used when translating // concrete packets to abstract packets. This requires the special case that // currInfIdx == 4 returns the same as currInfIdx == 0.
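// For illustration (example values, following the definition below): with
// segs == io.CombineSegLens(3, 2, 0) and currInfIdx == 1, the result is
// raw[HopfieldsStartIdx(1, segs):HopfieldsEndIdx(1, segs)], i.e. the bytes of
// the two hopfields of the second segment.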
ghost -requires s.Valid() +requires segs.Valid() requires 0 <= currInfIdx -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires PktLen(segs, MetaLen) <= len(raw) decreases -pure func HopfieldsByteSlice(raw []byte, currInfIdx int, s io.SegLens) ([]byte) { - return let numInf := s.NumInfoFields() in +pure func HopfieldsByteSlice(raw []byte, currInfIdx int, segs io.SegLens) ([]byte) { + return let numInf := segs.NumInfoFields() in let infOffset := path.InfoFieldOffset(numInf, MetaLen) in - let start := HopfieldsStartIdx(currInfIdx, s) in - let end := HopfieldsEndIdx(currInfIdx, s) in + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in raw[start:end] } // SliceBytesIntoSegments splits the raw bytes of a packet into its hopfield segments ghost requires 0 < p -requires s.Valid() -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) requires acc(sl.Bytes(raw, 0, len(raw)), p) -ensures acc(sl.Bytes(raw[:HopfieldsStartIdx(0, s)], 0, HopfieldsStartIdx(0, s)), p) -ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 0, s), 0, s.Seg1Len*path.HopLen), p) -ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 1, s), 0, s.Seg2Len*path.HopLen), p) -ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 2, s), 0, s.Seg3Len*path.HopLen), p) -ensures acc(sl.Bytes(raw[HopfieldsEndIdx(2, s):], 0, len(raw[HopfieldsEndIdx(2, s):])), p) +ensures acc(sl.Bytes(raw[:HopfieldsStartIdx(0, segs)], 0, HopfieldsStartIdx(0, segs)), p) +ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 0, segs), 0, segs.Seg1Len*path.HopLen), p) +ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 1, segs), 0, segs.Seg2Len*path.HopLen), p) +ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 2, segs), 0, segs.Seg3Len*path.HopLen), p) +ensures acc(sl.Bytes(raw[HopfieldsEndIdx(2, segs):], 0, len(raw[HopfieldsEndIdx(2, segs):])), p) decreases -func SliceBytesIntoSegments(raw []byte, s io.SegLens, p perm) { - sl.SplitByIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, s), p) - sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(0, s), len(raw), HopfieldsEndIdx(0, s), p) - sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(1, s), len(raw), HopfieldsEndIdx(1, s), p) - sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(2, s), len(raw), HopfieldsEndIdx(2, s), p) - sl.Reslice_Bytes(raw, 0, HopfieldsStartIdx(0, s), p) - sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, s), HopfieldsEndIdx(0, s), p) - sl.Reslice_Bytes(raw, HopfieldsStartIdx(1, s), HopfieldsEndIdx(1, s), p) - sl.Reslice_Bytes(raw, HopfieldsStartIdx(2, s), HopfieldsEndIdx(2, s), p) - sl.Reslice_Bytes(raw, HopfieldsEndIdx(2, s), len(raw), p) +func SliceBytesIntoSegments(raw []byte, segs io.SegLens, p perm) { + sl.SplitByIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, segs), p) + sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), HopfieldsEndIdx(0, segs), p) + sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(1, segs), len(raw), HopfieldsEndIdx(1, segs), p) + sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(2, segs), len(raw), HopfieldsEndIdx(2, segs), p) + sl.Reslice_Bytes(raw, 0, HopfieldsStartIdx(0, segs), p) + sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, segs), HopfieldsEndIdx(0, segs), p) + sl.Reslice_Bytes(raw, HopfieldsStartIdx(1, segs), HopfieldsEndIdx(1, segs), p) + sl.Reslice_Bytes(raw, HopfieldsStartIdx(2, segs), HopfieldsEndIdx(2, segs), p) + sl.Reslice_Bytes(raw, HopfieldsEndIdx(2, segs), len(raw), p) } // CombineBytesFromSegments combines the three hopfield segments of a packet 
into a single slice of bytes. ghost requires 0 < p -requires s.Valid() -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) -requires acc(sl.Bytes(raw[:HopfieldsStartIdx(0, s)], 0, HopfieldsStartIdx(0, s)), p) -requires acc(sl.Bytes(HopfieldsByteSlice(raw, 0, s), 0, s.Seg1Len*path.HopLen), p) -requires acc(sl.Bytes(HopfieldsByteSlice(raw, 1, s), 0, s.Seg2Len*path.HopLen), p) -requires acc(sl.Bytes(HopfieldsByteSlice(raw, 2, s), 0, s.Seg3Len*path.HopLen), p) -requires acc(sl.Bytes(raw[HopfieldsEndIdx(2, s):], 0, len(raw[HopfieldsEndIdx(2, s):])), p) +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires acc(sl.Bytes(raw[:HopfieldsStartIdx(0, segs)], 0, HopfieldsStartIdx(0, segs)), p) +requires acc(sl.Bytes(HopfieldsByteSlice(raw, 0, segs), 0, segs.Seg1Len*path.HopLen), p) +requires acc(sl.Bytes(HopfieldsByteSlice(raw, 1, segs), 0, segs.Seg2Len*path.HopLen), p) +requires acc(sl.Bytes(HopfieldsByteSlice(raw, 2, segs), 0, segs.Seg3Len*path.HopLen), p) +requires acc(sl.Bytes(raw[HopfieldsEndIdx(2, segs):], 0, len(raw[HopfieldsEndIdx(2, segs):])), p) ensures acc(sl.Bytes(raw, 0, len(raw)), p) decreases -func CombineBytesFromSegments(raw []byte, s io.SegLens, p perm) { - sl.Unslice_Bytes(raw, HopfieldsEndIdx(2, s), len(raw), p) - sl.Unslice_Bytes(raw, HopfieldsStartIdx(2, s), HopfieldsEndIdx(2, s), p) - sl.Unslice_Bytes(raw, HopfieldsStartIdx(1, s), HopfieldsEndIdx(1, s), p) - sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, s), HopfieldsEndIdx(0, s), p) - sl.Unslice_Bytes(raw, 0, HopfieldsStartIdx(0, s), p) - sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(2, s), len(raw), HopfieldsEndIdx(2, s), p) - sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(1, s), len(raw), HopfieldsEndIdx(1, s), p) - sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(0, s), len(raw), HopfieldsEndIdx(0, s), p) - sl.CombineAtIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, s), p) +func CombineBytesFromSegments(raw []byte, segs io.SegLens, p perm) { + sl.Unslice_Bytes(raw, HopfieldsEndIdx(2, segs), len(raw), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(2, segs), HopfieldsEndIdx(2, segs), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(1, segs), HopfieldsEndIdx(1, segs), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, segs), HopfieldsEndIdx(0, segs), p) + sl.Unslice_Bytes(raw, 0, HopfieldsStartIdx(0, segs), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(2, segs), len(raw), HopfieldsEndIdx(2, segs), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(1, segs), len(raw), HopfieldsEndIdx(1, segs), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), HopfieldsEndIdx(0, segs), p) + sl.CombineAtIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, segs), p) } // SliceBytesIntoInfoFields splits the raw bytes of a packet into its infofields ghost requires 0 < p -requires s.Valid() -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) -requires numInf == s.NumInfoFields() +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires numInf == segs.NumInfoFields() requires acc(sl.Bytes(raw, 0, len(raw)), p) ensures acc(sl.Bytes(raw[:MetaLen], 0, MetaLen), p) ensures acc(sl.Bytes(InfofieldByteSlice(raw, 0), 0, path.InfoLen), p) ensures 1 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 1), 0, path.InfoLen), p) ensures 2 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 2), 0, path.InfoLen), p) -ensures acc(sl.Bytes(raw[HopfieldsStartIdx(0, s):], 0, len(raw[HopfieldsStartIdx(0, s):])), p) +ensures acc(sl.Bytes(raw[HopfieldsStartIdx(0, segs):], 0, 
len(raw[HopfieldsStartIdx(0, segs):])), p) decreases -func SliceBytesIntoInfoFields(raw []byte, numInf int, s io.SegLens, p perm) { +func SliceBytesIntoInfoFields(raw []byte, numInf int, segs io.SegLens, p perm) { sl.SplitByIndex_Bytes(raw, 0, len(raw), MetaLen, p) sl.SplitByIndex_Bytes(raw, MetaLen, len(raw), path.InfoFieldOffset(1, MetaLen), p) sl.Reslice_Bytes(raw, 0, MetaLen, p) @@ -174,31 +174,31 @@ func SliceBytesIntoInfoFields(raw []byte, numInf int, s io.SegLens, p perm) { } if(numInf > 2) { sl.SplitByIndex_Bytes(raw, path.InfoFieldOffset(2, MetaLen), len(raw), - HopfieldsStartIdx(0, s), p) - sl.Reslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, s), p) + HopfieldsStartIdx(0, segs), p) + sl.Reslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, segs), p) } - sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, s), len(raw), p) + sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), p) } // CombineBytesFromInfoFields combines the infofields of a packet into a single slice of bytes. ghost requires 0 < p -requires s.Valid() -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) -requires numInf == s.NumInfoFields() +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires numInf == segs.NumInfoFields() requires acc(sl.Bytes(raw[:MetaLen], 0, MetaLen), p) requires acc(sl.Bytes(InfofieldByteSlice(raw, 0), 0, path.InfoLen), p) requires 1 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 1), 0, path.InfoLen), p) requires 2 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 2), 0, path.InfoLen), p) -requires acc(sl.Bytes(raw[HopfieldsStartIdx(0, s):], 0, len(raw[HopfieldsStartIdx(0, s):])), p) +requires acc(sl.Bytes(raw[HopfieldsStartIdx(0, segs):], 0, len(raw[HopfieldsStartIdx(0, segs):])), p) ensures acc(sl.Bytes(raw, 0, len(raw)), p) decreases -func CombineBytesFromInfoFields(raw []byte, numInf int, s io.SegLens, p perm) { - sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, s), len(raw), p) +func CombineBytesFromInfoFields(raw []byte, numInf int, segs io.SegLens, p perm) { + sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), p) if(numInf > 2) { - sl.Unslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, s), p) + sl.Unslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, segs), p) sl.CombineAtIndex_Bytes(raw, path.InfoFieldOffset(2, MetaLen), len(raw), - HopfieldsStartIdx(0, s), p) + HopfieldsStartIdx(0, segs), p) } if(numInf > 1) { sl.Unslice_Bytes(raw, path.InfoFieldOffset(1, MetaLen), @@ -234,11 +234,11 @@ pure func CurrSegWithInfo(hopfields []byte, currHfIdx int, SegLen int, inf io.Ab // requires permissions to the hopfields byte slice of the segment specified by currInfIdx. ghost opaque -requires s.Valid() -requires (currInfIdx == 1 && s.Seg2Len > 0) || - (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> - let start := HopfieldsStartIdx(currInfIdx, s) in - let end := HopfieldsEndIdx(currInfIdx, s) in +requires segs.Valid() +requires (currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && len(hopfields) == end-start && acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) @@ -246,12 +246,12 @@ decreases pure func LeftSegWithInfo( hopfields []byte, currInfIdx int, - s io.SegLens, + segs io.SegLens, inf option[io.AbsInfoField]) option[io.IO_seg3] { - return (currInfIdx == 1 && s.Seg2Len > 0) ? 
- some(CurrSegWithInfo(hopfields, 0, s.Seg2Len, get(inf))) : - (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ? - some(CurrSegWithInfo(hopfields, 0, s.Seg3Len, get(inf))) : + return (currInfIdx == 1 && segs.Seg2Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, segs.Seg2Len, get(inf))) : + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, segs.Seg3Len, get(inf))) : none[io.IO_seg3] } @@ -261,11 +261,11 @@ pure func LeftSegWithInfo( // requires permissions to the hopfields byte slice of the segment specified by currInfIdx. ghost opaque -requires s.Valid() -requires (currInfIdx == 0 && s.Seg2Len > 0) || - (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> - let start := HopfieldsStartIdx(currInfIdx, s) in - let end := HopfieldsEndIdx(currInfIdx, s) in +requires segs.Valid() +requires (currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && len(hopfields) == end-start && acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) @@ -273,12 +273,12 @@ decreases pure func RightSegWithInfo( hopfields []byte, currInfIdx int, - s io.SegLens, + segs io.SegLens, inf option[io.AbsInfoField]) option[io.IO_seg3] { - return (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0) ? - some(CurrSegWithInfo(hopfields, s.Seg2Len, s.Seg2Len, get(inf))) : - (currInfIdx == 0 && s.Seg2Len > 0) ? - some(CurrSegWithInfo(hopfields, s.Seg1Len, s.Seg1Len, get(inf))) : + return (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, segs.Seg2Len, segs.Seg2Len, get(inf))) : + (currInfIdx == 0 && segs.Seg2Len > 0) ? + some(CurrSegWithInfo(hopfields, segs.Seg1Len, segs.Seg1Len, get(inf))) : none[io.IO_seg3] } @@ -288,11 +288,11 @@ pure func RightSegWithInfo( // requires permissions to the hopfields byte slice of the segment specified by currInfIdx. ghost opaque -requires s.Valid() -requires (s.Seg2Len > 0 && s.Seg3Len > 0 && +requires segs.Valid() +requires (segs.Seg2Len > 0 && segs.Seg3Len > 0 && (currInfIdx == 2 || currInfIdx == 4)) ==> - let start := HopfieldsStartIdx(currInfIdx, s) in - let end := HopfieldsEndIdx(currInfIdx, s) in + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && len(hopfields) == end-start && acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) @@ -300,12 +300,12 @@ decreases pure func MidSegWithInfo( hopfields []byte, currInfIdx int, - s io.SegLens, + segs io.SegLens, inf option[io.AbsInfoField]) option[io.IO_seg3] { - return (currInfIdx == 4 && s.Seg2Len > 0 && s.Seg3Len > 0) ? - some(CurrSegWithInfo(hopfields, s.Seg1Len, s.Seg1Len, get(inf))) : - (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ? - some(CurrSegWithInfo(hopfields, 0, s.Seg3Len, get(inf))) : + return (currInfIdx == 4 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, segs.Seg1Len, segs.Seg1Len, get(inf))) : + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, segs.Seg3Len, get(inf))) : none[io.IO_seg3] } @@ -360,27 +360,27 @@ func UpdateCurrSegInfo(raw []byte, currHfIdx int, SegLen int, // LeftSegEqualitySpec defines the conditions that must hold for LeftSegWithInfo(..) // and LeftSeg(..) to represent the same abstract segment. 
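// For illustration (an instance of the spec below): with
// segs == io.CombineSegLens(3, 2, 0), currInfIdx == 1, and a sufficiently long
// raw, the spec equates LeftSeg(raw, 1, segs, MetaLen) with
// LeftSegWithInfo(hopBytes, 1, segs, inf), where
// hopBytes == HopfieldsByteSlice(raw, 1, segs) and
// inf == some(path.BytesToAbsInfoField(InfofieldByteSlice(raw, 1), 0)).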
ghost -requires s.Valid() -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) requires 1 <= currInfIdx && currInfIdx < 4 requires acc(sl.Bytes(raw, 0, len(raw)), R49) -requires (currInfIdx == 1 && s.Seg2Len > 0) || - (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> +requires (currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) decreases -pure func LeftSegEqualitySpec(raw []byte, currInfIdx int, s io.SegLens) bool { - return (currInfIdx == 1 && s.Seg2Len > 0) || - (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ? +pure func LeftSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { + return (currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in - LeftSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == - LeftSegWithInfo(hopBytes, currInfIdx, s, inf) : - LeftSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == - LeftSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) + LeftSeg(raw, currInfIdx, segs, MetaLen) == + LeftSegWithInfo(hopBytes, currInfIdx, segs, inf) : + LeftSeg(raw, currInfIdx, segs, MetaLen) == + LeftSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } // LeftSegEquality ensures that the two definitions of abstract segments, LeftSegWithInfo(..) @@ -390,58 +390,58 @@ pure func LeftSegEqualitySpec(raw []byte, currInfIdx int, s io.SegLens) bool { // complex preconditions and postconditions because, for every currInfIdx, we need an offset for // its infofield and one for its hopfields. 
ghost -requires s.Valid() -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) requires 1 <= currInfIdx && currInfIdx < 4 preserves acc(sl.Bytes(raw, 0, len(raw)), R49) -preserves (currInfIdx == 1 && s.Seg2Len > 0) || - (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> +preserves (currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) -ensures LeftSegEqualitySpec(raw, currInfIdx, s) +ensures LeftSegEqualitySpec(raw, currInfIdx, segs) decreases -func LeftSegEquality(raw []byte, currInfIdx int, s io.SegLens) { - reveal LeftSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) - if ((currInfIdx == 1 && s.Seg2Len > 0) || - (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0)) { +func LeftSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { + reveal LeftSeg(raw, currInfIdx, segs, MetaLen) + if ((currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0)) { infoBytes := InfofieldByteSlice(raw, currInfIdx) - hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) - offset := HopfieldsStartIdx(currInfIdx, s) - segLen := currInfIdx == 1 ? s.Seg2Len : s.Seg3Len - reveal LeftSegWithInfo(hopBytes, currInfIdx, s, inf) + offset := HopfieldsStartIdx(currInfIdx, segs) + segLen := currInfIdx == 1 ? segs.Seg2Len : segs.Seg3Len + reveal LeftSegWithInfo(hopBytes, currInfIdx, segs, inf) CurrSegEquality(raw, offset, currInfIdx, 0, segLen) } else { - reveal LeftSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) + reveal LeftSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } } // RightSegEqualitySpec defines the conditions that must hold for RightSegWithInfo(..) // and RightSeg(..) to represent the same abstract segment. ghost -requires s.Valid() -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 2 requires acc(sl.Bytes(raw, 0, len(raw)), R49) -requires (currInfIdx == 0 && s.Seg2Len > 0) || - (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> +requires (currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) decreases -pure func RightSegEqualitySpec(raw []byte, currInfIdx int, s io.SegLens) bool { - return (currInfIdx == 0 && s.Seg2Len > 0) || - (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0) ? +pure func RightSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { + return (currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? 
let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in - RightSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == - RightSegWithInfo(hopBytes, currInfIdx, s, inf) : - RightSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == - RightSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) + RightSeg(raw, currInfIdx, segs, MetaLen) == + RightSegWithInfo(hopBytes, currInfIdx, segs, inf) : + RightSeg(raw, currInfIdx, segs, MetaLen) == + RightSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } // RightSegEquality ensures that the two definitions of abstract segments, RightSegWithInfo(..) @@ -451,58 +451,58 @@ pure func RightSegEqualitySpec(raw []byte, currInfIdx int, s io.SegLens) bool { // complex preconditions and postconditions because, for every currInfIdx, we need an offset for // its infofield and one for its hopfields. ghost -requires s.Valid() -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 2 preserves acc(sl.Bytes(raw, 0, len(raw)), R49) -preserves (currInfIdx == 0 && s.Seg2Len > 0) || - (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0) ==> +preserves (currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) -ensures RightSegEqualitySpec(raw, currInfIdx, s) +ensures RightSegEqualitySpec(raw, currInfIdx, segs) decreases -func RightSegEquality(raw []byte, currInfIdx int, s io.SegLens) { - reveal RightSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) - if ((currInfIdx == 0 && s.Seg2Len > 0) || - (currInfIdx == 1 && s.Seg2Len > 0 && s.Seg3Len > 0)) { +func RightSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { + reveal RightSeg(raw, currInfIdx, segs, MetaLen) + if ((currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0)) { infoBytes := InfofieldByteSlice(raw, currInfIdx) - hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) - offset := HopfieldsStartIdx(currInfIdx, s) - segLen := currInfIdx == 0 ? s.Seg1Len : s.Seg2Len - reveal RightSegWithInfo(hopBytes, currInfIdx, s, inf) + offset := HopfieldsStartIdx(currInfIdx, segs) + segLen := currInfIdx == 0 ? segs.Seg1Len : segs.Seg2Len + reveal RightSegWithInfo(hopBytes, currInfIdx, segs, inf) CurrSegEquality(raw, offset, currInfIdx, segLen, segLen) } else { - reveal RightSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) + reveal RightSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } } // MidSegEqualitySpec defines the conditions that must hold for MidSegWithInfo(..) // and MidSeg(..) to represent the same abstract segment. 
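// Typical use (a sketch): a caller holding R49 permissions to raw and to the
// info/hopfield byte slices of the relevant segment may call
// RightSegEquality(raw, 0, segs) to obtain RightSegEqualitySpec(raw, 0, segs),
// and can then replace RightSeg(raw, 0, segs, MetaLen) by RightSegWithInfo(..)
// in subsequent proof steps.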
ghost -requires s.Valid() -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) requires 2 <= currInfIdx && currInfIdx < 5 requires acc(sl.Bytes(raw, 0, len(raw)), R49) -requires (s.Seg2Len > 0 && s.Seg3Len > 0 && +requires (segs.Seg2Len > 0 && segs.Seg3Len > 0 && (currInfIdx == 2 || currInfIdx == 4)) ==> let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) decreases -pure func MidSegEqualitySpec(raw []byte, currInfIdx int, s io.SegLens) bool { - return (s.Seg2Len > 0 && s.Seg3Len > 0 && +pure func MidSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { + return (segs.Seg2Len > 0 && segs.Seg3Len > 0 && (currInfIdx == 2 || currInfIdx == 4)) ? let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in - MidSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == - MidSegWithInfo(hopBytes, currInfIdx, s, inf) : - MidSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) == - MidSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) + MidSeg(raw, currInfIdx, segs, MetaLen) == + MidSegWithInfo(hopBytes, currInfIdx, segs, inf) : + MidSeg(raw, currInfIdx, segs, MetaLen) == + MidSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } // MidSegEquality ensures that the two definitions of abstract segments, MidSegWithInfo(..) @@ -512,35 +512,35 @@ pure func MidSegEqualitySpec(raw []byte, currInfIdx int, s io.SegLens) bool { // complex preconditions and postconditions because, for every currInfIdx, we need an offset for // its infofield and one for its hopfields. 
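// For illustration (following the proof below): MidSegEquality(raw, 4, segs)
// handles the wrap-around case; it relates MidSeg at currInfIdx == 4 to the
// bytes of segment 0, i.e. InfofieldByteSlice(raw, 0) and
// HopfieldsByteSlice(raw, 0, segs), matching the special case in which
// index 4 behaves like index 0.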
ghost -requires s.Valid() -requires pktLen(s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) <= len(raw) +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) requires 2 <= currInfIdx && currInfIdx < 5 preserves acc(sl.Bytes(raw, 0, len(raw)), R49) -preserves (s.Seg2Len > 0 && s.Seg3Len > 0 && +preserves (segs.Seg2Len > 0 && segs.Seg3Len > 0 && (currInfIdx == 2 || currInfIdx == 4)) ==> let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) -ensures MidSegEqualitySpec(raw, currInfIdx, s) +ensures MidSegEqualitySpec(raw, currInfIdx, segs) decreases -func MidSegEquality(raw []byte, currInfIdx int, s io.SegLens) { - reveal MidSeg(raw, currInfIdx, s.Seg1Len, s.Seg2Len, s.Seg3Len, MetaLen) - if (currInfIdx == 4 && s.Seg2Len > 0 && s.Seg3Len > 0) { +func MidSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { + reveal MidSeg(raw, currInfIdx, segs, MetaLen) + if (currInfIdx == 4 && segs.Seg2Len > 0 && segs.Seg3Len > 0) { infoBytes := InfofieldByteSlice(raw, 0) - hopBytes := HopfieldsByteSlice(raw, 0, s) + hopBytes := HopfieldsByteSlice(raw, 0, segs) inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) - offset := HopfieldsStartIdx(currInfIdx, s) - reveal MidSegWithInfo(hopBytes, currInfIdx, s, inf) - CurrSegEquality(raw, offset, 0, s.Seg1Len, s.Seg1Len) - } else if (currInfIdx == 2 && s.Seg2Len > 0 && s.Seg3Len > 0) { + offset := HopfieldsStartIdx(currInfIdx, segs) + reveal MidSegWithInfo(hopBytes, currInfIdx, segs, inf) + CurrSegEquality(raw, offset, 0, segs.Seg1Len, segs.Seg1Len) + } else if (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) { infoBytes := InfofieldByteSlice(raw, currInfIdx) - hopBytes := HopfieldsByteSlice(raw, currInfIdx, s) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) - offset := HopfieldsStartIdx(currInfIdx, s) - reveal MidSegWithInfo(hopBytes, currInfIdx, s, inf) - CurrSegEquality(raw, offset, currInfIdx, 0, s.Seg3Len) + offset := HopfieldsStartIdx(currInfIdx, segs) + reveal MidSegWithInfo(hopBytes, currInfIdx, segs, inf) + CurrSegEquality(raw, offset, currInfIdx, 0, segs.Seg3Len) } else { - reveal MidSegWithInfo(nil, currInfIdx, s, none[io.AbsInfoField]) + reveal MidSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } } \ No newline at end of file diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index 8490963f4..ed1cafbd4 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -250,8 +250,9 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ oldSeg1Len := int(s.PathMeta.SegLen[0]) //@ oldSeg2Len := int(s.PathMeta.SegLen[1]) //@ oldSeg3Len := int(s.PathMeta.SegLen[2]) - //@ oldSegLen := LengthOfCurrSeg(oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ oldPrevSegLen := LengthOfPrevSeg(oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ oldSegs := io.CombineSegLens(oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ oldSegLen := oldSegs.LengthOfCurrSeg(oldCurrHfIdx) + //@ oldPrevSegLen := oldSegs.LengthOfPrevSeg(oldCurrHfIdx) //@ oldOffset := HopFieldOffset(s.Base.NumINF, oldPrevSegLen, 0) //@ fold acc(s.Base.Mem(), R56) if err := s.Base.IncPath(); err != nil { @@ -267,9 +268,9 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ oldHfIdxSeg := oldCurrHfIdx-oldPrevSegLen //@ 
WidenCurrSeg(ubuf, oldOffset + MetaLen, oldCurrInfIdx, oldHfIdxSeg, //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) - //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSegs, MetaLen, MetaLen, len(ubuf)) //@ LenCurrSeg(tail, oldOffset, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) //@ oldAbsPkt := reveal s.absPkt(ubuf) //@ sl.SplitRange_Bytes(ubuf, 0, MetaLen, HalfPerm) @@ -297,24 +298,24 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ IncCurrSeg(tail, oldOffset, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) //@ WidenCurrSeg(ubuf, oldOffset + MetaLen, oldCurrInfIdx, oldHfIdxSeg + 1, //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) - //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSegs, MetaLen, MetaLen, len(ubuf)) //@ assert reveal s.absPkt(ubuf) == AbsIncPath(oldAbsPkt) //@ } else { - //@ segLen := LengthOfCurrSeg(currHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ prevSegLen := LengthOfPrevSeg(currHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ segLen := oldSegs.LengthOfCurrSeg(currHfIdx) + //@ prevSegLen := oldSegs.LengthOfPrevSeg(currHfIdx) //@ offsetWithHops := HopFieldOffset(s.Base.NumINF, prevSegLen, MetaLen) //@ hfIdxSeg := currHfIdx-prevSegLen - //@ XoverSegNotNone(tail, oldCurrInfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ XoverCurrSeg(tail, oldCurrInfIdx + 1, oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ XoverLeftSeg(tail, oldCurrInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ XoverMidSeg(tail, oldCurrInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len) - //@ XoverRightSeg(tail, oldCurrInfIdx, oldCurrHfIdx, oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ XoverSegNotNone(tail, oldCurrInfIdx, oldSegs) + //@ XoverCurrSeg(tail, oldCurrInfIdx + 1, oldCurrHfIdx, oldSegs) + //@ XoverLeftSeg(tail, oldCurrInfIdx + 2, oldSegs) + //@ XoverMidSeg(tail, oldCurrInfIdx - 1, oldSegs) + //@ XoverRightSeg(tail, oldCurrInfIdx, oldCurrHfIdx, oldSegs) //@ WidenCurrSeg(ubuf, offsetWithHops, currInfIdx, hfIdxSeg, segLen, MetaLen, MetaLen, len(ubuf)) - //@ WidenLeftSeg(ubuf, currInfIdx + 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ WidenMidSeg(ubuf, currInfIdx + 2, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) - //@ WidenRightSeg(ubuf, currInfIdx - 1, oldSeg1Len, oldSeg2Len, oldSeg3Len, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, currInfIdx + 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, currInfIdx + 2, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, currInfIdx - 1, oldSegs, MetaLen, MetaLen, len(ubuf)) //@ assert reveal s.absPkt(ubuf) == 
AbsXover(oldAbsPkt) //@ } @@ -416,11 +417,11 @@ func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @ //@ seg1Len := int(s.PathMeta.SegLen[0]) //@ seg2Len := int(s.PathMeta.SegLen[1]) //@ seg3Len := int(s.PathMeta.SegLen[2]) - //@ segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - //@ prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) + //@ segLens := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + //@ segLen := segLens.LengthOfCurrSeg(currHfIdx) + //@ prevSegLen := segLens.LengthOfPrevSeg(currHfIdx) //@ offset := HopFieldOffset(s.Base.NumINF, prevSegLen, MetaLen) //@ hopfieldOffset := MetaLen + s.NumINF*path.InfoLen - //@ segLens := io.CombineSegLens(seg1Len, seg2Len, seg3Len) if idx >= s.NumINF { err := serrors.New("InfoField index out of bounds", "max", s.NumINF-1, "actual", idx) //@ fold acc(s.Base.Mem(), R50) diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index 53914f8b5..03c5467a9 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -252,12 +252,6 @@ func (s *Raw) RawBufferNonInitMem() []byte { } /**** End of helpful pure functions ****/ -ghost -decreases -pure func NumInfoFields(seg1Len int, seg2Len int, seg3Len int) int { - return seg3Len > 0 ? 3 : (seg2Len > 0 ? 2 : (seg1Len > 0 ? 1 : 0)) -} - ghost decreases pure func HopFieldOffset(numINF int, currHF int, headerOffset int) int { @@ -266,24 +260,9 @@ pure func HopFieldOffset(numINF int, currHF int, headerOffset int) int { ghost decreases -pure func pktLen(seg1Len int, seg2Len int, seg3Len int, headerOffset int) int { - return HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) + - path.HopLen * (seg1Len + seg2Len + seg3Len) -} - - -ghost -decreases -pure func LengthOfCurrSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) int { - return seg1Len > currHF ? seg1Len : ((seg1Len + seg2Len) > currHF ? seg2Len : seg3Len) -} - -ghost -requires 0 <= currHF -ensures res <= currHF -decreases -pure func LengthOfPrevSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) (res int) { - return seg1Len > currHF ? 0 : ((seg1Len + seg2Len) > currHF ? seg1Len : seg1Len + seg2Len) +pure func PktLen(segs io.SegLens, headerOffset int) int { + return HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) + + path.HopLen * segs.TotalHops() } ghost @@ -387,75 +366,63 @@ pure func CurrSeg(raw []byte, ghost opaque requires 0 <= headerOffset -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) +requires segs.Valid() +requires PktLen(segs, headerOffset) <= len(raw) requires 1 <= currInfIdx && currInfIdx < 4 requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func LeftSeg( raw []byte, currInfIdx int, - seg1Len int, - seg2Len int, - seg3Len int, + segs io.SegLens, headerOffset int) option[io.IO_seg3] { - return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in - (currInfIdx == 1 && seg2Len > 0) ? - some(CurrSeg(raw, offset + path.HopLen * seg1Len, currInfIdx, 0, seg2Len, headerOffset)) : - ((currInfIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - some(CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currInfIdx, 0, seg3Len, headerOffset)) : + return let offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) in + (currInfIdx == 1 && segs.Seg2Len > 0) ? 
+ some(CurrSeg(raw, offset + path.HopLen * segs.Seg1Len, currInfIdx, 0, segs.Seg2Len, headerOffset)) : + ((currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * (segs.Seg1Len + segs.Seg2Len), currInfIdx, 0, segs.Seg3Len, headerOffset)) : none[io.IO_seg3]) } ghost opaque requires 0 <= headerOffset -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) +requires segs.Valid() +requires PktLen(segs, headerOffset) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 2 requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func RightSeg( raw []byte, currInfIdx int, - seg1Len int, - seg2Len int, - seg3Len int, + segs io.SegLens, headerOffset int) option[io.IO_seg3] { - return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in - (currInfIdx == 1 && seg2Len > 0 && seg3Len > 0) ? - some(CurrSeg(raw, offset + path.HopLen * seg1Len, currInfIdx, seg2Len, seg2Len, headerOffset)) : - (currInfIdx == 0 && seg2Len > 0) ? - some(CurrSeg(raw, offset, currInfIdx, seg1Len, seg1Len, headerOffset)) : + return let offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) in + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * segs.Seg1Len, currInfIdx, segs.Seg2Len, segs.Seg2Len, headerOffset)) : + (currInfIdx == 0 && segs.Seg2Len > 0) ? + some(CurrSeg(raw, offset, currInfIdx, segs.Seg1Len, segs.Seg1Len, headerOffset)) : none[io.IO_seg3] } ghost opaque requires 0 <= headerOffset -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) +requires segs.Valid() +requires PktLen(segs, headerOffset) <= len(raw) requires 2 <= currInfIdx && currInfIdx < 5 requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func MidSeg( raw []byte, currInfIdx int, - seg1Len int, - seg2Len int, - seg3Len int, + segs io.SegLens, headerOffset int) option[io.IO_seg3] { - return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in - (currInfIdx == 4 && seg2Len > 0 && seg3Len > 0) ? - some(CurrSeg(raw, offset, 0, seg1Len, seg1Len, headerOffset)) : - ((currInfIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - some(CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currInfIdx, 0, seg3Len, headerOffset)) : + return let offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) in + (currInfIdx == 4 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSeg(raw, offset, 0, segs.Seg1Len, segs.Seg1Len, headerOffset)) : + ((currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? 
+ some(CurrSeg(raw, offset + path.HopLen * (segs.Seg1Len + segs.Seg2Len), currInfIdx, 0, segs.Seg3Len, headerOffset)) : none[io.IO_seg3]) } @@ -472,15 +439,16 @@ pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { let seg1Len := int(metaHdr.SegLen[0]) in let seg2Len := int(metaHdr.SegLen[1]) in let seg3Len := int(metaHdr.SegLen[2]) in - let segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in - let prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in - let numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) in + let segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) in + let segLen := segs.LengthOfCurrSeg(currHfIdx) in + let prevSegLen := segs.LengthOfPrevSeg(currHfIdx) in + let numINF := segs.NumInfoFields() in let offset := HopFieldOffset(numINF, prevSegLen, MetaLen) in io.IO_Packet2 { CurrSeg : CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen), - LeftSeg : LeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len , seg3Len, MetaLen), - MidSeg : MidSeg(raw, currInfIdx + 2, seg1Len, seg2Len , seg3Len, MetaLen), - RightSeg : RightSeg(raw, currInfIdx - 1, seg1Len, seg2Len , seg3Len, MetaLen), + LeftSeg : LeftSeg(raw, currInfIdx + 1, segs, MetaLen), + MidSeg : MidSeg(raw, currInfIdx + 2, segs, MetaLen), + RightSeg : RightSeg(raw, currInfIdx - 1, segs, MetaLen), } } @@ -503,7 +471,8 @@ pure func RawBytesToBase(raw []byte) Base { let seg1 := int(metaHdr.SegLen[0]) in let seg2 := int(metaHdr.SegLen[1]) in let seg3 := int(metaHdr.SegLen[2]) in - Base{metaHdr, NumInfoFields(seg1, seg2, seg3), seg1 + seg2 + seg3} + let segs := io.CombineSegLens(seg1, seg2, seg3) in + Base{metaHdr, segs.NumInfoFields(), segs.TotalHops()} } ghost @@ -516,11 +485,12 @@ pure func validPktMetaHdr(raw []byte) bool { let seg1 := int(metaHdr.SegLen[0]) in let seg2 := int(metaHdr.SegLen[1]) in let seg3 := int(metaHdr.SegLen[2]) in + let segs := io.CombineSegLens(seg1, seg2, seg3) in let base := RawBytesToBase(raw) in 0 < metaHdr.SegLen[0] && base.ValidCurrIdxsSpec() && base.CurrInfMatchesCurrHF() && - pktLen(seg1, seg2, seg3, MetaLen) <= len(raw) + PktLen(segs, MetaLen) <= len(raw) } ghost @@ -563,9 +533,10 @@ func (s *Raw) EstablishValidPktMetaHdr(ghost ub []byte) { seg1 := int(s.Base.PathMeta.SegLen[0]) seg2 := int(s.Base.PathMeta.SegLen[1]) seg3 := int(s.Base.PathMeta.SegLen[2]) + segs := io.CombineSegLens(seg1, seg2, seg3) assert 0 < seg1 assert s.ValidCurrIdxs(ub) - assert pktLen(seg1, seg2, seg3, MetaLen) <= len(ub) + assert PktLen(segs, MetaLen) <= len(ub) assert reveal validPktMetaHdr(ub) fold acc(s.Base.Mem(), R56) fold acc(s.Mem(ub), R55) @@ -695,9 +666,10 @@ func (s *Raw) LastHopLemma(ubuf []byte) { seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() offset := HopFieldOffset(numINF, prevSegLen, MetaLen) pkt := reveal s.absPkt(ubuf) assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) @@ -721,13 +693,14 @@ func (s *Raw) XoverLemma(ubuf []byte) { seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, 
seg3Len) - prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() offset := HopFieldOffset(numINF, prevSegLen, MetaLen) pkt := reveal s.absPkt(ubuf) assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) - assert pkt.LeftSeg == reveal LeftSeg(ubuf, currInfIdx + 1, seg1Len, seg2Len , seg3Len, MetaLen) + assert pkt.LeftSeg == reveal LeftSeg(ubuf, currInfIdx + 1, segs, MetaLen) assert len(pkt.CurrSeg.Future) == 1 assert pkt.LeftSeg != none[io.IO_seg2] } @@ -773,9 +746,10 @@ func (s *Raw) DecodingLemma(ubuf []byte, info path.InfoField, hop path.HopField) seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() offset := HopFieldOffset(numINF, prevSegLen, MetaLen) hfIdxSeg := currHfIdx-prevSegLen reveal s.CorrectlyDecodedInf(ubuf, info) @@ -807,19 +781,18 @@ func LenCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen in } ghost -requires 0 < seg1Len -requires 0 < seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires segs.Valid() +requires 0 < segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) requires 0 <= currInfIdx && currInfIdx < 2 -requires 1 <= currInfIdx ==> 0 < seg3Len +requires 1 <= currInfIdx ==> 0 < segs.Seg3Len preserves acc(sl.Bytes(raw, 0, len(raw)), R56) -ensures LeftSeg(raw, currInfIdx+1, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] -ensures RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] +ensures LeftSeg(raw, currInfIdx+1, segs, 0) != none[io.IO_seg3] +ensures RightSeg(raw, currInfIdx, segs, 0) != none[io.IO_seg3] decreases -func XoverSegNotNone(raw []byte, currInfIdx int, seg1Len int, seg2Len int, seg3Len int) { - reveal LeftSeg(raw, currInfIdx+1, seg1Len, seg2Len, seg3Len, 0) - reveal RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) +func XoverSegNotNone(raw []byte, currInfIdx int, segs io.SegLens) { + reveal LeftSeg(raw, currInfIdx+1, segs, 0) + reveal RightSeg(raw, currInfIdx, segs, 0) } ghost @@ -830,7 +803,7 @@ requires 0 <= currHfIdx && currHfIdx < segLen requires 0 <= currInfIdx && currInfIdx < 3 preserves acc(sl.Bytes(raw, 0, len(raw)), R56) preserves len(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0).Future) > 0 -ensures CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) == +ensures CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) == absIncPathSeg(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0)) decreases func IncCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen int) { @@ -847,98 +820,92 @@ func IncCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen in } ghost -requires 0 < seg1Len -requires 0 < seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires segs.Valid() +requires 0 < segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) requires 1 <= currInfIdx && currInfIdx 
< 3 -requires 1 == currInfIdx ==> currHfIdx+1 == seg1Len -requires 2 == currInfIdx ==> 0 < seg3Len && currHfIdx+1 == seg1Len + seg2Len -requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires 1 == currInfIdx ==> currHfIdx+1 == segs.Seg1Len +requires 2 == currInfIdx ==> 0 < segs.Seg3Len && currHfIdx+1 == segs.Seg1Len + segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) preserves acc(sl.Bytes(raw, 0, len(raw)), R56) -preserves LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] +preserves LeftSeg(raw, currInfIdx, segs, 0) != none[io.IO_seg3] ensures - let prevSegLen := LengthOfPrevSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) in - let segLen := LengthOfCurrSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) in - let numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) in + let prevSegLen := segs.LengthOfPrevSeg(currHfIdx+1) in + let segLen := segs.LengthOfCurrSeg(currHfIdx+1) in + let numInf := segs.NumInfoFields() in let offset := HopFieldOffset(numInf, prevSegLen, 0) in CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen+1, segLen, 0) == - get(LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0)) + get(LeftSeg(raw, currInfIdx, segs, 0)) decreases -func XoverCurrSeg(raw []byte, currInfIdx int, currHfIdx int, seg1Len int, seg2Len int, seg3Len int) { - prevSegLen := LengthOfPrevSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) - segLen := LengthOfCurrSeg(currHfIdx+1, seg1Len, seg2Len, seg3Len) - numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) +func XoverCurrSeg(raw []byte, currInfIdx int, currHfIdx int, segs io.SegLens) { + prevSegLen := segs.LengthOfPrevSeg(currHfIdx+1) + segLen := segs.LengthOfCurrSeg(currHfIdx+1) + numInf := segs.NumInfoFields() offset := HopFieldOffset(numInf, prevSegLen, 0) currseg := reveal CurrSeg(raw, offset, currInfIdx, 0, segLen, 0) - leftseg := reveal LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) + leftseg := reveal LeftSeg(raw, currInfIdx, segs, 0) assert currseg == get(leftseg) } ghost -requires 0 < seg1Len -requires 0 < seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires segs.Valid() +requires PktLen(segs, 0) <= len(raw) requires 2 <= currInfIdx && currInfIdx < 4 preserves acc(sl.Bytes(raw, 0, len(raw)), R56) -ensures LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) == - MidSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) +ensures LeftSeg(raw, currInfIdx, segs, 0) == + MidSeg(raw, currInfIdx, segs, 0) decreases -func XoverLeftSeg(raw []byte, currInfIdx int, seg1Len int, seg2Len int, seg3Len int) { - leftseg := reveal LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) - midseg := reveal MidSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) +func XoverLeftSeg(raw []byte, currInfIdx int, segs io.SegLens) { + leftseg := reveal LeftSeg(raw, currInfIdx, segs, 0) + midseg := reveal MidSeg(raw, currInfIdx, segs, 0) assert leftseg == midseg } ghost -requires 0 < seg1Len -requires 0 < seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires segs.Valid() +requires 0 < segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 1 -requires 0 == currInfIdx ==> 0 < seg3Len +requires 0 == currInfIdx ==> 0 < segs.Seg3Len preserves acc(sl.Bytes(raw, 0, len(raw)), R56) -ensures MidSeg(raw, currInfIdx+4, seg1Len, seg2Len, seg3Len, 0) == - RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) +ensures MidSeg(raw, currInfIdx+4, segs, 0) == + RightSeg(raw, currInfIdx, segs, 0) decreases -func XoverMidSeg(raw []byte, 
currInfIdx int, seg1Len int, seg2Len int, seg3Len int) { - midseg := reveal MidSeg(raw, currInfIdx+4, seg1Len, seg2Len, seg3Len, 0) - rightseg := reveal RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) +func XoverMidSeg(raw []byte, currInfIdx int, segs io.SegLens) { + midseg := reveal MidSeg(raw, currInfIdx+4, segs, 0) + rightseg := reveal RightSeg(raw, currInfIdx, segs, 0) assert midseg == rightseg } ghost -requires 0 < seg1Len -requires 0 < seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires segs.Valid() +requires 0 < segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) requires 0 <= currInfIdx && currInfIdx < 2 -requires 0 == currInfIdx ==> currHfIdx+1 == seg1Len -requires 1 == currInfIdx ==> 0 < seg3Len && currHfIdx+1 == seg1Len + seg2Len -requires pktLen(seg1Len, seg2Len, seg3Len, 0) <= len(raw) +requires 0 == currInfIdx ==> currHfIdx+1 == segs.Seg1Len +requires 1 == currInfIdx ==> 0 < segs.Seg3Len && currHfIdx+1 == segs.Seg1Len + segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) preserves acc(sl.Bytes(raw, 0, len(raw)), R56) -preserves RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) != none[io.IO_seg3] +preserves RightSeg(raw, currInfIdx, segs, 0) != none[io.IO_seg3] ensures - let prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in - let segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in - let numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) in + let prevSegLen := segs.LengthOfPrevSeg(currHfIdx) in + let segLen := segs.LengthOfCurrSeg(currHfIdx) in + let numInf := segs.NumInfoFields() in let offset := HopFieldOffset(numInf, prevSegLen, 0) in let currseg := CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, 0) in len(currseg.Future) > 0 && - get(RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0)) == - absIncPathSeg(currseg) + get(RightSeg(raw, currInfIdx, segs, 0)) == absIncPathSeg(currseg) decreases -func XoverRightSeg(raw []byte, currInfIdx int, currHfIdx int, seg1Len int, seg2Len int, seg3Len int) { - prevSegLen := LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - segLen := LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - numInf := NumInfoFields(seg1Len, seg2Len, seg3Len) +func XoverRightSeg(raw []byte, currInfIdx int, currHfIdx int, segs io.SegLens) { + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + segLen := segs.LengthOfCurrSeg(currHfIdx) + numInf := segs.NumInfoFields() offset := HopFieldOffset(numInf, prevSegLen, 0) LenCurrSeg(raw, offset, currInfIdx, segLen - 1, segLen) IncCurrSeg(raw, offset, currInfIdx, segLen - 1, segLen) currseg := CurrSeg(raw, offset, currInfIdx, segLen - 1, segLen, 0) nextseg := CurrSeg(raw, offset, currInfIdx, segLen, segLen, 0) - rightseg := reveal RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, 0) + rightseg := reveal RightSeg(raw, currInfIdx, segs, 0) assert absIncPathSeg(currseg) == nextseg assert nextseg == get(rightseg) assert absIncPathSeg(currseg) == get(rightseg) diff --git a/pkg/slayers/path/scion/widen-lemma.gobra b/pkg/slayers/path/scion/widen-lemma.gobra index ddbc6a451..0297715a6 100644 --- a/pkg/slayers/path/scion/widen-lemma.gobra +++ b/pkg/slayers/path/scion/widen-lemma.gobra @@ -138,97 +138,85 @@ func widenHopFields(raw []byte, offset int, currHfIdx int, segLen int, start int ghost requires 0 <= start && start <= headerOffset -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len +requires segs.Valid() requires 0 <= length && length <= len(raw) -requires pktLen(seg1Len, seg2Len, seg3Len, 
headerOffset) <= length +requires PktLen(segs, headerOffset) <= length requires 1 <= currInfIdx && currInfIdx < 4 preserves acc(sl.Bytes(raw, 0, len(raw)), R51) preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) -ensures LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) == - LeftSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset-start) +ensures LeftSeg(raw, currInfIdx, segs, headerOffset) == + LeftSeg(raw[start:length], currInfIdx, segs, headerOffset-start) decreases func WidenLeftSeg(raw []byte, currInfIdx int, - seg1Len int, - seg2Len int, - seg3Len int, + segs io.SegLens, headerOffset int, start int, length int) { - offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) - if currInfIdx == 1 && seg2Len > 0 { - offsetWithHopfields := offset + path.HopLen * seg1Len - WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, seg2Len, headerOffset, start, length) - } else if currInfIdx == 2 && seg2Len > 0 && seg3Len > 0 { - offsetWithHopfields := offset + path.HopLen * (seg1Len + seg2Len) - WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, seg3Len, headerOffset, start, length) + offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) + if currInfIdx == 1 && segs.Seg2Len > 0 { + offsetWithHopfields := offset + path.HopLen * segs.Seg1Len + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, segs.Seg2Len, headerOffset, start, length) + } else if currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0 { + offsetWithHopfields := offset + path.HopLen * (segs.Seg1Len + segs.Seg2Len) + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, segs.Seg3Len, headerOffset, start, length) } - reveal LeftSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) - reveal LeftSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset- start) + reveal LeftSeg(raw, currInfIdx, segs, headerOffset) + reveal LeftSeg(raw[start:length], currInfIdx, segs, headerOffset- start) } ghost requires 0 <= start && start <= headerOffset -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len +requires segs.Valid() requires 0 <= length && length <= len(raw) -requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length +requires PktLen(segs, headerOffset) <= length requires -1 <= currInfIdx && currInfIdx < 2 preserves acc(sl.Bytes(raw, 0, len(raw)), R51) preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) -ensures RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) == - RightSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset-start) +ensures RightSeg(raw, currInfIdx, segs, headerOffset) == + RightSeg(raw[start:length], currInfIdx, segs, headerOffset-start) decreases func WidenRightSeg(raw []byte, currInfIdx int, - seg1Len int, - seg2Len int, - seg3Len int, + segs io.SegLens, headerOffset int, start int, length int) { - offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) - if currInfIdx == 1 && seg2Len > 0 && seg3Len > 0 { - offsetWithHopfields := offset + path.HopLen * seg1Len - WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, seg2Len, seg2Len, headerOffset, start, length) - } else if currInfIdx == 0 && seg2Len > 0 { - WidenCurrSeg(raw, offset, currInfIdx, seg1Len, seg1Len, headerOffset, start, length) + offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) + if currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0 { + offsetWithHopfields := offset + path.HopLen * segs.Seg1Len + 
WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, segs.Seg2Len, segs.Seg2Len, headerOffset, start, length) + } else if currInfIdx == 0 && segs.Seg2Len > 0 { + WidenCurrSeg(raw, offset, currInfIdx, segs.Seg1Len, segs.Seg1Len, headerOffset, start, length) } - reveal RightSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) - reveal RightSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) + reveal RightSeg(raw, currInfIdx, segs, headerOffset) + reveal RightSeg(raw[start:length], currInfIdx, segs, headerOffset - start) } ghost requires 0 <= start && start <= headerOffset -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len +requires segs.Valid() requires 2 <= currInfIdx && currInfIdx < 5 requires 0 <= length && length <= len(raw) -requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= length +requires PktLen(segs, headerOffset) <= length preserves acc(sl.Bytes(raw, 0, len(raw)), R51) preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) -ensures MidSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) == - MidSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) +ensures MidSeg(raw, currInfIdx, segs, headerOffset) == + MidSeg(raw[start:length], currInfIdx, segs, headerOffset - start) decreases func WidenMidSeg(raw []byte, currInfIdx int, - seg1Len int, - seg2Len int, - seg3Len int, + segs io.SegLens, headerOffset int, start int, length int) { - offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) - if currInfIdx == 4 && seg2Len > 0 { - WidenCurrSeg(raw, offset, 0, seg1Len, seg1Len, headerOffset, start, length) - } else if currInfIdx == 2 && seg2Len > 0 && seg3Len > 0 { - offsetWithHopfields := offset + path.HopLen * (seg1Len + seg2Len) - WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, seg3Len, headerOffset, start, length) + offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) + if currInfIdx == 4 && segs.Seg2Len > 0 { + WidenCurrSeg(raw, offset, 0, segs.Seg1Len, segs.Seg1Len, headerOffset, start, length) + } else if currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0 { + offsetWithHopfields := offset + path.HopLen * (segs.Seg1Len + segs.Seg2Len) + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, segs.Seg3Len, headerOffset, start, length) } - reveal MidSeg(raw, currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset) - reveal MidSeg(raw[start:length], currInfIdx, seg1Len, seg2Len, seg3Len, headerOffset - start) + reveal MidSeg(raw, currInfIdx, segs, headerOffset) + reveal MidSeg(raw[start:length], currInfIdx, segs, headerOffset - start) } \ No newline at end of file diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index 43342c2e1..ff6723c2b 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -29,6 +29,7 @@ import ( . "verification/utils/definitions" sl "verification/utils/slices" + "verification/io" "encoding/binary" ) @@ -427,8 +428,9 @@ func (s *SCION) EqAbsHeader(ub []byte) bool { let seg1 := int(metaHdr.SegLen[0]) in let seg2 := int(metaHdr.SegLen[1]) in let seg3 := int(metaHdr.SegLen[2]) in + let segs := io.CombineSegLens(seg1, seg2, seg3) in s.Path.(*scion.Raw).Base.GetBase() == - scion.Base{metaHdr, scion.NumInfoFields(seg1, seg2, seg3), seg1 + seg2 + seg3} + scion.Base{metaHdr, segs.NumInfoFields(), segs.TotalHops()} } // Describes a SCION packet that was successfully decoded by `DecodeFromBytes`. 
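For intuition about the refactor in these hunks: `io.CombineSegLens` merely bundles the three segment lengths into one `io.SegLens` value, so index arithmetic that used to be spread over separate `seg1Len`/`seg2Len`/`seg3Len` parameters now sits behind named helpers. A small ghost sketch of what those helpers compute (the lengths 3, 2, 0 are illustrative only; the helpers themselves are added to `verification/io/io_spec_definitions.gobra` later in this patch):

    segs := io.CombineSegLens(3, 2, 0)
    assert segs.NumInfoFields() == 2     // Seg3Len == 0, so only two info fields
    assert segs.TotalHops() == 5         // 3 + 2 + 0
    assert segs.LengthOfCurrSeg(4) == 2  // hop index 4 falls in the second segment
    assert segs.LengthOfPrevSeg(4) == 3  // the 3 hops of the first segment precede it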
@@ -467,11 +469,12 @@ pure func ValidPktMetaHdr(raw []byte) bool { let seg1 := int(metaHdr.SegLen[0]) in let seg2 := int(metaHdr.SegLen[1]) in let seg3 := int(metaHdr.SegLen[2]) in - let base := scion.Base{metaHdr, scion.NumInfoFields(seg1, seg2, seg3), seg1+seg2+seg3} in + let segs := io.CombineSegLens(seg1, seg2, seg3) in + let base := scion.Base{metaHdr, segs.NumInfoFields(), segs.TotalHops()} in 0 < metaHdr.SegLen[0] && base.ValidCurrIdxsSpec() && base.CurrInfMatchesCurrHF() && - scion.pktLen(seg1, seg2, seg3, start + scion.MetaLen) <= length + scion.PktLen(segs, start + scion.MetaLen) <= length } ghost diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index 01377e461..be6d949cd 100644 --- a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -63,9 +63,10 @@ func absPktFutureLemma(raw []byte) { seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) pkt := reveal absPkt(raw) assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen) @@ -291,15 +292,16 @@ func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) scion.WidenCurrSeg(ub, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) - scion.WidenLeftSeg(ub, currInfIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) - scion.WidenMidSeg(ub, currInfIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) - scion.WidenRightSeg(ub, currInfIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + scion.WidenLeftSeg(ub, currInfIdx + 1, segs, headerOffsetWithMetaLen, start, end) + scion.WidenMidSeg(ub, currInfIdx + 2, segs, headerOffsetWithMetaLen, start, end) + scion.WidenRightSeg(ub, currInfIdx - 1, segs, headerOffsetWithMetaLen, start, end) assert reveal absPkt(ub) == reveal p.path.absPkt(ub[start:end]) } @@ -352,15 +354,16 @@ func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := 
segs.NumInfoFields() offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) scion.WidenCurrSeg(ub, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) - scion.WidenLeftSeg(ub, currInfIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) - scion.WidenMidSeg(ub, currInfIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) - scion.WidenRightSeg(ub, currInfIdx - 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, start, end) + scion.WidenLeftSeg(ub, currInfIdx + 1, segs, headerOffsetWithMetaLen, start, end) + scion.WidenMidSeg(ub, currInfIdx + 2, segs, headerOffsetWithMetaLen, start, end) + scion.WidenRightSeg(ub, currInfIdx - 1, segs, headerOffsetWithMetaLen, start, end) assert reveal absPkt(ub) == reveal p.path.absPkt(ub[start:end]) } diff --git a/router/io-spec.gobra b/router/io-spec.gobra index c891e8f2f..6aeed7678 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -47,15 +47,16 @@ pure func absPkt(raw []byte) (res io.IO_pkt2) { let seg1Len := int(metaHdr.SegLen[0]) in let seg2Len := int(metaHdr.SegLen[1]) in let seg3Len := int(metaHdr.SegLen[2]) in - let segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in - let prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) in - let numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) in + let segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) in + let segLen := segs.LengthOfCurrSeg(currHfIdx) in + let prevSegLen := segs.LengthOfPrevSeg(currHfIdx) in + let numINF := segs.NumInfoFields() in let offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) in io.IO_Packet2 { CurrSeg : scion.CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen), - LeftSeg : scion.LeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), - MidSeg : scion.MidSeg(raw, currInfIdx + 2, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), - RightSeg : scion.RightSeg(raw, currInfIdx - 1, seg1Len, seg2Len , seg3Len, headerOffsetWithMetaLen), + LeftSeg : scion.LeftSeg(raw, currInfIdx + 1, segs, headerOffsetWithMetaLen), + MidSeg : scion.MidSeg(raw, currInfIdx + 2, segs, headerOffsetWithMetaLen), + RightSeg : scion.RightSeg(raw, currInfIdx - 1, segs, headerOffsetWithMetaLen), } } diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra index 4abb48e83..ae01a1280 100644 --- a/router/widen-lemma.gobra +++ b/router/widen-lemma.gobra @@ -136,15 +136,16 @@ func absPktWidenLemma(raw []byte, length int) { seg1Len := int(metaHdr.SegLen[0]) seg2Len := int(metaHdr.SegLen[1]) seg3Len := int(metaHdr.SegLen[2]) - segLen := scion.LengthOfCurrSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - prevSegLen := scion.LengthOfPrevSeg(currHfIdx, seg1Len, seg2Len, seg3Len) - numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) scion.WidenCurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, 0, length) - scion.WidenLeftSeg(raw, currInfIdx + 1, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) - scion.WidenMidSeg(raw, currInfIdx + 2, seg1Len, seg2Len, seg3Len, headerOffsetWithMetaLen, 0, length) - scion.WidenRightSeg(raw, currInfIdx - 1, seg1Len, seg2Len, seg3Len, 
headerOffsetWithMetaLen, 0, length) + scion.WidenLeftSeg(raw, currInfIdx + 1, segs, headerOffsetWithMetaLen, 0, length) + scion.WidenMidSeg(raw, currInfIdx + 2, segs, headerOffsetWithMetaLen, 0, length) + scion.WidenRightSeg(raw, currInfIdx - 1, segs, headerOffsetWithMetaLen, 0, length) assert reveal absPkt(raw) == reveal absPkt(raw[:length]) } diff --git a/verification/io/io_spec_definitions.gobra b/verification/io/io_spec_definitions.gobra index 71eb1b5ed..9f78b8969 100644 --- a/verification/io/io_spec_definitions.gobra +++ b/verification/io/io_spec_definitions.gobra @@ -108,9 +108,6 @@ pure func (s SegLens) Valid() bool { } ghost -requires seg1Len > 0 -requires seg2Len >= 0 -requires seg3Len >= 0 decreases pure func CombineSegLens(seg1Len int, seg2Len int, seg3Len int) SegLens { return SegLens_ { @@ -124,4 +121,24 @@ ghost decreases pure func (s SegLens) NumInfoFields() int { return s.Seg3Len > 0 ? 3 : (s.Seg2Len > 0 ? 2 : (s.Seg1Len > 0 ? 1 : 0)) +} + +ghost +decreases +pure func (s SegLens) TotalHops() int { + return s.Seg1Len + s.Seg2Len + s.Seg3Len +} + +ghost +decreases +pure func (s SegLens) LengthOfCurrSeg(currHF int) int { + return s.Seg1Len > currHF ? s.Seg1Len : ((s.Seg1Len + s.Seg2Len) > currHF ? s.Seg2Len : s.Seg3Len) +} + +ghost +requires 0 <= currHF +ensures res <= currHF +decreases +pure func (s SegLens) LengthOfPrevSeg(currHF int) (res int) { + return s.Seg1Len > currHF ? 0 : ((s.Seg1Len + s.Seg2Len) > currHF ? s.Seg1Len : s.Seg1Len + s.Seg2Len) } \ No newline at end of file From aa0a4724fa89f15f101a0624663d71dfbe26eae6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Fri, 14 Jun 2024 11:24:25 -0400 Subject: [PATCH 46/57] make Len impure and add LenSpec (#357) --- pkg/slayers/path/empty/empty.go | 15 +++-- pkg/slayers/path/empty/empty_spec.gobra | 7 +++ pkg/slayers/path/epic/epic.go | 16 ++---- pkg/slayers/path/epic/epic_spec.gobra | 13 +++++ pkg/slayers/path/epic/epic_spec_test.gobra | 19 +------ pkg/slayers/path/onehop/onehop.go | 5 +- pkg/slayers/path/onehop/onehop_spec.gobra | 9 ++- pkg/slayers/path/path.go | 65 ++++++++++++---------- pkg/slayers/path/path_spec.gobra | 9 +++ pkg/slayers/path/scion/decoded.go | 4 +- pkg/slayers/path/scion/decoded_spec.gobra | 18 ++++-- pkg/slayers/path/scion/raw_spec.gobra | 15 +++-- pkg/slayers/scion.go | 8 +-- pkg/slayers/scion_spec.gobra | 4 +- 14 files changed, 119 insertions(+), 88 deletions(-) diff --git a/pkg/slayers/path/empty/empty.go b/pkg/slayers/path/empty/empty.go index 7151b024e..73e92dc15 100644 --- a/pkg/slayers/path/empty/empty.go +++ b/pkg/slayers/path/empty/empty.go @@ -71,29 +71,28 @@ func (o Path) DecodeFromBytes(r []byte) (e error) { // @ ensures e == nil // @ decreases -func (o Path) SerializeTo(b []byte /*@, underlyingBuf []byte @*/) (e error) { +func (o Path) SerializeTo(b []byte /*@, ub []byte @*/) (e error) { return nil } -// @ requires o.Mem(underlyingBuf) +// @ requires o.Mem(ub) // @ ensures p == o -// @ ensures p.Mem(underlyingBuf) +// @ ensures p.Mem(ub) // @ ensures e == nil // @ decreases -func (o Path) Reverse( /*@ underlyingBuf []byte @*/ ) (p path.Path, e error) { +func (o Path) Reverse( /*@ ub []byte @*/ ) (p path.Path, e error) { return o, nil } -// @ pure -// @ ensures 0 <= r +// @ ensures r == o.LenSpec(ub) // @ decreases -func (o Path) Len( /*@ underlyingBuf []byte @*/ ) (r int) { +func (o Path) Len( /*@ ub []byte @*/ ) (r int) { return PathLen } // @ pure // @ ensures r == PathType // @ decreases -func (o Path) Type( /*@ underlyingBuf []byte @*/ ) (r path.Type) { +func (o 
Path) Type( /*@ ub []byte @*/ ) (r path.Type) { return PathType } diff --git a/pkg/slayers/path/empty/empty_spec.gobra b/pkg/slayers/path/empty/empty_spec.gobra index 416c4292d..044a671fa 100644 --- a/pkg/slayers/path/empty/empty_spec.gobra +++ b/pkg/slayers/path/empty/empty_spec.gobra @@ -41,6 +41,13 @@ func (p Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { return true } +ghost +pure +decreases +func (p Path) LenSpec(ghost ub []byte) (l int) { + return PathLen +} + Path implements path.Path // Definitions to allow *Path to be treated as a path.Path diff --git a/pkg/slayers/path/epic/epic.go b/pkg/slayers/path/epic/epic.go index 7c14a9c96..b9400a1ca 100644 --- a/pkg/slayers/path/epic/epic.go +++ b/pkg/slayers/path/epic/epic.go @@ -85,7 +85,7 @@ type Path struct { // @ preserves sl.Bytes(b, 0, len(b)) // @ ensures r != nil ==> r.ErrorMem() // @ ensures !old(p.hasScionPath(ubuf)) ==> r != nil -// @ ensures len(b) < old(p.Len(ubuf)) ==> r != nil +// @ ensures len(b) < old(p.LenSpec(ubuf)) ==> r != nil // @ ensures old(p.getPHVFLen(ubuf)) != HVFLen ==> r != nil // @ ensures old(p.getLHVFLen(ubuf)) != HVFLen ==> r != nil // @ decreases @@ -211,20 +211,16 @@ func (p *Path) Reverse( /*@ ghost ubuf []byte @*/ ) (ret path.Path, r error) { } // Len returns the length of the EPIC path in bytes. -// (VerifiedSCION) This is currently not checked here because Gobra -// does not support statements in pure functions. The proof obligations -// for this method are discharged in function `len_test` in the file `epic_spec_test.gobra`. -// @ trusted -// @ pure -// @ requires acc(p.Mem(ubuf), _) -// @ ensures !p.hasScionPath(ubuf) ==> l == MetadataLen -// @ ensures p.hasScionPath(ubuf) ==> l == MetadataLen + unfolding acc(p.Mem(ubuf), _) in p.ScionPath.Len(ubuf[MetadataLen:]) +// @ preserves acc(p.Mem(ubuf), R50) +// @ ensures l == p.LenSpec(ubuf) // @ decreases func (p *Path) Len( /*@ ghost ubuf []byte @*/ ) (l int) { + // @ unfold acc(p.Mem(ubuf), R50) + // @ defer fold acc(p.Mem(ubuf), R50) if p.ScionPath == nil { return MetadataLen } - return MetadataLen + p.ScionPath.Len( /*@ ubuf @*/ ) + return MetadataLen + p.ScionPath.Len( /*@ ubuf[MetadataLen:] @*/ ) } // Type returns the EPIC path type identifier. diff --git a/pkg/slayers/path/epic/epic_spec.gobra b/pkg/slayers/path/epic/epic_spec.gobra index aeabff882..c87863bb4 100644 --- a/pkg/slayers/path/epic/epic_spec.gobra +++ b/pkg/slayers/path/epic/epic_spec.gobra @@ -18,6 +18,8 @@ package epic import ( "github.com/scionproto/scion/pkg/slayers/path" + + . "github.com/scionproto/scion/verification/utils/definitions" sl "github.com/scionproto/scion/verification/utils/slices" ) @@ -35,6 +37,17 @@ pred (p *Path) Mem(ubuf []byte) { p.ScionPath.Mem(ubuf[MetadataLen:]) } +ghost +pure +requires acc(p.Mem(ub), _) +decreases +func (p *Path) LenSpec(ghost ub []byte) (l int) { + return unfolding acc(p.Mem(ub), _) in + (p.ScionPath == nil ? 
+ MetadataLen : + MetadataLen + p.ScionPath.LenSpec(ub[MetadataLen:])) +} + ghost requires p.Mem(buf) ensures p.NonInitMem() diff --git a/pkg/slayers/path/epic/epic_spec_test.gobra b/pkg/slayers/path/epic/epic_spec_test.gobra index a53b22eb0..ac1d32a33 100644 --- a/pkg/slayers/path/epic/epic_spec_test.gobra +++ b/pkg/slayers/path/epic/epic_spec_test.gobra @@ -24,21 +24,4 @@ func testAllocateNonInitMem() { } // A test folding Mem(ubuf) is skipped here, as one can just call DesugarFromBytes to get the -// desired predicate - -// (*Path).Len() cannot be currently be verified because Gobra does not allow statements in -// pure functions, but Len must be pure. -// This method contains the same exact body and checks that the contract holds. -ghost -preserves acc(p.Mem(ubuf), _) -ensures !p.hasScionPath(ubuf) ==> l == MetadataLen -ensures p.hasScionPath(ubuf) ==> l == MetadataLen + unfolding acc(p.Mem(ubuf), _) in p.ScionPath.Len(ubuf[MetadataLen:]) -decreases -func len_test(p *Path, ubuf []byte) (l int) { - unfold acc(p.Mem(ubuf), _) // would need to be 'unfolding' in the pure version - if p.ScionPath == nil { - return MetadataLen - } - unfold acc(p.ScionPath.Mem(ubuf[MetadataLen:]), _) // would need to be 'unfolding' in the pure version - return MetadataLen + p.ScionPath.Len(ubuf[MetadataLen:]) -} +// desired predicate. diff --git a/pkg/slayers/path/onehop/onehop.go b/pkg/slayers/path/onehop/onehop.go index 8ebee355e..868147f76 100644 --- a/pkg/slayers/path/onehop/onehop.go +++ b/pkg/slayers/path/onehop/onehop.go @@ -105,7 +105,7 @@ func (o *Path) DecodeFromBytes(data []byte) (r error) { // @ preserves sl.Bytes(b, 0, len(b)) // @ ensures (len(b) >= PathLen) == (err == nil) // @ ensures err != nil ==> err.ErrorMem() -// @ ensures err == nil ==> o.Len(ubuf) <= len(b) +// @ ensures err == nil ==> o.LenSpec(ubuf) <= len(b) // @ decreases func (o *Path) SerializeTo(b []byte /*@, ubuf []byte @*/) (err error) { if len(b) < PathLen { @@ -219,8 +219,7 @@ func (o *Path) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, err error) { return sp.Reverse( /*@ ubuf @*/ ) } -// @ pure -// @ ensures l == PathLen +// @ ensures l == o.LenSpec(ubuf) // @ decreases func (o *Path) Len( /*@ ghost ubuf []byte @*/ ) (l int) { return PathLen diff --git a/pkg/slayers/path/onehop/onehop_spec.gobra b/pkg/slayers/path/onehop/onehop_spec.gobra index d9bcb54fd..84d42dc75 100644 --- a/pkg/slayers/path/onehop/onehop_spec.gobra +++ b/pkg/slayers/path/onehop/onehop_spec.gobra @@ -48,7 +48,7 @@ requires acc(o.Mem(ub), _) ensures b decreases pure func (o *Path) InferSizeUb(ghost ub []byte) (b bool) { - return unfolding acc(o.Mem(ub), _) in o.Len(ub) <= len(ub) + return unfolding acc(o.Mem(ub), _) in o.LenSpec(ub) <= len(ub) } ghost @@ -58,4 +58,11 @@ func (p *Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { return true } +ghost +pure +decreases +func (p *Path) LenSpec(ghost ub []byte) (l int) { + return PathLen +} + (*Path) implements path.Path \ No newline at end of file diff --git a/pkg/slayers/path/path.go b/pkg/slayers/path/path.go index c23a25215..951af3ab6 100644 --- a/pkg/slayers/path/path.go +++ b/pkg/slayers/path/path.go @@ -64,19 +64,19 @@ func (t Type) String() string { // Path is the path contained in the SCION header. type Path interface { // (VerifiedSCION) Must hold for every valid Path. - //@ pred Mem(underlyingBuf []byte) + //@ pred Mem(ub []byte) // (VerifiedSCION) Must imply the resources required to initialize // a new instance of a predicate. 
//@ pred NonInitMem()
 	// SerializeTo serializes the path into the provided buffer.
 	// (VerifiedSCION) There are implementations of this interface that modify the underlying
 	// structure when serializing (e.g. scion.Raw)
-	//@ preserves sl.Bytes(underlyingBuf, 0, len(underlyingBuf))
-	//@ preserves acc(Mem(underlyingBuf), R1)
+	//@ preserves sl.Bytes(ub, 0, len(ub))
+	//@ preserves acc(Mem(ub), R1)
 	//@ preserves sl.Bytes(b, 0, len(b))
 	//@ ensures e != nil ==> e.ErrorMem()
 	//@ decreases
-	SerializeTo(b []byte /*@, ghost underlyingBuf []byte @*/) (e error)
+	SerializeTo(b []byte /*@, ghost ub []byte @*/) (e error)
 	// DecodeFromBytes decodes the path from the provided buffer.
 	// (VerifiedSCION) There are implementations of this interface (e.g., scion.Raw) that
 	// store b and use it as internal data.
@@ -96,29 +96,35 @@ type Path interface {
 	//@ IsValidResultOfDecoding(b []byte, err error) (res bool)
 	// Reverse reverses a path such that it can be used in the reversed direction.
 	// XXX(shitz): This method should possibly be moved to a higher-level path manipulation package.
-	//@ requires Mem(underlyingBuf)
-	//@ preserves sl.Bytes(underlyingBuf, 0, len(underlyingBuf))
+	//@ requires Mem(ub)
+	//@ preserves sl.Bytes(ub, 0, len(ub))
 	//@ ensures e == nil ==> p != nil
-	//@ ensures e == nil ==> p.Mem(underlyingBuf)
+	//@ ensures e == nil ==> p.Mem(ub)
 	//@ ensures e != nil ==> e.ErrorMem()
 	//@ decreases
-	Reverse( /*@ ghost underlyingBuf []byte @*/ ) (p Path, e error)
-	// Len returns the length of a path in bytes.
+	Reverse( /*@ ghost ub []byte @*/ ) (p Path, e error)
+
+	//@ ghost
 	//@ pure
-	//@ requires acc(Mem(underlyingBuf), _)
-	//@ ensures l >= 0
+	//@ requires acc(Mem(ub), _)
+	//@ ensures 0 <= l
+	//@ decreases
+	//@ LenSpec(ghost ub []byte) (l int)
+
+	// Len returns the length of a path in bytes.
+	//@ preserves acc(Mem(ub), R50)
+	//@ ensures l == LenSpec(ub)
 	//@ decreases
-	Len( /*@ ghost underlyingBuf []byte @*/ ) (l int)
+	Len( /*@ ghost ub []byte @*/ ) (l int)
 	// Type returns the type of a path. 
//@ pure - //@ requires acc(Mem(underlyingBuf), _) + //@ requires acc(Mem(ub), _) //@ decreases - Type( /*@ ghost underlyingBuf []byte @*/ ) Type + Type( /*@ ghost ub []byte @*/ ) Type //@ ghost - //@ requires Mem(underlyingBuf) + //@ requires Mem(ub) //@ ensures NonInitMem() //@ decreases - //@ DowngradePerm(ghost underlyingBuf []byte) + //@ DowngradePerm(ghost ub []byte) } type metadata struct { @@ -209,18 +215,18 @@ type rawPath struct { pathType Type } -// @ preserves acc(p.Mem(underlyingBuf), R10) -// @ preserves acc(sl.Bytes(underlyingBuf, 0, len(underlyingBuf)), R10) +// @ preserves acc(p.Mem(ub), R10) +// @ preserves acc(sl.Bytes(ub, 0, len(ub)), R10) // @ preserves sl.Bytes(b, 0, len(b)) // @ ensures e == nil // @ decreases -func (p *rawPath) SerializeTo(b []byte /*@, ghost underlyingBuf []byte @*/) (e error) { +func (p *rawPath) SerializeTo(b []byte /*@, ghost ub []byte @*/) (e error) { //@ unfold sl.Bytes(b, 0, len(b)) - //@ unfold acc(p.Mem(underlyingBuf), R10) + //@ unfold acc(p.Mem(ub), R10) //@ unfold acc(sl.Bytes(p.raw, 0, len(p.raw)), R11) copy(b, p.raw /*@, R11 @*/) //@ fold acc(sl.Bytes(p.raw, 0, len(p.raw)), R11) - //@ fold acc(p.Mem(underlyingBuf), R10) + //@ fold acc(p.Mem(ub), R10) //@ fold sl.Bytes(b, 0, len(b)) return nil } @@ -239,21 +245,20 @@ func (p *rawPath) DecodeFromBytes(b []byte) (e error) { // @ ensures e != nil && e.ErrorMem() // @ decreases -func (p *rawPath) Reverse( /*@ ghost underlyingBuf []byte @*/ ) (r Path, e error) { +func (p *rawPath) Reverse( /*@ ghost ub []byte @*/ ) (r Path, e error) { return nil, serrors.New("not supported") } -// @ pure -// @ requires acc(p.Mem(underlyingBuf), _) -// @ ensures l >= 0 +// @ preserves acc(p.Mem(ub), R50) +// @ ensures l == p.LenSpec(ub) // @ decreases -func (p *rawPath) Len( /*@ ghost underlyingBuf []byte @*/ ) (l int) { - return /*@ unfolding acc(p.Mem(underlyingBuf), _) in @*/ len(p.raw) +func (p *rawPath) Len( /*@ ghost ub []byte @*/ ) (l int) { + return /*@ unfolding acc(p.Mem(ub), R50) in @*/ len(p.raw) } // @ pure -// @ requires acc(p.Mem(underlyingBuf), _) +// @ requires acc(p.Mem(ub), _) // @ decreases -func (p *rawPath) Type( /*@ ghost underlyingBuf []byte @*/ ) Type { - return /*@ unfolding acc(p.Mem(underlyingBuf), _) in @*/ p.pathType +func (p *rawPath) Type( /*@ ghost ub []byte @*/ ) Type { + return /*@ unfolding acc(p.Mem(ub), _) in @*/ p.pathType } diff --git a/pkg/slayers/path/path_spec.gobra b/pkg/slayers/path/path_spec.gobra index 060edeb82..0963b1cd0 100644 --- a/pkg/slayers/path/path_spec.gobra +++ b/pkg/slayers/path/path_spec.gobra @@ -44,6 +44,15 @@ func (p *rawPath) IsValidResultOfDecoding(b []byte, err error) (res bool) { return true } +ghost +pure +requires acc(p.Mem(ub), _) +ensures 0 <= l +decreases +func (p *rawPath) LenSpec(ghost ub []byte) (l int) { + return unfolding acc(p.Mem(ub), _) in len(p.raw) +} + (*rawPath) implements Path /** End of rawPath spec **/ diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go index 0d365765a..6c5b8cca0 100644 --- a/pkg/slayers/path/scion/decoded.go +++ b/pkg/slayers/path/scion/decoded.go @@ -153,7 +153,7 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { //@ invariant acc(s.Mem(ubuf), R1) //@ invariant sl.Bytes(ubuf, 0, len(ubuf)) //@ invariant b !== ubuf ==> sl.Bytes(b, 0, len(b)) - //@ invariant s.Len(ubuf) <= len(b) + //@ invariant s.LenSpec(ubuf) <= len(b) //@ invariant 0 <= i && i <= s.getLenInfoFields(ubuf) //@ invariant offset == MetaLen + i * path.InfoLen //@ invariant MetaLen 
+ s.getLenInfoFields(ubuf) * path.InfoLen + s.getLenHopFields(ubuf) * path.HopLen <= len(b)
@@ -180,7 +180,7 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) {
 	//@ invariant acc(s.Mem(ubuf), R1)
 	//@ invariant sl.Bytes(ubuf, 0, len(ubuf))
 	//@ invariant b !== ubuf ==> sl.Bytes(b, 0, len(b))
-	//@ invariant s.Len(ubuf) <= len(b)
+	//@ invariant s.LenSpec(ubuf) <= len(b)
 	//@ invariant 0 <= i && i <= s.getLenHopFields(ubuf)
 	//@ invariant offset == MetaLen + s.getLenInfoFields(ubuf) * path.InfoLen + i * path.HopLen
 	//@ invariant MetaLen + s.getLenInfoFields(ubuf) * path.InfoLen + s.getLenHopFields(ubuf) * path.HopLen <= len(b)
diff --git a/pkg/slayers/path/scion/decoded_spec.gobra b/pkg/slayers/path/scion/decoded_spec.gobra
index c69d1355a..6c15ef402 100644
--- a/pkg/slayers/path/scion/decoded_spec.gobra
+++ b/pkg/slayers/path/scion/decoded_spec.gobra
@@ -58,16 +58,22 @@ pred (d *Decoded) Mem(ubuf []byte) {
  * Unfortunately, Gobra does not fully support them yet, so we
  * introduced this method, which acts as a wrapper.
  */
-// TODO: can this spec be simplified (by removing the access to d.Mem(...))?
-pure
-requires acc(d.Mem(ubuf), _)
-ensures unfolding acc(d.Mem(ubuf), _) in l == d.Base.Len()
-ensures l >= 0
+preserves acc(d.Mem(ubuf), R50)
+ensures l == d.LenSpec(ubuf)
 decreases
 func (d *Decoded) Len(ghost ubuf []byte) (l int) {
 	return unfolding acc(d.Mem(ubuf), _) in d.Base.Len()
 }
 
+ghost
+pure
+requires acc(d.Mem(ub), _)
+ensures unfolding acc(d.Mem(ub), _) in l == d.Base.Len()
+decreases
+func (d *Decoded) LenSpec(ghost ub []byte) (l int) {
+	return unfolding acc(d.Mem(ub), _) in d.Base.Len()
+}
+
 /**
  * This method is not part of the original SCION codebase.
  * Instead, `Len` was defined in `*Decoded` via embedded structs.
@@ -106,7 +112,7 @@ func (d *Decoded) IsXover(ghost ubuf []byte) bool {
 requires d.Mem(ubuf)
 ensures e == nil ==> (
 	d.Mem(ubuf) &&
-	d.Len(ubuf) == old(d.Len(ubuf)) &&
+	d.LenSpec(ubuf) == old(d.LenSpec(ubuf)) &&
 	(old(d.ValidCurrIdxs(ubuf)) ==> d.ValidCurrIdxs(ubuf)))
 ensures e != nil ==> d.NonInitMem() && e.ErrorMem()
 decreases
diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra
index 03c5467a9..4f0b72711 100644
--- a/pkg/slayers/path/scion/raw_spec.gobra
+++ b/pkg/slayers/path/scion/raw_spec.gobra
@@ -72,15 +72,22 @@ func (s *Raw) Type(ghost buf []byte) (t path.Type) {
  * Unfortunately, Gobra does not fully support them yet, so we
  * introduced this method, which acts as a wrapper.
  */
-pure
-requires acc(s.Mem(buf), _)
-ensures unfolding acc(s.Mem(buf), _) in l == s.Base.Len()
-ensures l >= 0
+preserves acc(s.Mem(buf), R50)
+ensures l == s.LenSpec(buf)
 decreases
 func (s *Raw) Len(ghost buf []byte) (l int) {
 	return unfolding acc(s.Mem(buf), _) in s.Base.Len()
 }
 
+ghost
+pure
+requires acc(s.Mem(ub), _)
+ensures unfolding acc(s.Mem(ub), _) in l == s.Base.Len()
+decreases
+func (s *Raw) LenSpec(ghost ub []byte) (l int) {
+	return unfolding acc(s.Mem(ub), _) in s.Base.Len()
+}
+
 /**
  * This method is not part of the original SCION codebase.
  * Instead, `IsFirstHopAfterXover` was defined in `*Base` via embedded structs. 
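The Len/LenSpec split above follows one pattern across all path implementations: a ghost, pure `LenSpec` carries the length at the specification level, while the now-impure `Len` only has to be proven equal to it. Schematically, for a hypothetical implementation backed by a raw buffer (`somePath` is a placeholder name; the real instances are the ones in the hunks of this commit):

    ghost
    pure
    requires acc(p.Mem(ub), _)
    ensures 0 <= l
    decreases
    func (p *somePath) LenSpec(ghost ub []byte) (l int) {
        // The length is defined purely in terms of the memory predicate.
        return unfolding acc(p.Mem(ub), _) in len(p.raw)
    }

    preserves acc(p.Mem(ub), R50)
    ensures l == p.LenSpec(ub)
    decreases
    func (p *somePath) Len(ghost ub []byte) (l int) {
        // The runtime method is merely proven to agree with LenSpec.
        return unfolding acc(p.Mem(ub), _) in len(p.raw)
    }

Specifications that previously mentioned `Len` (for example the serialization invariants above) switch to `LenSpec`, since only the pure ghost function may appear in assertions and contracts.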
diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index 8d2085403..d3a7ad19d 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -224,9 +224,9 @@ func (s *SCION) NetworkFlow() (res gopacket.Flow) { // TODO: hide internal spec details // @ ensures e == nil && s.HasOneHopPath(ubuf) ==> // @ len(b.UBuf()) == old(len(b.UBuf())) + unfolding acc(s.Mem(ubuf), R55) in -// @ (CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.Len(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) +// @ (CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) // @ ensures e == nil && s.HasOneHopPath(ubuf) ==> -// @ (unfolding acc(s.Mem(ubuf), R55) in CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.Len(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) <= len(ubuf) +// @ (unfolding acc(s.Mem(ubuf), R55) in CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) <= len(ubuf) // @ ensures e != nil ==> e.ErrorMem() // @ decreases func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions /* @ , ghost ubuf []byte @*/) (e error) { @@ -423,8 +423,8 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er } // @ ghost if typeOf(s.Path) == type[*onehop.Path] { // @ s.Path.(*onehop.Path).InferSizeUb(data[offset : offset+pathLen]) - // @ assert s.Path.Len(data[offset : offset+pathLen]) <= len(data[offset : offset+pathLen]) - // @ assert CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.Len(data[offset : offset+pathLen]) <= len(data) + // @ assert s.Path.LenSpec(data[offset : offset+pathLen]) <= len(data[offset : offset+pathLen]) + // @ assert CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(data[offset : offset+pathLen]) <= len(data) // @ } s.Contents = data[:hdrBytes] s.Payload = data[hdrBytes:] diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index ff6723c2b..cbda1441a 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -179,7 +179,7 @@ pred (s *SCION) Mem(ubuf []byte) { // end of path pool // helpful facts for other methods: // - for router::updateScionLayer: - (typeOf(s.Path) == type[*onehop.Path] ==> CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.Len(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen]) <= len(ubuf)) + (typeOf(s.Path) == type[*onehop.Path] ==> CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen]) <= len(ubuf)) } ghost @@ -721,6 +721,6 @@ decreases pure func (s *SCION) InferSizeOHP(ghost ub []byte) (b bool) { return unfolding acc(s.Mem(ub), _) in let pathSlice := ub[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen] in - let pathLen := s.Path.Len(pathSlice) in + let pathLen := s.Path.LenSpec(pathSlice) in CmnHdrLen + s.AddrHdrLenSpecInternal() + pathLen <= len(ub) } \ No newline at end of file From c82183d2103fe6c8f6cab868af9d8dcd8a2313ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Mon, 17 Jun 2024 10:21:06 -0400 Subject: [PATCH 47/57] Format `info_hop_setter_lemmas.gobra` (#359) * fmt * fmt --- .../path/scion/info_hop_setter_lemmas.gobra | 478 +++++++++--------- 1 file changed, 239 insertions(+), 239 deletions(-) diff --git a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra index 3c00edd4a..09e421953 100644 --- a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra +++ 
b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra
@@ -17,10 +17,10 @@
 package scion
 
 import (
-    "github.com/scionproto/scion/pkg/slayers/path"
-    . "verification/utils/definitions"
-    sl "verification/utils/slices"
-    "verification/io"
+	"github.com/scionproto/scion/pkg/slayers/path"
+	. "verification/utils/definitions"
+	sl "verification/utils/slices"
+	"verification/io"
 )
 
 /*** This file contains helpful lemmas for proving SetInfoField and SetHopfield. ***/
@@ -42,10 +42,10 @@ requires 0 <= currInfIdx
 requires path.InfoFieldOffset(currInfIdx, MetaLen) + path.InfoLen <= len(raw)
 decreases
 pure func InfofieldByteSlice(raw []byte, currInfIdx int) ([]byte) {
-    return let infOffset := currInfIdx == 4 ?
-        path.InfoFieldOffset(0, MetaLen) :
-        path.InfoFieldOffset(currInfIdx, MetaLen) in
-        raw[infOffset:infOffset+path.InfoLen]
+	return let infOffset := currInfIdx == 4 ?
+		path.InfoFieldOffset(0, MetaLen) :
+		path.InfoFieldOffset(currInfIdx, MetaLen) in
+		raw[infOffset:infOffset+path.InfoLen]
 }
 
 // HopfieldsStartIdx returns the index of the first byte of the hopfields of a segment
@@ -58,11 +58,11 @@ requires segs.Valid()
 requires 0 <= currInfIdx
 decreases
 pure func HopfieldsStartIdx(currInfIdx int, segs io.SegLens) int {
-    return let numInf := segs.NumInfoFields() in
-    let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
-    (currInfIdx == 0 || currInfIdx == 4) ? infOffset :
-    currInfIdx == 1 ? infOffset+segs.Seg1Len*path.HopLen :
-    infOffset+(segs.Seg1Len+segs.Seg2Len)*path.HopLen
+	return let numInf := segs.NumInfoFields() in
+	let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
+	(currInfIdx == 0 || currInfIdx == 4) ? infOffset :
+	currInfIdx == 1 ? infOffset+segs.Seg1Len*path.HopLen :
+	infOffset+(segs.Seg1Len+segs.Seg2Len)*path.HopLen
 }
 
 // HopfieldsEndIdx returns the index of the last byte of the hopfields of a segment
@@ -75,11 +75,11 @@ requires segs.Valid()
 requires 0 <= currInfIdx
 decreases
 pure func HopfieldsEndIdx(currInfIdx int, segs io.SegLens) int {
-    return let numInf := segs.NumInfoFields() in
-    let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
-    (currInfIdx == 0 || currInfIdx == 4) ? infOffset+segs.Seg1Len*path.HopLen :
-    currInfIdx == 1 ? infOffset+(segs.Seg1Len+segs.Seg2Len)*path.HopLen :
-    infOffset+(segs.Seg1Len+segs.Seg2Len+segs.Seg3Len)*path.HopLen
+	return let numInf := segs.NumInfoFields() in
+	let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
+	(currInfIdx == 0 || currInfIdx == 4) ? infOffset+segs.Seg1Len*path.HopLen :
+	currInfIdx == 1 ? infOffset+(segs.Seg1Len+segs.Seg2Len)*path.HopLen :
+	infOffset+(segs.Seg1Len+segs.Seg2Len+segs.Seg3Len)*path.HopLen
 }
 
 // HopfieldsByteSlice returns the byte slice of the hopfields of a segment
@@ -93,11 +93,11 @@ requires 0 <= currInfIdx
 requires PktLen(segs, MetaLen) <= len(raw)
 decreases
 pure func HopfieldsByteSlice(raw []byte, currInfIdx int, segs io.SegLens) ([]byte) {
-    return let numInf := segs.NumInfoFields() in
-    let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
-    let start := HopfieldsStartIdx(currInfIdx, segs) in
-    let end := HopfieldsEndIdx(currInfIdx, segs) in
-    raw[start:end]
+	return let numInf := segs.NumInfoFields() in
+	let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
+	let start := HopfieldsStartIdx(currInfIdx, segs) in
+	let end := HopfieldsEndIdx(currInfIdx, segs) in
+	raw[start:end]
 }
 
 // SliceBytesIntoSegments splits the raw bytes of a packet into its hopfield segments
@@ -113,15 +113,15 @@ ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 2, segs), 0, segs.Seg3Len*path.Hop
 ensures acc(sl.Bytes(raw[HopfieldsEndIdx(2, segs):], 0, len(raw[HopfieldsEndIdx(2, segs):])), p)
 decreases
 func SliceBytesIntoSegments(raw []byte, segs io.SegLens, p perm) {
-    sl.SplitByIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, segs), p)
-    sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), HopfieldsEndIdx(0, segs), p)
-    sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(1, segs), len(raw), HopfieldsEndIdx(1, segs), p)
-    sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(2, segs), len(raw), HopfieldsEndIdx(2, segs), p)
-    sl.Reslice_Bytes(raw, 0, HopfieldsStartIdx(0, segs), p)
-    sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, segs), HopfieldsEndIdx(0, segs), p)
-    sl.Reslice_Bytes(raw, HopfieldsStartIdx(1, segs), HopfieldsEndIdx(1, segs), p)
-    sl.Reslice_Bytes(raw, HopfieldsStartIdx(2, segs), HopfieldsEndIdx(2, segs), p)
-    sl.Reslice_Bytes(raw, HopfieldsEndIdx(2, segs), len(raw), p)
+	sl.SplitByIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, segs), p)
+	sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), HopfieldsEndIdx(0, segs), p)
+	sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(1, segs), len(raw), HopfieldsEndIdx(1, segs), p)
+	sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(2, segs), len(raw), HopfieldsEndIdx(2, segs), p)
+	sl.Reslice_Bytes(raw, 0, HopfieldsStartIdx(0, segs), p)
+	sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, segs), HopfieldsEndIdx(0, segs), p)
+	sl.Reslice_Bytes(raw, HopfieldsStartIdx(1, segs), HopfieldsEndIdx(1, segs), p)
+	sl.Reslice_Bytes(raw, HopfieldsStartIdx(2, segs), HopfieldsEndIdx(2, segs), p)
+	sl.Reslice_Bytes(raw, HopfieldsEndIdx(2, segs), len(raw), p)
 }
 
 // CombineBytesFromSegments combines the three hopfield segments of a packet into a single slice of bytes. 
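It is the exact inverse of `SliceBytesIntoSegments` above, so a proof can temporarily trade the whole-buffer predicate for the three per-segment slices and later restore it. A sketch of the intended usage, assuming the lemmas' preconditions hold (a valid `segs` and `PktLen(segs, MetaLen) <= len(raw)`; `R50` stands in for an arbitrary fractional permission):

    SliceBytesIntoSegments(raw, segs, R50)
    // reason here about acc(sl.Bytes(HopfieldsByteSlice(raw, i, segs), ...), R50)
    // for the individual segments i = 0, 1, 2
    CombineBytesFromSegments(raw, segs, R50)
    assert acc(sl.Bytes(raw, 0, len(raw)), R50)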
@@ -137,15 +137,15 @@ requires acc(sl.Bytes(raw[HopfieldsEndIdx(2, segs):], 0, len(raw[HopfieldsEndIdx ensures acc(sl.Bytes(raw, 0, len(raw)), p) decreases func CombineBytesFromSegments(raw []byte, segs io.SegLens, p perm) { - sl.Unslice_Bytes(raw, HopfieldsEndIdx(2, segs), len(raw), p) - sl.Unslice_Bytes(raw, HopfieldsStartIdx(2, segs), HopfieldsEndIdx(2, segs), p) - sl.Unslice_Bytes(raw, HopfieldsStartIdx(1, segs), HopfieldsEndIdx(1, segs), p) - sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, segs), HopfieldsEndIdx(0, segs), p) - sl.Unslice_Bytes(raw, 0, HopfieldsStartIdx(0, segs), p) - sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(2, segs), len(raw), HopfieldsEndIdx(2, segs), p) - sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(1, segs), len(raw), HopfieldsEndIdx(1, segs), p) - sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), HopfieldsEndIdx(0, segs), p) - sl.CombineAtIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, segs), p) + sl.Unslice_Bytes(raw, HopfieldsEndIdx(2, segs), len(raw), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(2, segs), HopfieldsEndIdx(2, segs), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(1, segs), HopfieldsEndIdx(1, segs), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, segs), HopfieldsEndIdx(0, segs), p) + sl.Unslice_Bytes(raw, 0, HopfieldsStartIdx(0, segs), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(2, segs), len(raw), HopfieldsEndIdx(2, segs), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(1, segs), len(raw), HopfieldsEndIdx(1, segs), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), HopfieldsEndIdx(0, segs), p) + sl.CombineAtIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, segs), p) } // SliceBytesIntoInfoFields splits the raw bytes of a packet into its infofields @@ -162,22 +162,22 @@ ensures 2 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 2), 0, path.InfoLen ensures acc(sl.Bytes(raw[HopfieldsStartIdx(0, segs):], 0, len(raw[HopfieldsStartIdx(0, segs):])), p) decreases func SliceBytesIntoInfoFields(raw []byte, numInf int, segs io.SegLens, p perm) { - sl.SplitByIndex_Bytes(raw, 0, len(raw), MetaLen, p) - sl.SplitByIndex_Bytes(raw, MetaLen, len(raw), path.InfoFieldOffset(1, MetaLen), p) - sl.Reslice_Bytes(raw, 0, MetaLen, p) - sl.Reslice_Bytes(raw, MetaLen, path.InfoFieldOffset(1, MetaLen), p) - if(numInf > 1) { - sl.SplitByIndex_Bytes(raw, path.InfoFieldOffset(1, MetaLen), len(raw), - path.InfoFieldOffset(2, MetaLen), p) - sl.Reslice_Bytes(raw, path.InfoFieldOffset(1, MetaLen), - path.InfoFieldOffset(2, MetaLen), p) - } - if(numInf > 2) { - sl.SplitByIndex_Bytes(raw, path.InfoFieldOffset(2, MetaLen), len(raw), - HopfieldsStartIdx(0, segs), p) - sl.Reslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, segs), p) - } - sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), p) + sl.SplitByIndex_Bytes(raw, 0, len(raw), MetaLen, p) + sl.SplitByIndex_Bytes(raw, MetaLen, len(raw), path.InfoFieldOffset(1, MetaLen), p) + sl.Reslice_Bytes(raw, 0, MetaLen, p) + sl.Reslice_Bytes(raw, MetaLen, path.InfoFieldOffset(1, MetaLen), p) + if(numInf > 1) { + sl.SplitByIndex_Bytes(raw, path.InfoFieldOffset(1, MetaLen), len(raw), + path.InfoFieldOffset(2, MetaLen), p) + sl.Reslice_Bytes(raw, path.InfoFieldOffset(1, MetaLen), + path.InfoFieldOffset(2, MetaLen), p) + } + if(numInf > 2) { + sl.SplitByIndex_Bytes(raw, path.InfoFieldOffset(2, MetaLen), len(raw), + HopfieldsStartIdx(0, segs), p) + sl.Reslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, segs), p) + } + sl.Reslice_Bytes(raw, 
HopfieldsStartIdx(0, segs), len(raw), p) } // CombineBytesFromInfoFields combines the infofields of a packet into a single slice of bytes. @@ -194,22 +194,22 @@ requires acc(sl.Bytes(raw[HopfieldsStartIdx(0, segs):], 0, len(raw[HopfieldsStar ensures acc(sl.Bytes(raw, 0, len(raw)), p) decreases func CombineBytesFromInfoFields(raw []byte, numInf int, segs io.SegLens, p perm) { - sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), p) - if(numInf > 2) { - sl.Unslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, segs), p) - sl.CombineAtIndex_Bytes(raw, path.InfoFieldOffset(2, MetaLen), len(raw), - HopfieldsStartIdx(0, segs), p) - } - if(numInf > 1) { - sl.Unslice_Bytes(raw, path.InfoFieldOffset(1, MetaLen), - path.InfoFieldOffset(2, MetaLen), p) - sl.CombineAtIndex_Bytes(raw, path.InfoFieldOffset(1, MetaLen), len(raw), - path.InfoFieldOffset(2, MetaLen), p) - } - sl.Unslice_Bytes(raw, MetaLen, path.InfoFieldOffset(1, MetaLen), p) - sl.Unslice_Bytes(raw, 0, MetaLen, p) - sl.CombineAtIndex_Bytes(raw, MetaLen, len(raw), path.InfoFieldOffset(1, MetaLen), p) - sl.CombineAtIndex_Bytes(raw, 0, len(raw), MetaLen, p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), p) + if(numInf > 2) { + sl.Unslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, segs), p) + sl.CombineAtIndex_Bytes(raw, path.InfoFieldOffset(2, MetaLen), len(raw), + HopfieldsStartIdx(0, segs), p) + } + if(numInf > 1) { + sl.Unslice_Bytes(raw, path.InfoFieldOffset(1, MetaLen), + path.InfoFieldOffset(2, MetaLen), p) + sl.CombineAtIndex_Bytes(raw, path.InfoFieldOffset(1, MetaLen), len(raw), + path.InfoFieldOffset(2, MetaLen), p) + } + sl.Unslice_Bytes(raw, MetaLen, path.InfoFieldOffset(1, MetaLen), p) + sl.Unslice_Bytes(raw, 0, MetaLen, p) + sl.CombineAtIndex_Bytes(raw, MetaLen, len(raw), path.InfoFieldOffset(1, MetaLen), p) + sl.CombineAtIndex_Bytes(raw, 0, len(raw), MetaLen, p) } // CurrSegWithInfo returns the abstract representation of the current segment of a packet. @@ -224,7 +224,7 @@ requires SegLen*path.HopLen == len(hopfields) requires acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) decreases pure func CurrSegWithInfo(hopfields []byte, currHfIdx int, SegLen int, inf io.AbsInfoField) io.IO_seg3 { - return segment(hopfields, 0, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, inf.Peer, SegLen) + return segment(hopfields, 0, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, inf.Peer, SegLen) } @@ -236,23 +236,23 @@ ghost opaque requires segs.Valid() requires (currInfIdx == 1 && segs.Seg2Len > 0) || - (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> - let start := HopfieldsStartIdx(currInfIdx, segs) in - let end := HopfieldsEndIdx(currInfIdx, segs) in - inf != none[io.AbsInfoField] && - len(hopfields) == end-start && - acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in + inf != none[io.AbsInfoField] && + len(hopfields) == end-start && + acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) decreases pure func LeftSegWithInfo( - hopfields []byte, - currInfIdx int, - segs io.SegLens, - inf option[io.AbsInfoField]) option[io.IO_seg3] { - return (currInfIdx == 1 && segs.Seg2Len > 0) ? - some(CurrSegWithInfo(hopfields, 0, segs.Seg2Len, get(inf))) : - (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? 
- some(CurrSegWithInfo(hopfields, 0, segs.Seg3Len, get(inf))) : - none[io.IO_seg3] + hopfields []byte, + currInfIdx int, + segs io.SegLens, + inf option[io.AbsInfoField]) option[io.IO_seg3] { + return (currInfIdx == 1 && segs.Seg2Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, segs.Seg2Len, get(inf))) : + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, segs.Seg3Len, get(inf))) : + none[io.IO_seg3] } // RightSegWithInfo returns the abstract representation of the previous segment of a packet. @@ -263,23 +263,23 @@ ghost opaque requires segs.Valid() requires (currInfIdx == 0 && segs.Seg2Len > 0) || - (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> - let start := HopfieldsStartIdx(currInfIdx, segs) in - let end := HopfieldsEndIdx(currInfIdx, segs) in - inf != none[io.AbsInfoField] && - len(hopfields) == end-start && - acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in + inf != none[io.AbsInfoField] && + len(hopfields) == end-start && + acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) decreases pure func RightSegWithInfo( - hopfields []byte, - currInfIdx int, - segs io.SegLens, - inf option[io.AbsInfoField]) option[io.IO_seg3] { - return (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? - some(CurrSegWithInfo(hopfields, segs.Seg2Len, segs.Seg2Len, get(inf))) : - (currInfIdx == 0 && segs.Seg2Len > 0) ? - some(CurrSegWithInfo(hopfields, segs.Seg1Len, segs.Seg1Len, get(inf))) : - none[io.IO_seg3] + hopfields []byte, + currInfIdx int, + segs io.SegLens, + inf option[io.AbsInfoField]) option[io.IO_seg3] { + return (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, segs.Seg2Len, segs.Seg2Len, get(inf))) : + (currInfIdx == 0 && segs.Seg2Len > 0) ? + some(CurrSegWithInfo(hopfields, segs.Seg1Len, segs.Seg1Len, get(inf))) : + none[io.IO_seg3] } // MidSegWithInfo returns the abstract representation of the last or first segment of a packet. @@ -290,23 +290,23 @@ ghost opaque requires segs.Valid() requires (segs.Seg2Len > 0 && segs.Seg3Len > 0 && - (currInfIdx == 2 || currInfIdx == 4)) ==> - let start := HopfieldsStartIdx(currInfIdx, segs) in - let end := HopfieldsEndIdx(currInfIdx, segs) in - inf != none[io.AbsInfoField] && - len(hopfields) == end-start && - acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) + (currInfIdx == 2 || currInfIdx == 4)) ==> + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in + inf != none[io.AbsInfoField] && + len(hopfields) == end-start && + acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) decreases pure func MidSegWithInfo( - hopfields []byte, - currInfIdx int, - segs io.SegLens, - inf option[io.AbsInfoField]) option[io.IO_seg3] { - return (currInfIdx == 4 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? - some(CurrSegWithInfo(hopfields, segs.Seg1Len, segs.Seg1Len, get(inf))) : - (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? - some(CurrSegWithInfo(hopfields, 0, segs.Seg3Len, get(inf))) : - none[io.IO_seg3] + hopfields []byte, + currInfIdx int, + segs io.SegLens, + inf option[io.AbsInfoField]) option[io.IO_seg3] { + return (currInfIdx == 4 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, segs.Seg1Len, segs.Seg1Len, get(inf))) : + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? 
+ some(CurrSegWithInfo(hopfields, 0, segs.Seg3Len, get(inf))) : + none[io.IO_seg3] } // CurrSegEquality ensures that the two definitions of abstract segments, CurrSegWithInfo(..) @@ -321,23 +321,23 @@ preserves acc(sl.Bytes(raw, 0, len(raw)), R50) preserves acc(sl.Bytes(raw[offset:offset+SegLen*path.HopLen], 0, SegLen*path.HopLen), R50) preserves acc(sl.Bytes(InfofieldByteSlice(raw, currInfIdx), 0, path.InfoLen), R50) ensures let inf := path.BytesToAbsInfoField(InfofieldByteSlice(raw, currInfIdx), 0) in - CurrSegWithInfo(raw[offset:offset+SegLen*path.HopLen], currHfIdx, SegLen, inf) == - CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) + CurrSegWithInfo(raw[offset:offset+SegLen*path.HopLen], currHfIdx, SegLen, inf) == + CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) decreases func CurrSegEquality(raw []byte, offset int, currInfIdx int, currHfIdx int, SegLen int) { - infoBytes := InfofieldByteSlice(raw, currInfIdx) - inf := path.BytesToAbsInfoField(infoBytes, 0) - infOffset := path.InfoFieldOffset(currInfIdx, MetaLen) - unfold acc(sl.Bytes(raw, 0, len(raw)), R56) - unfold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) - assert reveal path.BytesToAbsInfoField(raw, infOffset) == - reveal path.BytesToAbsInfoField(infoBytes, 0) - reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) - reveal CurrSegWithInfo(raw[offset:offset+SegLen*path.HopLen], currHfIdx, SegLen, inf) - fold acc(sl.Bytes(raw, 0, len(raw)), R56) - fold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) - widenSegment(raw, offset, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, - inf.Peer, SegLen, offset, offset+SegLen*path.HopLen) + infoBytes := InfofieldByteSlice(raw, currInfIdx) + inf := path.BytesToAbsInfoField(infoBytes, 0) + infOffset := path.InfoFieldOffset(currInfIdx, MetaLen) + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) + assert reveal path.BytesToAbsInfoField(raw, infOffset) == + reveal path.BytesToAbsInfoField(infoBytes, 0) + reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) + reveal CurrSegWithInfo(raw[offset:offset+SegLen*path.HopLen], currHfIdx, SegLen, inf) + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) + widenSegment(raw, offset, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, + inf.Peer, SegLen, offset, offset+SegLen*path.HopLen) } // UpdateCurrSegInfo proves that updating the infofield from inf1 to inf2 does not alter the hopfields @@ -348,12 +348,12 @@ requires 0 <= currHfIdx && currHfIdx <= SegLen requires SegLen*path.HopLen == len(raw) preserves acc(sl.Bytes(raw, 0, len(raw)), R50) ensures CurrSegWithInfo(raw, currHfIdx, SegLen, inf1).UpdateCurrSeg(inf2) == - CurrSegWithInfo(raw, currHfIdx, SegLen, inf2) + CurrSegWithInfo(raw, currHfIdx, SegLen, inf2) decreases func UpdateCurrSegInfo(raw []byte, currHfIdx int, SegLen int, - inf1 io.AbsInfoField, inf2 io.AbsInfoField) { - seg1 := reveal CurrSegWithInfo(raw, currHfIdx, SegLen, inf1) - seg2 := reveal CurrSegWithInfo(raw, currHfIdx, SegLen, inf2) + inf1 io.AbsInfoField, inf2 io.AbsInfoField) { + seg1 := reveal CurrSegWithInfo(raw, currHfIdx, SegLen, inf1) + seg2 := reveal CurrSegWithInfo(raw, currHfIdx, SegLen, inf2) } @@ -365,22 +365,22 @@ requires PktLen(segs, MetaLen) <= len(raw) requires 1 <= currInfIdx && currInfIdx < 4 requires acc(sl.Bytes(raw, 0, len(raw)), R49) requires (currInfIdx == 1 && segs.Seg2Len > 0) || - (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> - let infoBytes := 
InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && - acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) decreases pure func LeftSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { - return (currInfIdx == 1 && segs.Seg2Len > 0) || - (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? - let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in - LeftSeg(raw, currInfIdx, segs, MetaLen) == - LeftSegWithInfo(hopBytes, currInfIdx, segs, inf) : - LeftSeg(raw, currInfIdx, segs, MetaLen) == - LeftSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) + return (currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in + LeftSeg(raw, currInfIdx, segs, MetaLen) == + LeftSegWithInfo(hopBytes, currInfIdx, segs, inf) : + LeftSeg(raw, currInfIdx, segs, MetaLen) == + LeftSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } // LeftSegEquality ensures that the two definitions of abstract segments, LeftSegWithInfo(..) @@ -395,27 +395,27 @@ requires PktLen(segs, MetaLen) <= len(raw) requires 1 <= currInfIdx && currInfIdx < 4 preserves acc(sl.Bytes(raw, 0, len(raw)), R49) preserves (currInfIdx == 1 && segs.Seg2Len > 0) || - (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> - let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && - acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) ensures LeftSegEqualitySpec(raw, currInfIdx, segs) decreases func LeftSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { - reveal LeftSeg(raw, currInfIdx, segs, MetaLen) - if ((currInfIdx == 1 && segs.Seg2Len > 0) || - (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0)) { - infoBytes := InfofieldByteSlice(raw, currInfIdx) - hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) - inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) - offset := HopfieldsStartIdx(currInfIdx, segs) - segLen := currInfIdx == 1 ? 
segs.Seg2Len : segs.Seg3Len - reveal LeftSegWithInfo(hopBytes, currInfIdx, segs, inf) - CurrSegEquality(raw, offset, currInfIdx, 0, segLen) - } else { - reveal LeftSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) - } + reveal LeftSeg(raw, currInfIdx, segs, MetaLen) + if ((currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0)) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, segs) + segLen := currInfIdx == 1 ? segs.Seg2Len : segs.Seg3Len + reveal LeftSegWithInfo(hopBytes, currInfIdx, segs, inf) + CurrSegEquality(raw, offset, currInfIdx, 0, segLen) + } else { + reveal LeftSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) + } } // RightSegEqualitySpec defines the conditions that must hold for RightSegWithInfo(..) @@ -426,22 +426,22 @@ requires PktLen(segs, MetaLen) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 2 requires acc(sl.Bytes(raw, 0, len(raw)), R49) requires (currInfIdx == 0 && segs.Seg2Len > 0) || - (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> - let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && - acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) decreases pure func RightSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { - return (currInfIdx == 0 && segs.Seg2Len > 0) || - (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? - let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in - RightSeg(raw, currInfIdx, segs, MetaLen) == - RightSegWithInfo(hopBytes, currInfIdx, segs, inf) : - RightSeg(raw, currInfIdx, segs, MetaLen) == - RightSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) + return (currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in + RightSeg(raw, currInfIdx, segs, MetaLen) == + RightSegWithInfo(hopBytes, currInfIdx, segs, inf) : + RightSeg(raw, currInfIdx, segs, MetaLen) == + RightSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } // RightSegEquality ensures that the two definitions of abstract segments, RightSegWithInfo(..) 
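A convention worth spelling out before RightSegEquality: in these abstractions, currInfIdx selects which concrete segment is meant, and the hop index encodes the traversal state. LeftSegWithInfo denotes segment currInfIdx at its first hop (the segment about to be entered at a crossover), RightSegWithInfo denotes segment currInfIdx positioned one past its last hop (the segment just completed), and MidSegWithInfo covers the remaining outer segment, with currInfIdx == 4 acting as a wrap-around alias for segment 0 at its end. A ghost-free Go sketch of this mapping (the type view and the helper names are invented here for illustration; the guards mirror the ternaries above):

package sketch

// view records which concrete segment an abstraction denotes and where
// its current hop index sits: hf == 0 is the first hop, hf == segLen[i]
// is one past the last hop of segment i.
type view struct{ seg, hf int }

// noSeg marks the cases in which the abstraction is none[io.IO_seg3].
var noSeg = view{-1, -1}

// leftSeg mirrors LeftSegWithInfo: the segment about to be entered.
func leftSeg(currInfIdx int, segLen [3]int) view {
	if currInfIdx == 1 && segLen[1] > 0 {
		return view{1, 0}
	}
	if currInfIdx == 2 && segLen[1] > 0 && segLen[2] > 0 {
		return view{2, 0}
	}
	return noSeg
}

// rightSeg mirrors RightSegWithInfo: the segment just completed.
func rightSeg(currInfIdx int, segLen [3]int) view {
	if currInfIdx == 1 && segLen[1] > 0 && segLen[2] > 0 {
		return view{1, segLen[1]}
	}
	if currInfIdx == 0 && segLen[1] > 0 {
		return view{0, segLen[0]}
	}
	return noSeg
}

// midSeg mirrors MidSegWithInfo: the outer segment not covered by the
// left/right pair; index 4 wraps around to segment 0, fully traversed.
func midSeg(currInfIdx int, segLen [3]int) view {
	if currInfIdx == 4 && segLen[1] > 0 && segLen[2] > 0 {
		return view{0, segLen[0]}
	}
	if currInfIdx == 2 && segLen[1] > 0 && segLen[2] > 0 {
		return view{2, 0}
	}
	return noSeg
}

The equality lemmas below then only need to relate each of these views to the corresponding offset-based CurrSeg instance via CurrSegEquality.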
@@ -456,27 +456,27 @@ requires PktLen(segs, MetaLen) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 2 preserves acc(sl.Bytes(raw, 0, len(raw)), R49) preserves (currInfIdx == 0 && segs.Seg2Len > 0) || - (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> - let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && - acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) ensures RightSegEqualitySpec(raw, currInfIdx, segs) decreases func RightSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { - reveal RightSeg(raw, currInfIdx, segs, MetaLen) - if ((currInfIdx == 0 && segs.Seg2Len > 0) || - (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0)) { - infoBytes := InfofieldByteSlice(raw, currInfIdx) - hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) - inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) - offset := HopfieldsStartIdx(currInfIdx, segs) - segLen := currInfIdx == 0 ? segs.Seg1Len : segs.Seg2Len - reveal RightSegWithInfo(hopBytes, currInfIdx, segs, inf) - CurrSegEquality(raw, offset, currInfIdx, segLen, segLen) - } else { - reveal RightSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) - } + reveal RightSeg(raw, currInfIdx, segs, MetaLen) + if ((currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0)) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, segs) + segLen := currInfIdx == 0 ? segs.Seg1Len : segs.Seg2Len + reveal RightSegWithInfo(hopBytes, currInfIdx, segs, inf) + CurrSegEquality(raw, offset, currInfIdx, segLen, segLen) + } else { + reveal RightSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) + } } // MidSegEqualitySpec defines the conditions that must hold for MidSegWithInfo(..) @@ -487,22 +487,22 @@ requires PktLen(segs, MetaLen) <= len(raw) requires 2 <= currInfIdx && currInfIdx < 5 requires acc(sl.Bytes(raw, 0, len(raw)), R49) requires (segs.Seg2Len > 0 && segs.Seg3Len > 0 && - (currInfIdx == 2 || currInfIdx == 4)) ==> - let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && - acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) + (currInfIdx == 2 || currInfIdx == 4)) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) decreases pure func MidSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { - return (segs.Seg2Len > 0 && segs.Seg3Len > 0 && - (currInfIdx == 2 || currInfIdx == 4)) ? 
- let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in - MidSeg(raw, currInfIdx, segs, MetaLen) == - MidSegWithInfo(hopBytes, currInfIdx, segs, inf) : - MidSeg(raw, currInfIdx, segs, MetaLen) == - MidSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) + return (segs.Seg2Len > 0 && segs.Seg3Len > 0 && + (currInfIdx == 2 || currInfIdx == 4)) ? + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in + MidSeg(raw, currInfIdx, segs, MetaLen) == + MidSegWithInfo(hopBytes, currInfIdx, segs, inf) : + MidSeg(raw, currInfIdx, segs, MetaLen) == + MidSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } // MidSegEquality ensures that the two definitions of abstract segments, MidSegWithInfo(..) @@ -517,30 +517,30 @@ requires PktLen(segs, MetaLen) <= len(raw) requires 2 <= currInfIdx && currInfIdx < 5 preserves acc(sl.Bytes(raw, 0, len(raw)), R49) preserves (segs.Seg2Len > 0 && segs.Seg3Len > 0 && - (currInfIdx == 2 || currInfIdx == 4)) ==> - let infoBytes := InfofieldByteSlice(raw, currInfIdx) in - let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && - acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) + (currInfIdx == 2 || currInfIdx == 4)) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) ensures MidSegEqualitySpec(raw, currInfIdx, segs) decreases func MidSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { - reveal MidSeg(raw, currInfIdx, segs, MetaLen) - if (currInfIdx == 4 && segs.Seg2Len > 0 && segs.Seg3Len > 0) { - infoBytes := InfofieldByteSlice(raw, 0) - hopBytes := HopfieldsByteSlice(raw, 0, segs) - inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) - offset := HopfieldsStartIdx(currInfIdx, segs) - reveal MidSegWithInfo(hopBytes, currInfIdx, segs, inf) - CurrSegEquality(raw, offset, 0, segs.Seg1Len, segs.Seg1Len) - } else if (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) { - infoBytes := InfofieldByteSlice(raw, currInfIdx) - hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) - inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) - offset := HopfieldsStartIdx(currInfIdx, segs) - reveal MidSegWithInfo(hopBytes, currInfIdx, segs, inf) - CurrSegEquality(raw, offset, currInfIdx, 0, segs.Seg3Len) - } else { - reveal MidSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) - } + reveal MidSeg(raw, currInfIdx, segs, MetaLen) + if (currInfIdx == 4 && segs.Seg2Len > 0 && segs.Seg3Len > 0) { + infoBytes := InfofieldByteSlice(raw, 0) + hopBytes := HopfieldsByteSlice(raw, 0, segs) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, segs) + reveal MidSegWithInfo(hopBytes, currInfIdx, segs, inf) + CurrSegEquality(raw, offset, 0, segs.Seg1Len, segs.Seg1Len) + } else if (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, segs) + reveal MidSegWithInfo(hopBytes, currInfIdx, segs, inf) + CurrSegEquality(raw, offset, 
currInfIdx, 0, segs.Seg3Len) + } else { + reveal MidSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) + } } \ No newline at end of file From def1aad61e31244be4262dcec532364132a0a123 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Mon, 17 Jun 2024 15:07:58 -0400 Subject: [PATCH 48/57] Simplify validity criteria of paths (#352) * backup * backup * continue refactor * backup * Update pkg/slayers/path/epic/epic_spec_test.gobra * backup * merge with master * backup * fix verification error * backup * simplify preconditions * drop unnecessary method * fix annotation * fix verification error * minor fixes in styling * fix verification errors --- pkg/slayers/path/epic/epic_spec.gobra | 29 +---- pkg/slayers/path/scion/base.go | 12 +- pkg/slayers/path/scion/base_spec.gobra | 130 ++++++++++------------ pkg/slayers/path/scion/decoded.go | 12 +- pkg/slayers/path/scion/decoded_spec.gobra | 30 +---- pkg/slayers/path/scion/raw.go | 42 +++---- pkg/slayers/path/scion/raw_spec.gobra | 110 ++++-------------- pkg/slayers/scion.go | 2 +- pkg/slayers/scion_spec.gobra | 13 +-- router/dataplane.go | 72 ++++++------ router/io-spec-lemmas.gobra | 15 ++- 11 files changed, 170 insertions(+), 297 deletions(-) diff --git a/pkg/slayers/path/epic/epic_spec.gobra b/pkg/slayers/path/epic/epic_spec.gobra index c87863bb4..93380d08c 100644 --- a/pkg/slayers/path/epic/epic_spec.gobra +++ b/pkg/slayers/path/epic/epic_spec.gobra @@ -18,7 +18,7 @@ package epic import ( "github.com/scionproto/scion/pkg/slayers/path" - + "github.com/scionproto/scion/pkg/slayers/path/scion" . "github.com/scionproto/scion/verification/utils/definitions" sl "github.com/scionproto/scion/verification/utils/slices" ) @@ -58,30 +58,11 @@ func (p *Path) DowngradePerm(buf []byte) { } ghost -requires acc(p.Mem(ub), _) -decreases -pure func (p *Path) ValidCurrINF(ghost ub []byte) bool { - return unfolding acc(p.Mem(ub), _) in - let ubPath := ub[MetadataLen:] in - p.ScionPath.ValidCurrINF(ubPath) -} - -ghost -requires acc(p.Mem(ub), _) +requires acc(r.Mem(ub), _) decreases -pure func (p *Path) ValidCurrHF(ghost ub []byte) bool { - return unfolding acc(p.Mem(ub), _) in - let ubPath := ub[MetadataLen:] in - p.ScionPath.ValidCurrHF(ubPath) -} - -ghost -requires acc(p.Mem(ub), _) -decreases -pure func (p *Path) ValidCurrIdxs(ghost ub []byte) bool { - return unfolding acc(p.Mem(ub), _) in - let ubPath := ub[MetadataLen:] in - p.ScionPath.ValidCurrIdxs(ubPath) +pure func (r *Path) GetBase(ub []byte) scion.Base { + return unfolding acc(r.Mem(ub), _) in + r.ScionPath.GetBase(ub[MetadataLen:]) } ghost diff --git a/pkg/slayers/path/scion/base.go b/pkg/slayers/path/scion/base.go index 610cd1df7..cbd0e2078 100644 --- a/pkg/slayers/path/scion/base.go +++ b/pkg/slayers/path/scion/base.go @@ -83,7 +83,9 @@ type Base struct { // @ ensures r != nil ==> // @ s.NonInitMem() && r.ErrorMem() // @ ensures r == nil ==> -// @ s.Mem() && s.DecodeFromBytesSpec(data) && s.InfsMatchHfs() +// @ s.Mem() && +// @ s.GetBase().WeaklyValid() && +// @ s.DecodeFromBytesSpec(data) // @ ensures len(data) < MetaLen ==> r != nil // posts for IO: // @ ensures r == nil ==> s.GetBase().EqAbsHeader(data) @@ -162,8 +164,8 @@ func (s *Base) DecodeFromBytes(data []byte) (r error) { // @ old(int(s.GetCurrHF()) >= s.GetNumHops()-1)) // @ ensures e == nil ==> ( // @ s.Mem() && -// @ let oldBase := old(unfolding s.Mem() in *s) in -// @ let newBase := (unfolding s.Mem() in *s) in +// @ let oldBase := old(s.GetBase()) in +// @ let newBase := s.GetBase() in // @ newBase == 
oldBase.IncPathSpec())
// @ ensures e != nil ==> (s.NonInitMem() && e.ErrorMem())
// @ decreases
func (s *Base) IncPath() (e error) {
@@ -187,7 +189,7 @@ func (s *Base) IncPath() (e error) {

// IsXover returns whether we are at a crossover point.
// @ preserves acc(s.Mem(), R45)
-// @ ensures r == s.IsXoverSpec()
+// @ ensures r == s.GetBase().IsXoverSpec()
// @ decreases
func (s *Base) IsXover() (r bool) {
//@ unfold acc(s.Mem(), R45)
@@ -227,7 +229,6 @@ func (s *Base) infIndexForHF(hf uint8) (r uint8) {
// @ pure
// @ requires acc(s.Mem(), _)
// @ ensures r >= MetaLen
-// @ ensures r == (unfolding acc(s.Mem(), _) in (MetaLen + int(s.NumINF)*path.InfoLen + int(s.NumHops)*path.HopLen))
// @ decreases
func (s *Base) Len() (r int) {
return /*@ unfolding acc(s.Mem(), _) in @*/ MetaLen + s.NumINF*path.InfoLen + s.NumHops*path.HopLen
@@ -253,6 +254,7 @@ type MetaHdr struct {
// @ preserves acc(m)
// @ preserves acc(sl.Bytes(raw, 0, len(raw)), R50)
// @ ensures (len(raw) >= MetaLen) == (e == nil)
+// @ ensures e == nil ==> m.InBounds()
// @ ensures e == nil ==> m.DecodeFromBytesSpec(raw)
// @ ensures e != nil ==> e.ErrorMem()
// @ decreases
diff --git a/pkg/slayers/path/scion/base_spec.gobra b/pkg/slayers/path/scion/base_spec.gobra
index c165e56bb..586d2fecf 100644
--- a/pkg/slayers/path/scion/base_spec.gobra
+++ b/pkg/slayers/path/scion/base_spec.gobra
@@ -29,17 +29,12 @@ pred (b *Base) NonInitMem() {
}

// This predicate is established right after DecodeFromBytes.
-// Because this method does not perform any bounds checks, it
-// is not possible to have assertions in this invariant about
-// how the fileds of Base compare to those of its MetaHdr field.
pred (b *Base) Mem() {
- acc(&b.NumINF) &&
- acc(&b.NumHops) &&
- acc(&b.PathMeta.CurrINF) &&
- acc(&b.PathMeta.CurrHF) &&
- acc(&b.PathMeta.SegLen[0]) &&
- acc(&b.PathMeta.SegLen[1]) &&
- acc(&b.PathMeta.SegLen[2]) &&
+ acc(b) &&
+ // In the future, we might want to drop the properties
+ // below, as they are already present in WeaklyValid.
+ // This requires a bit of refactoring to pass around the
+ // knowledge that WeaklyValid holds between methods.
0 <= b.NumINF && b.NumINF <= MaxINFs &&
0 <= b.NumHops && b.NumHops <= MaxHops &&
(0 < b.NumINF ==> 0 < b.NumHops)
}
@@ -59,68 +54,60 @@ pure func (b Base) ValidCurrHfSpec() bool {

ghost
decreases
-pure func (b Base) ValidCurrIdxsSpec() bool {
+pure func (b Base) ValidCurrFieldsSpec() bool {
return 0 <= b.NumINF && b.NumINF <= MaxINFs &&
0 <= b.NumHops && b.NumHops <= MaxHops &&
- b.ValidCurrHfSpec() &&
- b.ValidCurrInfSpec() &&
- 0 <= b.PathMeta.SegLen[0] && b.PathMeta.SegLen[0] < MaxHops &&
- 0 <= b.PathMeta.SegLen[1] && b.PathMeta.SegLen[1] < MaxHops &&
- 0 <= b.PathMeta.SegLen[2] && b.PathMeta.SegLen[2] < MaxHops &&
- (b.NumINF == 1 ==> b.NumHops == int(b.PathMeta.SegLen[0])) &&
- (b.NumINF == 2 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1])) &&
- (b.NumINF == 3 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1] + b.PathMeta.SegLen[2])) &&
- (forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==>
- b.PathMeta.SegLen[i] != 0) &&
- (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==>
- b.PathMeta.SegLen[i] == 0)
-}
-
-ghost
-decreases
-pure func (b Base) CurrInfMatchesCurrHF() bool {
- return b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF)
+ b.ValidCurrInfSpec() &&
+ b.ValidCurrHfSpec()
}

+// A `Base` is weakly valid when the fields `NumINF` and `NumHops` are,
+// at most, `MaxINFs` and `MaxHops`, respectively, its `PathMeta` field
+// is in bounds, and its `SegLen` entries are consistent with `NumINF`
+// and `NumHops`. 
`DecodeFromBytes` guarantees that all `Base`s +// obtained from raw bytes will be weakly valid. ghost decreases -pure func (b Base) InfsMatchHfsSpec() bool { - return 0 <= b.NumINF && b.NumINF <= 3 && - (b.NumINF == 1 ==> b.NumHops == int(b.PathMeta.SegLen[0])) && - (b.NumINF == 2 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1])) && - (b.NumINF == 3 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1] + b.PathMeta.SegLen[2])) && - (forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==> - b.PathMeta.SegLen[i] != 0) && - (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==> - b.PathMeta.SegLen[i] == 0) +pure func (b Base) WeaklyValid() bool { + return 0 <= b.NumINF && b.NumINF <= MaxINFs && + 0 <= b.NumHops && b.NumHops <= MaxHops && + (0 < b.NumINF ==> 0 < b.NumHops) && + b.PathMeta.InBounds() && + b.NumsCompatibleWithSegLen() } +// A `Base` is strongly valid iff it is weakly valid and its `CurrHF` +// and `CurrINF` are within bounds. ghost -requires acc(b.Mem(), _) decreases -pure func (b *Base) ValidCurrINF() bool { - return unfolding acc(b.Mem(), _) in (*b).ValidCurrInfSpec() +pure func (b Base) StronglyValid() bool { + return b.WeaklyValid() && b.ValidCurrFieldsSpec() } +// A `Base` is fully valid iff it is strongly valid and its `CurrHF` is +// compatible with its `CurrINF`. ghost -requires acc(b.Mem(), _) decreases -pure func (b *Base) ValidCurrHF() bool { - return unfolding acc(b.Mem(), _) in (*b).ValidCurrHfSpec() +pure func (b Base) FullyValid() bool { + return b.StronglyValid() && b.CurrInfMatchesCurrHF() } ghost -requires acc(b.Mem(), _) decreases -pure func (b *Base) ValidCurrIdxs() bool { - return unfolding acc(b.Mem(), _) in (*b).ValidCurrIdxsSpec() +pure func (b Base) CurrInfMatchesCurrHF() bool { + return b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) } ghost -requires acc(b.Mem(), _) decreases -pure func (b *Base) InfsMatchHfs() bool { - return unfolding acc(b.Mem(), _) in (*b).InfsMatchHfsSpec() +pure func (b Base) NumsCompatibleWithSegLen() bool { + return 0 <= b.NumINF && b.NumINF <= 3 && + (b.NumINF == 1 ==> b.NumHops == int(b.PathMeta.SegLen[0])) && + (b.NumINF == 2 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1])) && + (b.NumINF == 3 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1] + b.PathMeta.SegLen[2])) && + (forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==> + b.PathMeta.SegLen[i] != 0) && + (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==> + b.PathMeta.SegLen[i] == 0) } ghost @@ -170,21 +157,20 @@ pure func (s Base) InfForHfSpec(hf uint8) (r uint8) { } ghost -requires acc(s.Mem(), _) decreases -pure func (s *Base) IsXoverSpec() bool { - return unfolding acc(s.Mem(), _) in ( - s.PathMeta.CurrHF+1 < uint8(s.NumHops) && - s.PathMeta.CurrINF != s.InfForHfSpec(s.PathMeta.CurrHF+1)) +pure func (s Base) IsXoverSpec() bool { + return s.PathMeta.CurrHF+1 < uint8(s.NumHops) && + s.PathMeta.CurrINF != s.InfForHfSpec(s.PathMeta.CurrHF+1) } ghost requires s.NumINF != 0 requires int(s.PathMeta.CurrHF) < s.NumHops-1 -ensures s.ValidCurrIdxsSpec() ==> res.ValidCurrIdxsSpec() +ensures s.WeaklyValid() ==> res.WeaklyValid() +ensures s.StronglyValid() ==> res.StronglyValid() decreases pure func (s Base) IncPathSpec() (res Base) { - return Base{ + return Base { PathMeta: MetaHdr{s.InfForHfSpec(s.PathMeta.CurrHF+1), s.PathMeta.CurrHF+1, s.PathMeta.SegLen}, NumINF: s.NumINF, NumHops: s.NumHops, @@ -222,10 +208,10 @@ pure func (b Base) 
ReverseSegLen() [3]uint8 { } ghost -requires b.ValidCurrIdxsSpec() -ensures b.ReverseSpec().ValidCurrIdxsSpec() +requires b.StronglyValid() +ensures b.ReverseSpec().StronglyValid() decreases -pure func (b Base) ReversingValidBaseIsValidBase() Lemma { +pure func (b Base) ReversingBaseStronglyValidSegLenHasValidSegLen() Lemma { return Lemma{} } @@ -274,12 +260,20 @@ pure func (s *Base) DecodeFromBytesSpec(b []byte) bool { s.PathMeta.DecodeFromBytesSpec(b) } +ghost +decreases +pure func (m MetaHdr) InBounds() bool { + return 0 <= m.CurrINF && m.CurrINF <= MaxINFs && + 0 <= m.CurrHF && m.CurrHF < MaxHops && + m.SegsInBounds() +} + ghost decreases pure func (m MetaHdr) SegsInBounds() bool { - return 0 <= m.SegLen[0] && m.SegLen[0] <= 63 && - 0 <= m.SegLen[1] && m.SegLen[1] <= 63 && - 0 <= m.SegLen[2] && m.SegLen[2] <= 63 + return 0 <= m.SegLen[0] && m.SegLen[0] < MaxHops && + 0 <= m.SegLen[1] && m.SegLen[1] < MaxHops && + 0 <= m.SegLen[2] && m.SegLen[2] < MaxHops } ghost @@ -306,16 +300,6 @@ pure func (m MetaHdr) SerializeToSpec(b []byte) bool { binary.BigEndian.PutUint32Spec(b0, b1, b2, b3, v) } -ghost -decreases -pure func (m MetaHdr) InBounds() bool { - return 0 <= m.CurrINF && m.CurrINF <= 3 && - 0 <= m.CurrHF && m.CurrHF <= 63 && - 0 <= m.SegLen[0] && m.SegLen[0] <= 63 && - 0 <= m.SegLen[1] && m.SegLen[1] <= 63 && - 0 <= m.SegLen[2] && m.SegLen[2] <= 63 -} - ghost requires acc(sl.Bytes(ub, 0, len(ub)), R56) decreases diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go index 6c5b8cca0..cc64c724e 100644 --- a/pkg/slayers/path/scion/decoded.go +++ b/pkg/slayers/path/scion/decoded.go @@ -48,8 +48,8 @@ type Decoded struct { // @ let b3 := sl.GetByte(data, 0, lenD, 3) in // @ let line := binary.BigEndian.Uint32Spec(b0, b1, b2, b3) in // @ let metaHdr := DecodedFrom(line) in -// @ metaHdr == s.GetMetaHdr(data) && -// @ s.InfsMatchHfs(data)) +// @ metaHdr == s.GetMetaHdr(data)) +// @ ensures r == nil ==> s.GetBase(data).WeaklyValid() // @ ensures r != nil ==> (r.ErrorMem() && s.NonInitMem()) // @ decreases func (s *Decoded) DecodeFromBytes(data []byte) (r error) { @@ -213,11 +213,11 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { // @ p.Mem(ubuf) && // @ p == s && // @ typeOf(p) == type[*Decoded] && -// @ (old(s.ValidCurrIdxs(ubuf)) ==> s.ValidCurrIdxs(ubuf))) +// @ (old(s.GetBase(ubuf).StronglyValid()) ==> s.GetBase(ubuf).StronglyValid())) // @ ensures r != nil ==> r.ErrorMem() && s.Mem(ubuf) // @ decreases func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { - //@ ghost isValid := s.ValidCurrIdxs(ubuf) + //@ ghost isValid := s.GetBase(ubuf).StronglyValid() //@ ghost base := s.GetBase(ubuf) //@ ghost metaHdrAferReversingSegLen := MetaHdr { //@ CurrINF: base.PathMeta.CurrINF, @@ -282,8 +282,8 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { s.PathMeta.CurrINF = uint8(s.NumINF) - s.PathMeta.CurrINF - 1 s.PathMeta.CurrHF = uint8(s.NumHops) - s.PathMeta.CurrHF - 1 //@ assert s.Base == base.ReverseSpec() - //@ ghost if isValid { base.ReversingValidBaseIsValidBase() } - //@ assert isValid ==> s.Base.ValidCurrIdxsSpec() + //@ ghost if isValid { base.ReversingBaseStronglyValidSegLenHasValidSegLen() } + //@ assert isValid ==> s.Base.StronglyValid() //@ fold s.Base.Mem() //@ fold s.Mem(ubuf) return s, nil diff --git a/pkg/slayers/path/scion/decoded_spec.gobra b/pkg/slayers/path/scion/decoded_spec.gobra index 6c15ef402..58c3aa191 100644 --- a/pkg/slayers/path/scion/decoded_spec.gobra 
+++ b/pkg/slayers/path/scion/decoded_spec.gobra @@ -113,7 +113,7 @@ requires d.Mem(ubuf) ensures e == nil ==> ( d.Mem(ubuf) && d.LenSpec(ubuf) == old(d.LenSpec(ubuf)) && - (old(d.ValidCurrIdxs(ubuf)) ==> d.ValidCurrIdxs(ubuf))) + (old(d.GetBase(ubuf).StronglyValid()) ==> d.GetBase(ubuf).StronglyValid())) ensures e != nil ==> d.NonInitMem() && e.ErrorMem() decreases func (d *Decoded) IncPath(ghost ubuf []byte) (e error) { @@ -127,27 +127,6 @@ func (d *Decoded) IncPath(ghost ubuf []byte) (e error) { return e } -ghost -requires acc(d.Mem(ub), _) -decreases -pure func (d *Decoded) ValidCurrINF(ub []byte) bool { - return unfolding acc(d.Mem(ub), _) in d.Base.ValidCurrINF() -} - -ghost -requires acc(d.Mem(ub), _) -decreases -pure func (d *Decoded) ValidCurrHF(ub []byte) bool { - return unfolding acc(d.Mem(ub), _) in d.Base.ValidCurrHF() -} - -ghost -requires acc(d.Mem(ub), _) -decreases -pure func (d *Decoded) ValidCurrIdxs(ub []byte) bool { - return unfolding acc(d.Mem(ub), _) in d.Base.ValidCurrIdxs() -} - ghost requires acc(d.Mem(ub), _) decreases @@ -169,13 +148,6 @@ pure func (s *Decoded) GetMetaHdr(ub []byte) MetaHdr { return unfolding acc(s.Mem(ub), _) in s.Base.GetMetaHdr() } -ghost -requires acc(d.Mem(ub), _) -decreases -pure func (d *Decoded) InfsMatchHfs(ub []byte) bool { - return unfolding acc(d.Mem(ub), _) in d.Base.InfsMatchHfs() -} - /**** End of Stubs ****/ /**** Auxiliary Functions ****/ diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index ed1cafbd4..95059b614 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -37,10 +37,10 @@ type Raw struct { // @ requires s.NonInitMem() // @ preserves acc(sl.Bytes(data, 0, len(data)), R42) // @ ensures res == nil ==> s.Mem(data) +// @ ensures res == nil ==> +// @ s.GetBase(data).WeaklyValid() && +// @ s.GetBase(data).EqAbsHeader(data) // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem()) -// posts for IO: -// @ ensures res == nil ==> s.EqAbsHeader(data) && -// @ s.InfsMatchHfs(data) && s.SegsInBounds(data) // @ decreases func (s *Raw) DecodeFromBytes(data []byte) (res error) { //@ unfold s.NonInitMem() @@ -139,7 +139,7 @@ func (s *Raw) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, err error) { // @ ensures err == nil ==> ( // @ let newUb := s.RawBufferMem(ubuf) in // @ d.Mem(newUb) && -// @ (old(s.ValidCurrIdxs(ubuf)) ==> d.ValidCurrIdxs(newUb))) +// @ (old(s.GetBase(ubuf).StronglyValid()) ==> d.GetBase(newUb).StronglyValid())) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { @@ -147,9 +147,9 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { //@ unfold acc(s.Base.Mem(), R6) //@ ghost var base Base = s.Base //@ ghost var pathMeta MetaHdr = s.Base.PathMeta - //@ ghost validIdxs := s.ValidCurrIdxs(ubuf) + //@ ghost validIdxs := s.GetBase(ubuf).StronglyValid() //@ assert validIdxs ==> s.Base.PathMeta.InBounds() - //@ assert validIdxs ==> base.ValidCurrIdxsSpec() + //@ assert validIdxs ==> base.StronglyValid() //@ assert s.Raw[:MetaLen] === ubuf[:MetaLen] // (VerifiedSCION) In this method, many slice operations are done in two @@ -207,8 +207,7 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { //@ ghost if validIdxs { //@ s.PathMeta.SerializeAndDeserializeLemma(b0, b1, b2, b3) //@ assert pathMeta == decoded.GetMetaHdr(s.Raw) - //@ assert decoded.GetBase(s.Raw).ValidCurrIdxsSpec() - //@ assert decoded.ValidCurrIdxs(s.Raw) + //@ 
assert decoded.GetBase(s.Raw).StronglyValid() //@ } //@ sl.Unslice_Bytes(ubuf, 0, len(s.Raw), HalfPerm) //@ sl.Unslice_Bytes(ubuf, 0, len(s.Raw), HalfPerm) @@ -223,10 +222,10 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // @ requires s.Mem(ubuf) // @ requires sl.Bytes(ubuf, 0, len(ubuf)) // pres for IO: -// @ requires s.EqAbsHeader(ubuf) +// @ requires s.GetBase(ubuf).EqAbsHeader(ubuf) // @ requires validPktMetaHdr(ubuf) // @ requires len(s.absPkt(ubuf).CurrSeg.Future) > 0 -// @ requires s.GetIsXoverSpec(ubuf) ==> +// @ requires s.GetBase(ubuf).IsXoverSpec() ==> // @ s.absPkt(ubuf).LeftSeg != none[io.IO_seg3] // @ ensures sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures old(unfolding s.Mem(ubuf) in unfolding @@ -235,10 +234,10 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // @ ensures r != nil ==> s.NonInitMem() // @ ensures r != nil ==> r.ErrorMem() // post for IO: -// @ ensures r == nil ==> s.EqAbsHeader(ubuf) && validPktMetaHdr(ubuf) -// @ ensures r == nil && old(s.GetIsXoverSpec(ubuf)) ==> +// @ ensures r == nil ==> s.GetBase(ubuf).EqAbsHeader(ubuf) && validPktMetaHdr(ubuf) +// @ ensures r == nil && old(s.GetBase(ubuf).IsXoverSpec()) ==> // @ s.absPkt(ubuf) == AbsXover(old(s.absPkt(ubuf))) -// @ ensures r == nil && !old(s.GetIsXoverSpec(ubuf)) ==> +// @ ensures r == nil && !old(s.GetBase(ubuf).IsXoverSpec()) ==> // @ s.absPkt(ubuf) == AbsIncPath(old(s.absPkt(ubuf))) // @ decreases func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { @@ -276,7 +275,7 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ sl.SplitRange_Bytes(ubuf, 0, MetaLen, HalfPerm) //@ unfold acc(s.Base.Mem(), R2) err := s.PathMeta.SerializeTo(s.Raw[:MetaLen]) - //@ assert s.Base.ValidCurrIdxs() + //@ assert s.Base.StronglyValid() //@ assert s.PathMeta.InBounds() //@ v := s.Raw[:MetaLen] //@ b0 := sl.GetByte(v, 0, MetaLen, 0) @@ -285,7 +284,7 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ b3 := sl.GetByte(v, 0, MetaLen, 3) //@ s.PathMeta.SerializeAndDeserializeLemma(b0, b1, b2, b3) //@ assert s.PathMeta.EqAbsHeader(v) - //@ assert RawBytesToBase(v).ValidCurrIdxsSpec() + //@ assert RawBytesToBase(v).StronglyValid() //@ sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) //@ ValidPktMetaHdrSublice(ubuf, MetaLen) //@ assert s.EqAbsHeader(ubuf) == s.PathMeta.EqAbsHeader(ubuf) @@ -371,7 +370,7 @@ func (s *Raw) GetInfoField(idx int /*@, ghost ubuf []byte @*/) (ifield path.Info // CurrINF index in the path meta header. 
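Stepping back, the refactor in this patch routes all path-validity reasoning through one hierarchy on Base: DecodeFromBytes establishes WeaklyValid, StronglyValid additionally pins CurrINF and CurrHF to existing fields, and FullyValid further requires CurrINF to be the info field that owns CurrHF; callers query these through GetBase instead of the removed per-property stubs. The ghost-free Go sketch below renders that hierarchy executably; constants and field names follow the Gobra definitions above, while inBounds and infForHf restate MetaHdr.InBounds and InfForHfSpec as understood here and should be read as assumptions, not as the verified definitions.

package sketch

const (
	maxINFs = 3
	maxHops = 64
)

type metaHdr struct {
	CurrINF, CurrHF uint8
	SegLen          [3]uint8
}

type base struct {
	PathMeta metaHdr
	NumINF   int // number of info fields
	NumHops  int // total number of hop fields
}

// inBounds mirrors MetaHdr.InBounds (uint8 fields are non-negative
// by construction, so only the upper bounds are checked).
func (m metaHdr) inBounds() bool {
	return m.CurrINF <= maxINFs && m.CurrHF < maxHops &&
		m.SegLen[0] < maxHops && m.SegLen[1] < maxHops && m.SegLen[2] < maxHops
}

// numsCompatibleWithSegLen mirrors NumsCompatibleWithSegLen: segments
// before NumINF are non-empty, later entries are zero, and the lengths
// add up to NumHops.
func (b base) numsCompatibleWithSegLen() bool {
	total := 0
	for i := 0; i < maxINFs; i++ {
		l := int(b.PathMeta.SegLen[i])
		if i < b.NumINF && l == 0 {
			return false
		}
		if i >= b.NumINF && l != 0 {
			return false
		}
		total += l
	}
	return total == b.NumHops
}

// weaklyValid: everything DecodeFromBytes alone can promise.
func (b base) weaklyValid() bool {
	return b.NumINF >= 0 && b.NumINF <= maxINFs &&
		b.NumHops >= 0 && b.NumHops <= maxHops &&
		(b.NumINF == 0 || b.NumHops > 0) &&
		b.PathMeta.inBounds() &&
		b.numsCompatibleWithSegLen()
}

// stronglyValid: CurrINF and CurrHF point to existing fields.
func (b base) stronglyValid() bool {
	return b.weaklyValid() &&
		int(b.PathMeta.CurrINF) < b.NumINF &&
		int(b.PathMeta.CurrHF) < b.NumHops
}

// infForHf: the info field owning a hop index, first segment first.
func (b base) infForHf(hf uint8) uint8 {
	switch {
	case int(hf) < int(b.PathMeta.SegLen[0]):
		return 0
	case int(hf) < int(b.PathMeta.SegLen[0])+int(b.PathMeta.SegLen[1]):
		return 1
	default:
		return 2
	}
}

// fullyValid: additionally, CurrINF is the info field owning CurrHF.
func (b base) fullyValid() bool {
	return b.stronglyValid() && b.PathMeta.CurrINF == b.infForHf(b.PathMeta.CurrHF)
}

On this reading, a contract like `ensures res == nil ==> s.GetBase(data).WeaklyValid()` says exactly which rung of the ladder a method establishes, and lemmas such as EstablishValidPktMetaHdr climb from the full validity of the header to the packet-level validPktMetaHdr.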
// @ preserves acc(s.Mem(ubuf), R8) // @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R9) -// @ ensures (r == nil) == s.ValidCurrINF(ubuf) +// @ ensures (r == nil) == s.GetBase(ubuf).ValidCurrInfSpec() // @ ensures r == nil ==> s.CorrectlyDecodedInf(ubuf, res) // @ ensures r != nil ==> r.ErrorMem() // @ decreases @@ -384,7 +383,7 @@ func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoFie //@ assume 0 <= idx //@ fold acc(s.Base.Mem(), R10) //@ fold acc(s.Mem(ubuf), R9) - //@ assert forall res path.InfoField :: {s.CorrectlyDecodedInf(ubuf, res)} s.ValidCurrINF(ubuf) ==> + //@ assert forall res path.InfoField :: { s.CorrectlyDecodedInf(ubuf, res) } s.GetBase(ubuf).ValidCurrInfSpec() ==> //@ reveal s.CorrectlyDecodedInf(ubuf, res) == reveal s.CorrectlyDecodedInfWithIdx(ubuf, idx, res) return s.GetInfoField(idx /*@, ubuf @*/) } @@ -394,13 +393,14 @@ func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoFie // @ requires sl.Bytes(ubuf, 0, len(ubuf)) // @ requires acc(s.Mem(ubuf), R20) // pres for IO: -// @ requires validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) +// @ requires validPktMetaHdr(ubuf) +// @ requires s.GetBase(ubuf).EqAbsHeader(ubuf) // @ ensures acc(s.Mem(ubuf), R20) // @ ensures sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures r != nil ==> r.ErrorMem() // posts for IO: // @ ensures r == nil ==> -// @ validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) +// @ validPktMetaHdr(ubuf) && s.GetBase(ubuf).EqAbsHeader(ubuf) // @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> // @ let oldPkt := old(s.absPkt(ubuf)) in // @ let newPkt := oldPkt.UpdateInfoField(info.ToAbsInfoField()) in @@ -510,7 +510,7 @@ func (s *Raw) GetHopField(idx int /*@, ghost ubuf []byte @*/) (res path.HopField // CurrHF index in the path meta header. // @ preserves acc(s.Mem(ubuf), R8) // @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R9) -// @ ensures (r == nil) == s.ValidCurrHF(ubuf) +// @ ensures (r == nil) == s.GetBase(ubuf).ValidCurrHfSpec() // @ ensures r == nil ==> s.CorrectlyDecodedHf(ubuf, res) // @ ensures r != nil ==> r.ErrorMem() // @ decreases @@ -523,7 +523,7 @@ func (s *Raw) GetCurrentHopField( /*@ ghost ubuf []byte @*/ ) (res path.HopField //@ assume 0 <= idx //@ fold acc(s.Base.Mem(), R10) //@ fold acc(s.Mem(ubuf), R9) - //@ assert forall res path.HopField :: {s.CorrectlyDecodedHf(ubuf, res)} s.ValidCurrHF(ubuf) ==> + //@ assert forall res path.HopField :: { s.CorrectlyDecodedHf(ubuf, res) } s.GetBase(ubuf).ValidCurrHfSpec() ==> //@ reveal s.CorrectlyDecodedHf(ubuf, res) == reveal s.CorrectlyDecodedHfWithIdx(ubuf, idx, res) return s.GetHopField(idx /*@, ubuf @*/) } diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index 4f0b72711..5fd0ba9a6 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -47,8 +47,9 @@ requires acc(s.Mem(buf), _) requires acc(sl.Bytes(buf, 0, len(buf)), R42) decreases func (s *Raw) IsValidResultOfDecoding(buf []byte, err error) (res bool) { - return s.EqAbsHeader(buf) && - s.InfsMatchHfs(buf) && s.SegsInBounds(buf) + return let base := s.GetBase(buf) in + base.EqAbsHeader(buf) && + base.WeaklyValid() } /**** Stubs ****/ @@ -110,7 +111,7 @@ func (s *Raw) IsFirstHopAfterXover(ghost ub []byte) (res bool) { * introduced this wrapper method which acts as a wrapper. 
*/ preserves acc(s.Mem(ub), R9) -ensures res == s.GetIsXoverSpec(ub) +ensures res == s.GetBase(ub).IsXoverSpec() decreases func (s *Raw) IsXover(ghost ub []byte) (res bool) { unfold acc(s.Mem(ub), R9) @@ -118,72 +119,6 @@ func (s *Raw) IsXover(ghost ub []byte) (res bool) { return s.Base.IsXover() } -ghost -requires acc(s.Mem(ub), _) -decreases -pure func (s *Raw) ValidCurrINF(ghost ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - s.Base.ValidCurrINF() -} - -ghost -requires acc(s.Mem(ub), _) -decreases -pure func (s *Raw) ValidCurrHF(ghost ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - s.Base.ValidCurrHF() -} - -ghost -requires acc(s.Mem(ub), _) -decreases -pure func (s *Raw) ValidCurrIdxs(ghost ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - s.Base.ValidCurrIdxs() -} - -ghost -requires acc(s.Mem(ub), _) -decreases -pure func (s *Raw) InfsMatchHfs(ghost ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - s.Base.InfsMatchHfs() -} - -ghost -requires acc(s.Mem(ub), _) -decreases -pure func (s *Raw) CurrInfMatchesCurrHF(ghost ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - s.Base.GetBase().CurrInfMatchesCurrHF() -} - -ghost -requires acc(s.Mem(ub), _) -requires acc(sl.Bytes(ub, 0, len(ub)), R56) -decreases -pure func (s *Raw) EqAbsHeader(ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - unfolding acc(s.Base.Mem(), _) in - s.Base.EqAbsHeader(ub) -} - -ghost -requires acc(s.Mem(ub), _) -decreases -pure func (s *Raw) GetIsXoverSpec(ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in s.Base.IsXoverSpec() -} - -ghost -requires acc(s.Mem(ub), _) -decreases -pure func (s *Raw) SegsInBounds(ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - unfolding acc(s.Base.Mem(), _) in - s.PathMeta.SegsInBounds() -} - /**** End of Stubs ****/ /**** Lemmas ****/ @@ -214,6 +149,13 @@ func (r *Raw) Widen(ubuf1, ubuf2 []byte) { /**** End of Lemmas ****/ /**** Start of helpful pure functions ****/ +ghost +requires acc(r.Mem(ub), _) +decreases +pure func (r *Raw) GetBase(ub []byte) Base { + return unfolding acc(r.Mem(ub), _) in r.Base.GetBase() +} + ghost requires acc(r.Mem(ub), _) decreases @@ -495,7 +437,7 @@ pure func validPktMetaHdr(raw []byte) bool { let segs := io.CombineSegLens(seg1, seg2, seg3) in let base := RawBytesToBase(raw) in 0 < metaHdr.SegLen[0] && - base.ValidCurrIdxsSpec() && + base.StronglyValid() && base.CurrInfMatchesCurrHF() && PktLen(segs, MetaLen) <= len(raw) } @@ -521,16 +463,12 @@ func ValidPktMetaHdrSublice(raw []byte, idx int) { ghost requires acc(s.Mem(ub), R54) requires acc(sl.Bytes(ub, 0, len(ub)), R55) -requires s.InfsMatchHfs(ub) -requires s.ValidCurrINF(ub) -requires s.ValidCurrHF(ub) -requires s.SegsInBounds(ub) -requires s.CurrInfMatchesCurrHF(ub) -requires s.EqAbsHeader(ub) +requires s.GetBase(ub).FullyValid() +requires s.GetBase(ub).EqAbsHeader(ub) ensures acc(sl.Bytes(ub, 0, len(ub)), R55) ensures acc(s.Mem(ub), R54) ensures validPktMetaHdr(ub) -ensures s.EqAbsHeader(ub) +ensures s.GetBase(ub).EqAbsHeader(ub) decreases func (s *Raw) EstablishValidPktMetaHdr(ghost ub []byte) { unfold acc(s.Mem(ub), R55) @@ -542,7 +480,7 @@ func (s *Raw) EstablishValidPktMetaHdr(ghost ub []byte) { seg3 := int(s.Base.PathMeta.SegLen[2]) segs := io.CombineSegLens(seg1, seg2, seg3) assert 0 < seg1 - assert s.ValidCurrIdxs(ub) + assert s.GetBase(ub).NumsCompatibleWithSegLen() assert PktLen(segs, MetaLen) <= len(ub) assert reveal validPktMetaHdr(ub) fold acc(s.Base.Mem(), R56) @@ -616,7 +554,7 @@ pure func (s *Raw) 
CorrectlyDecodedInfWithIdx(ub []byte, idx int, info path.Info ghost opaque requires acc(s.Mem(ub), _) -requires s.ValidCurrINF(ub) +requires s.GetBase(ub).ValidCurrInfSpec() requires acc(sl.Bytes(ub, 0, len(ub)), R56) decreases pure func (s *Raw) CorrectlyDecodedInf(ub []byte, info path.InfoField) bool { @@ -645,7 +583,7 @@ pure func (s *Raw) CorrectlyDecodedHfWithIdx(ub []byte, idx int, hop path.HopFie ghost opaque requires acc(s.Mem(ub), _) -requires s.ValidCurrHF(ub) +requires s.GetBase(ub).ValidCurrHfSpec() requires acc(sl.Bytes(ub, 0, len(ub)), R56) decreases pure func (s *Raw) CorrectlyDecodedHf(ub []byte, hop path.HopField) bool { @@ -662,7 +600,7 @@ preserves acc(s.Mem(ubuf), R55) preserves s.IsLastHopSpec(ubuf) preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) preserves validPktMetaHdr(ubuf) -preserves s.EqAbsHeader(ubuf) +preserves s.GetBase(ubuf).EqAbsHeader(ubuf) ensures len(s.absPkt(ubuf).CurrSeg.Future) == 1 decreases func (s *Raw) LastHopLemma(ubuf []byte) { @@ -685,10 +623,10 @@ func (s *Raw) LastHopLemma(ubuf []byte) { ghost preserves acc(s.Mem(ubuf), R55) -preserves s.GetIsXoverSpec(ubuf) +preserves s.GetBase(ubuf).IsXoverSpec() preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) preserves validPktMetaHdr(ubuf) -preserves s.EqAbsHeader(ubuf) +preserves s.GetBase(ubuf).EqAbsHeader(ubuf) ensures s.absPkt(ubuf).LeftSeg != none[io.IO_seg2] ensures len(s.absPkt(ubuf).CurrSeg.Future) == 1 decreases @@ -736,10 +674,10 @@ ghost preserves acc(s.Mem(ubuf), R53) preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R53) preserves validPktMetaHdr(ubuf) -preserves s.EqAbsHeader(ubuf) +preserves s.GetBase(ubuf).EqAbsHeader(ubuf) preserves len(s.absPkt(ubuf).CurrSeg.Future) > 0 -preserves s.ValidCurrINF(ubuf) -preserves s.ValidCurrHF(ubuf) +preserves s.GetBase(ubuf).ValidCurrInfSpec() +preserves s.GetBase(ubuf).ValidCurrHfSpec() preserves s.CorrectlyDecodedInf(ubuf, info) preserves s.CorrectlyDecodedHf(ubuf, hop) ensures s.EqAbsInfoField(s.absPkt(ubuf), info.ToAbsInfoField()) diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index d3a7ad19d..6ffb1b741 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -434,7 +434,7 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ unfold acc(sl.Bytes(data[offset : offset+pathLen], 0, len(data[offset : offset+pathLen])), R56) // @ unfold acc(s.Path.(*scion.Raw).Mem(data[offset : offset+pathLen]), R55) // @ assert reveal s.EqAbsHeader(data) - // @ assert reveal s.ValidScionInitSpec(data) + // @ assert reveal s.ValidScionInitSpec(data) // @ fold acc(s.Path.Mem(data[offset : offset+pathLen]), R55) // @ fold acc(sl.Bytes(data, 0, len(data)), R56) // @ fold acc(sl.Bytes(data[offset : offset+pathLen], 0, len(data[offset : offset+pathLen])), R56) diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index cbda1441a..9d5689d17 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -189,9 +189,9 @@ pure func (s *SCION) ValidPathMetaData(ghost ub []byte) bool { return unfolding acc(s.Mem(ub), _) in let ubPath := s.UBPath(ub) in (typeOf(s.Path) == type[*scion.Raw] ==> - s.Path.(*scion.Raw).ValidCurrIdxs(ubPath)) && + s.Path.(*scion.Raw).GetBase(ubPath).StronglyValid()) && (typeOf(s.Path) == type[*epic.Path] ==> - s.Path.(*epic.Path).ValidCurrIdxs(ubPath)) + s.Path.(*epic.Path).GetBase(ubPath).StronglyValid()) } // TODO: simplify the body of the predicate when let expressions @@ -444,9 +444,7 @@ func (s *SCION) ValidScionInitSpec(ub []byte) bool { let low := 
CmnHdrLen+s.AddrHdrLenSpecInternal() in let high := s.HdrLen*LineLen in typeOf(s.Path) == (*scion.Raw) && - unfolding acc(s.Path.Mem(ub[low:high]), _) in - s.Path.(*scion.Raw).Base.InfsMatchHfs() && - s.Path.(*scion.Raw).Base.GetMetaHdr().SegsInBounds() + s.Path.(*scion.Raw).GetBase(ub[low:high]).WeaklyValid() } // Checks if the common path header is valid in the serialized scion packet. @@ -471,9 +469,8 @@ pure func ValidPktMetaHdr(raw []byte) bool { let seg3 := int(metaHdr.SegLen[2]) in let segs := io.CombineSegLens(seg1, seg2, seg3) in let base := scion.Base{metaHdr, segs.NumInfoFields(), segs.TotalHops()} in - 0 < metaHdr.SegLen[0] && - base.ValidCurrIdxsSpec() && - base.CurrInfMatchesCurrHF() && + 0 < metaHdr.SegLen[0] && + base.FullyValid() && scion.PktLen(segs, start + scion.MetaLen) <= length } diff --git a/router/dataplane.go b/router/dataplane.go index e156ec590..3ce5d8f6d 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1103,7 +1103,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ ghost ioLock.Unlock() // @ unfold acc(writeMsgs[0].Mem(), R50) // @ ghost if addrAliasesPkt && result.OutAddr != nil { - // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.Bytes(tmpBuf, 0, len(tmpBuf)), R15) + // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.Bytes(tmpBuf, 0, len(tmpBuf)), R15) // @ } // @ sl.CombineRange_Bytes(p.Buffers[0], 0, p.N, writePerm) // @ msgs[:pkts][i0].IsActive = false @@ -1756,11 +1756,11 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ requires acc(ioLock.LockP(), _) // @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> // @ requires let absPkt := absIO_val(p.rawPkt, p.ingressID) in -// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) // @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> -// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) // @ ensures respr.OutPkt != nil ==> -// @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) +// @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) // @ ensures reserr != nil && respr.OutPkt != nil ==> // @ newAbsPkt.isIO_val_Unsupported // @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) @@ -1884,8 +1884,8 @@ type macBuffersT struct { } // @ trusted -// @ requires false -// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ requires false +// @ ensures reserr != nil && respr.OutPkt != nil ==> // @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported func (p *scionPacketProcessor) packSCMP( typ slayers.SCMPType, @@ -1924,14 +1924,14 @@ func (p *scionPacketProcessor) packSCMP( // @ ensures acc(&p.hopField) && acc(&p.infoField) // @ ensures respr === processResult{} // @ ensures reserr == nil ==> ( -// @ let ubPath := p.scionLayer.UBPath(ub) in -// @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ p.path.GetCurrHF(ubPath) < p.path.GetNumHops(ubPath)) +// @ let ubPath := p.scionLayer.UBPath(ub) in +// @ unfolding acc(p.scionLayer.Mem(ub), R10) in +// @ p.path.GetCurrHF(ubPath) < p.path.GetNumHops(ubPath)) // @ ensures acc(p.scionLayer.Mem(ub), R6) // @ ensures p.d.validResult(respr, false) // @ ensures reserr == nil ==> ( -// @ let ubPath := p.scionLayer.UBPath(ub) in -// @ unfolding 
acc(p.scionLayer.Mem(ub), R10) in +// @ let ubPath := p.scionLayer.UBPath(ub) in +// @ unfolding acc(p.scionLayer.Mem(ub), R10) in // @ p.path.GetCurrINF(ubPath) < p.path.GetNumINF(ubPath)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec @@ -1972,16 +1972,16 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce } // (VerifiedSCION) This assumption will be dropped after clarifying // https://github.com/scionproto/scion/issues/4531 - // @ TemporaryAssumeForIO(p.path.CurrInfMatchesCurrHF(ubPath)) + // @ TemporaryAssumeForIO(p.path.GetBase(ubPath).CurrInfMatchesCurrHF()) // @ p.EstablishEqAbsHeader(ub, startP, endP) // @ p.path.EstablishValidPktMetaHdr(ubPath) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ absPktFutureLemma(ub) // @ p.path.DecodingLemma(ubPath, p.infoField, p.hopField) // @ assert reveal p.path.EqAbsInfoField(p.path.absPkt(ubPath), - // @ p.infoField.ToAbsInfoField()) + // @ p.infoField.ToAbsInfoField()) // @ assert reveal p.path.EqAbsHopField(p.path.absPkt(ubPath), - // @ p.hopField.ToIO_HF()) + // @ p.hopField.ToIO_HF()) // @ assert reveal p.EqAbsHopField(absPkt(ub)) // @ assert reveal p.EqAbsInfoField(absPkt(ub)) return processResult{}, nil @@ -2137,17 +2137,17 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ ) // @ ghost if(p.path.IsLastHopSpec(ubPath)) { // @ p.path.LastHopLemma(ubPath) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ubScionL, startP) - // @ p.SubSliceAbsPktToAbsPkt(ubScionL, startP, endP) + // @ p.SubSliceAbsPktToAbsPkt(ubScionL, startP, endP) // @ } } // @ fold p.d.validResult(processResult{}, false) // @ assert (unfolding acc(p.scionLayer.Mem(ubScionL), R55) in - // @ (unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in - // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0 + // @ (unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in + // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0 // @ assert (unfolding acc(p.scionLayer.Mem(ubScionL), R55) in - // @ (unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in - // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.path.IsLastHopSpec(ubPath) + // @ (unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in + // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.path.IsLastHopSpec(ubPath) // @ assert reveal p.DstIsLocalIngressID(ubScionL) // @ assert reveal p.LastHopLen(ubScionL) return processResult{}, nil @@ -2188,11 +2188,11 @@ func (p *scionPacketProcessor) invalidDstIA() (processResult, error) { // @ requires acc(&p.ingressID, R21) // @ requires acc(&p.infoField, R4) && acc(&p.hopField, R4) // @ requires let ubPath := p.scionLayer.UBPath(ub) in -// @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ p.path.GetCurrHF(ubPath) <= p.path.GetNumHops(ubPath) +// @ unfolding acc(p.scionLayer.Mem(ub), R10) in +// @ p.path.GetCurrHF(ubPath) <= p.path.GetNumHops(ubPath) // @ requires let ubPath := p.scionLayer.UBPath(ub) in -// @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ p.path.GetCurrINF(ubPath) <= p.path.GetNumINF(ubPath) +// @ unfolding acc(p.scionLayer.Mem(ub), R10) in +// @ p.path.GetCurrINF(ubPath) <= p.path.GetNumINF(ubPath) // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) // @ requires acc(&p.srcAddr, R20) && acc(p.srcAddr.Mem(), _) // @ preserves acc(sl.Bytes(ub, 0, 
len(ub)), R4) @@ -2227,7 +2227,7 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ // @ ghost if p.d.internalNextHops != nil { unfold acc(accAddr(p.d.internalNextHops), _) } expectedSrc, ok := p.d.internalNextHops[pktIngressID] // @ ghost if ok { - // @ assert expectedSrc in range(p.d.internalNextHops) + // @ assert expectedSrc in range(p.d.internalNextHops) // @ unfold acc(expectedSrc.Mem(), _) // @ } // @ unfold acc(p.srcAddr.Mem(), _) @@ -2631,7 +2631,7 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr } // (VerifiedSCION) This assumption will be dropped after clarifying // https://github.com/scionproto/scion/issues/4524. - //@ TemporaryAssumeForIO(!p.path.GetIsXoverSpec(ubPath)) + //@ TemporaryAssumeForIO(!p.path.GetBase(ubPath).IsXoverSpec()) if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) @@ -2909,7 +2909,7 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh } // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ assert p.DstIsLocalIngressID(ub) - // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopfield + // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.GetBase(ubPath).EqAbsHeader(ubPath)) // postcondition of SetHopfield // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ absPktFutureLemma(ub) @@ -3016,7 +3016,7 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho return processResult{}, serrors.WrapStr("update hop field", err) } // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopfield + // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.GetBase(ubPath).EqAbsHeader(ubPath)) // postcondition of SetHopfield // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ absPktFutureLemma(ub) @@ -3075,7 +3075,7 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( scionPld /*@ , start, end @*/ := p.lastLayer.LayerPayload( /*@ ubLastLayer @*/ ) // @ assert scionPld === ubLastLayer[start:end] || scionPld == nil // @ if scionPld == nil { sl.NilAcc_Bytes() } else { - // @ sl.SplitRange_Bytes(ubLastLayer, start, end, R1) + // @ sl.SplitRange_Bytes(ubLastLayer, start, end, R1) // @ ghost defer sl.CombineRange_Bytes(ubLastLayer, start, end, R1) // @ } // @ gopacket.AssertInvariantNilDecodeFeedback() @@ -3193,11 +3193,11 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ requires acc(ioLock.LockP(), _) // @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> // @ requires let absPkt := absIO_val(ub, p.ingressID) in -// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) // @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> -// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) // @ ensures respr.OutPkt != nil ==> -// @ newAbsPkt == 
absIO_val(respr.OutPkt, respr.EgressID) +// @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) // @ ensures reserr != nil && respr.OutPkt != nil ==> // @ newAbsPkt.isIO_val_Unsupported // @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) @@ -3344,10 +3344,10 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ ghost if(slayers.IsSupportedPkt(ub)) { // @ ghost if(!p.segmentChange) { // enter/exit event - // @ ExternalEnterOrExitEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) + // @ ExternalEnterOrExitEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) // @ } else { // xover event - // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) + // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) // @ } // @ } // @ newAbsPkt = reveal absIO_val(p.rawPkt, egressID) @@ -3364,9 +3364,9 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) // @ ghost if(slayers.IsSupportedPkt(ub)) { // @ if(!p.segmentChange) { - // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) + // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) // @ } else { - // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) + // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) // @ } // @ } // @ newAbsPkt = reveal absIO_val(p.rawPkt, 0) @@ -3936,7 +3936,7 @@ func (p *scionPacketProcessor) prepareSCMP( _, external := p.d.external[p.ingressID] if external { // @ requires revPath.Mem(rawPath) - // @ requires revPath.ValidCurrIdxs(rawPath) + // @ requires revPath.GetBase(rawPath).StronglyValid() // @ ensures revPath.Mem(rawPath) // @ decreases // @ outline( diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index be6d949cd..e6733d2ce 100644 --- a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -159,7 +159,7 @@ decreases pure func (p* scionPacketProcessor) GetIsXoverSpec(ub []byte) bool { return let ubPath := p.scionLayer.UBPath(ub) in unfolding acc(p.scionLayer.Mem(ub), R55) in - p.path.GetIsXoverSpec(ubPath) + p.path.GetBase(ubPath).IsXoverSpec() } ghost @@ -223,9 +223,8 @@ ensures acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) ensures start == p.scionLayer.PathStartIdx(ub) ensures end == p.scionLayer.PathEndIdx(ub) -ensures p.path.EqAbsHeader(ub[start:end]) -ensures p.path.InfsMatchHfs(ub[start:end]) -ensures p.path.SegsInBounds(ub[start:end]) +ensures p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) +ensures p.path.GetBase(ub[start:end]).WeaklyValid() ensures p.scionLayer.ValidHeaderOffset(ub, len(ub)) decreases func (p* scionPacketProcessor) EstablishEqAbsHeader(ub []byte, start int, end int) { @@ -236,7 +235,7 @@ func (p* scionPacketProcessor) EstablishEqAbsHeader(ub []byte, start int, end in reveal p.scionLayer.EqAbsHeader(ub) reveal p.scionLayer.ValidScionInitSpec(ub) assert reveal p.scionLayer.ValidHeaderOffset(ub, len(ub)) - assert p.path.EqAbsHeader(ub[start:end]) + assert 
p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) fold acc(p.path.Mem(ub[start:end]), R56) fold acc(p.scionLayer.Mem(ub), R56) fold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) @@ -263,7 +262,7 @@ ensures p.scionLayer.EqAbsHeader(ub) ensures start == p.scionLayer.PathStartIdx(ub) ensures end == p.scionLayer.PathEndIdx(ub) ensures scion.validPktMetaHdr(ub[start:end]) -ensures p.path.EqAbsHeader(ub[start:end]) +ensures p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) ensures p.scionLayer.ValidHeaderOffset(ub, len(ub)) ensures absPkt(ub) == p.path.absPkt(ub[start:end]) decreases @@ -275,7 +274,7 @@ func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end assert reveal scion.validPktMetaHdr(ub[start:end]) unfold acc(p.scionLayer.Mem(ub), R56) reveal p.scionLayer.ValidHeaderOffset(ub, len(ub)) - assert p.path.EqAbsHeader(ub[start:end]) + assert p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) fold acc(p.scionLayer.Mem(ub), R56) assert start == slayers.GetAddressOffset(ub) @@ -315,7 +314,7 @@ requires p.path === p.scionLayer.GetPath(ub) requires scion.validPktMetaHdr(ub[start:end]) requires start == p.scionLayer.PathStartIdx(ub) requires end == p.scionLayer.PathEndIdx(ub) -requires p.path.EqAbsHeader(ub[start:end]) +requires p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) requires p.scionLayer.ValidHeaderOffset(ub, len(ub)) ensures acc(sl.Bytes(ub, 0, len(ub)), R50) ensures acc(p.scionLayer.Mem(ub), R55) From 1a4feed02784525248e38a40157d21434a8e000b Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Mon, 1 Jul 2024 15:06:34 +0200 Subject: [PATCH 49/57] proof of IsSupportedPkt (#363) --- pkg/slayers/scion_spec.gobra | 15 +++++++++++ router/dataplane.go | 52 +++++++++++++++++++++++++++++++++--- 2 files changed, 64 insertions(+), 3 deletions(-) diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index 9d5689d17..00069e6a0 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -486,6 +486,21 @@ pure func IsSupportedPkt(raw []byte) bool { nextHdr != L4SCMP } +ghost +requires CmnHdrLen <= idx && idx <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R55) +preserves acc(sl.Bytes(raw[:idx], 0, idx), R55) +ensures IsSupportedPkt(raw) == IsSupportedPkt(raw[:idx]) +decreases +func IsSupportedPktSubslice(raw []byte, idx int) { + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(raw[:idx], 0, idx), R56) + reveal IsSupportedPkt(raw) + reveal IsSupportedPkt(raw[:idx]) + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(raw[:idx], 0, idx), R56) +} + ghost requires acc(sl.Bytes(ub, 0, len(ub)), _) requires CmnHdrLen <= len(ub) diff --git a/router/dataplane.go b/router/dataplane.go index 3ce5d8f6d..db8b812b5 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1940,6 +1940,7 @@ func (p *scionPacketProcessor) packSCMP( // @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) +// @ ensures old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) // @ ensures respr.OutPkt == nil // @ decreases func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { @@ -1984,6 +1985,7 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // @ p.hopField.ToIO_HF()) // @ assert reveal p.EqAbsHopField(absPkt(ub)) // @ assert 
reveal p.EqAbsInfoField(absPkt(ub)) + // @ assert old(reveal slayers.IsSupportedPkt(ub)) == reveal slayers.IsSupportedPkt(ub) return processResult{}, nil } @@ -2393,6 +2395,7 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh // @ ensures err == nil ==> p.LastHopLen(ub) // @ ensures err == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures err == nil ==> p.EqAbsInfoField(absPkt(ub)) +// @ ensures err == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) // @ decreases func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte @*/ ) (err error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) @@ -2416,14 +2419,22 @@ func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte // (VerifiedSCION) the following property is guaranteed by the type system, but Gobra cannot infer it yet // @ assume 0 <= p.path.GetCurrINF(ubPath) // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, start, slayers.CmnHdrLen, R54) + // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) // @ p.AbsPktToSubSliceAbsPkt(ub, start, end) // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, start) // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubPath, @*/); err != nil { + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, start, slayers.CmnHdrLen, R54) // @ ghost sl.CombineRange_Bytes(ub, start, end, writePerm) return serrors.WrapStr("update info field", err) } // @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, start, slayers.CmnHdrLen, R54) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, start) // @ p.SubSliceAbsPktToAbsPkt(ub, start, end) // @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm) @@ -2596,6 +2607,7 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) // @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) >= 0 // @ ensures reserr == nil ==> absPkt(ub) == AbsProcessEgress(old(absPkt(ub))) +// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) // @ decreases func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) @@ -2605,6 +2617,9 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ unfold acc(p.scionLayer.Mem(ub), 1-R55) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) + // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP) // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP) // @ reveal p.EqAbsInfoField(absPkt(ub)) @@ -2621,6 +2636,8 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ assume 0 <= p.path.GetCurrINF(ubPath) if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding 
acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF /*@ ) @*/) /*@ , ubPath @*/); err != nil { // TODO parameter problem invalid path + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ p.path.DowngradePerm(ubPath) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) @@ -2633,6 +2650,8 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // https://github.com/scionproto/scion/issues/4524. //@ TemporaryAssumeForIO(!p.path.GetBase(ubPath).IsXoverSpec()) if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) // @ unfold p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) @@ -2643,6 +2662,9 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ fold acc(p.scionLayer.Mem(ub), R55) // @ assert reveal p.scionLayer.ValidHeaderOffset(ub, startP) // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) @@ -2680,6 +2702,7 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) // @ ensures reserr == nil ==> absPkt(ub) == AbsDoXover(old(absPkt(ub))) +// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) // @ decreases func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { p.segmentChange = true @@ -2689,6 +2712,9 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr process // @ unfold acc(p.scionLayer.Mem(ub), 1-R55) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) + // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP) // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP) // @ p.path.XoverLemma(ubPath) @@ -2700,6 +2726,8 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr process if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { // TODO parameter problem invalid path // (VerifiedSCION) we currently expose a lot of internal information from slayers here. Can we avoid it? 
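+		// (VerifiedSCION) Before returning on this error path, the ghost slicing
+		// performed at the start of this function must be undone: the prefix
+		// ub[:slayers.CmnHdrLen] was split off so that IsSupportedPktSubslice
+		// could relate IsSupportedPkt(ub) to IsSupportedPkt(ub[:slayers.CmnHdrLen]).
+		// The two lemma calls below return that prefix permission and merge it
+		// back, so the caller regains acc(sl.Bytes(ub, 0, len(ub))). This comment
+		// is a sketch of the lemmas' intent; their precise contracts are in the
+		// sl package.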
+ // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ unfold p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) @@ -2709,6 +2737,9 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr process // @ fold acc(p.scionLayer.Mem(ub), R55) // @ assert reveal p.scionLayer.ValidHeaderOffset(ub, startP) // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ assert len(get(old(absPkt(ub)).LeftSeg).Future) > 0 @@ -2875,6 +2906,7 @@ func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/ // @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> absPkt(ub) == old(absPkt(ub)) +// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) // @ ensures reserr != nil && respr.OutPkt != nil ==> // @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases @@ -2899,10 +2931,15 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh // @ assume 0 <= p.path.GetCurrHF(ubPath) // @ reveal p.LastHopLen(ub) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) + // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP) // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubPath @*/); err != nil { + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("update hop field", err) @@ -2910,6 +2947,9 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ assert p.DstIsLocalIngressID(ub) // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.GetBase(ubPath).EqAbsHeader(ubPath)) // postcondition of SetHopfield + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ absPktFutureLemma(ub) @@ -2979,6 +3019,7 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) { // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) // @ ensures reserr == nil ==> absPkt(ub) == old(absPkt(ub)) +// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == 
slayers.IsSupportedPkt(ub) // @ ensures reserr != nil && respr.OutPkt != nil ==> // @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases @@ -3007,16 +3048,24 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho // but Gobra cannot prove it yet // @ assume 0 <= p.path.GetCurrHF(ubPath) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) + // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP) // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startP) // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubPath @*/); err != nil { + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("update hop field", err) } // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.GetBase(ubPath).EqAbsHeader(ubPath)) // postcondition of SetHopfield + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ absPktFutureLemma(ub) @@ -3272,7 +3321,6 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ unfold p.d.validResult(r, aliasesUb) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, aliasesUb) // @ assert ub === p.rawPkt - // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) // @ ghost if(slayers.IsSupportedPkt(ub)) { // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) // @ } @@ -3340,7 +3388,6 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ p.d.InDomainExternalInForwardingMetrics(egressID) // @ assert absPkt(ub) == AbsProcessEgress(nextPkt) // @ nextPkt = absPkt(ub) - // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) // @ ghost if(slayers.IsSupportedPkt(ub)) { // @ ghost if(!p.segmentChange) { // enter/exit event @@ -3361,7 +3408,6 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ ghost if p.d.internalNextHops != nil { unfold acc(accAddr(p.d.internalNextHops), _) } if a, ok := p.d.internalNextHops[egressID]; ok { // @ p.d.getInternal() - // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) // @ ghost if(slayers.IsSupportedPkt(ub)) { // @ if(!p.segmentChange) { // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) From 01387ec429131a3b2016c84a37b8df967b1b990e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Thu, 11 Jul 2024 16:22:52 +0200 Subject: [PATCH 50/57] Drop two assumptions and merge validity criteria `StronglyValid` and `FullyValid` (#366) * backup * backup * fix 
verification error
* fix test
* fix another verification error
* fix verification error
* drop StronglyValid criteria
* cleanup
* Update gobra.yml
* Update .github/workflows/gobra.yml
* Update .github/workflows/gobra.yml
---
 .github/workflows/gobra.yml               |   7 +-
 pkg/slayers/path/scion/base_spec.gobra    |  64 ++++----
 pkg/slayers/path/scion/decoded.go         |   8 +-
 pkg/slayers/path/scion/decoded_spec.gobra |   2 +-
 pkg/slayers/path/scion/raw.go             |  40 +++--
 pkg/slayers/path/scion/raw_spec.gobra     |  10 +-
 pkg/slayers/scion_spec.gobra              |   6 +-
 router/dataplane.go                       | 178 +++++++++++++---------
 router/dataplane_test.go                  |  20 ++-
 9 files changed, 187 insertions(+), 148 deletions(-)

diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml
index 7cc509301..6c1467bad 100644
--- a/.github/workflows/gobra.yml
+++ b/.github/workflows/gobra.yml
@@ -415,7 +415,12 @@ jobs:
         includePaths: ${{ env.includePaths }}
         assumeInjectivityOnInhale: ${{ env.assumeInjectivityOnInhale }}
         checkConsistency: ${{ env.checkConsistency }}
-        chop: 10
+        # Due to a bug introduced in https://github.com/viperproject/gobra/pull/776,
+        # we must currently disable the chopper, otherwise the well-founded orders
+        # for termination checking are not available in the chopped Viper parts.
+        # We should re-enable it whenever possible, as it reduces verification
+        # time by ~8 min (20%).
+        # chop: 10
         parallelizeBranches: '1'
         conditionalizePermissions: '1'
         moreJoins: 'impure'
diff --git a/pkg/slayers/path/scion/base_spec.gobra b/pkg/slayers/path/scion/base_spec.gobra
index 586d2fecf..ea5db53d5 100644
--- a/pkg/slayers/path/scion/base_spec.gobra
+++ b/pkg/slayers/path/scion/base_spec.gobra
@@ -57,8 +57,7 @@ decreases
 pure func (b Base) ValidCurrFieldsSpec() bool {
 	return 0 <= b.NumINF && b.NumINF <= MaxINFs &&
 		0 <= b.NumHops && b.NumHops <= MaxHops &&
-		b.ValidCurrInfSpec() &&
-		b.ValidCurrHfSpec()
+		b.ValidCurrInfSpec() && b.ValidCurrHfSpec()
 }
 
 // A `Base` is weakly valid when the fields `NumINF` and `NumHops` are,
@@ -71,29 +70,29 @@ pure func (b Base) WeaklyValid() bool {
 	return 0 <= b.NumINF && b.NumINF <= MaxINFs &&
 		0 <= b.NumHops && b.NumHops <= MaxHops &&
 		(0 < b.NumINF ==> 0 < b.NumHops) &&
-		b.PathMeta.InBounds() &&
-		b.NumsCompatibleWithSegLen()
+		b.PathMeta.InBounds() && b.NumsCompatibleWithSegLen()
 }
 
-// A `Base` is strongly valid iff it is weakly valid and its `CurrHF`
-// and `CurrINF` are within bounds.
+// A `Base` is valid (a.k.a. fully valid) iff it is weakly valid
+// and its `CurrHF` and `CurrINF` are within bounds, its `CurrHF`
+// is compatible with its `CurrINF`, and there are no singleton
+// segments. In the past, there used to be another validity
+// criterion, stronger than WeaklyValid and weaker than FullyValid.
+// This was known as StronglyValid and has been deprecated.
 ghost
 decreases
-pure func (b Base) StronglyValid() bool {
-	return b.WeaklyValid() && b.ValidCurrFieldsSpec()
+pure func (b Base) Valid() bool {
+	return b.WeaklyValid() &&
+		b.ValidCurrFieldsSpec() &&
+		b.CurrInfMatchesCurrHFSpec() &&
+		b.PathMeta.SegLen[0] != 1 &&
+		b.PathMeta.SegLen[1] != 1 &&
+		b.PathMeta.SegLen[2] != 1
 }
 
-// A `Base` is fully valid iff it is strongly valid and its `CurrHF` is
-// compatible with its `CurrINF`.
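+// CurrInfMatchesCurrHFSpec holds when `CurrINF` designates the segment that
+// contains `CurrHF`, i.e., when it equals the info field index that
+// InfForHfSpec derives from `CurrHF`. As an illustrative sketch: with
+// SegLen == [2, 2, 0], hop fields 0 and 1 belong to info field 0 and hop
+// fields 2 and 3 to info field 1, so CurrHF == 2 requires CurrINF == 1.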
ghost decreases -pure func (b Base) FullyValid() bool { - return b.StronglyValid() && b.CurrInfMatchesCurrHF() -} - -ghost -decreases -pure func (b Base) CurrInfMatchesCurrHF() bool { +pure func (b Base) CurrInfMatchesCurrHFSpec() bool { return b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) } @@ -167,7 +166,7 @@ ghost requires s.NumINF != 0 requires int(s.PathMeta.CurrHF) < s.NumHops-1 ensures s.WeaklyValid() ==> res.WeaklyValid() -ensures s.StronglyValid() ==> res.StronglyValid() +ensures s.Valid() ==> res.Valid() decreases pure func (s Base) IncPathSpec() (res Base) { return Base { @@ -177,6 +176,12 @@ pure func (s Base) IncPathSpec() (res Base) { } } +ghost +requires s.Valid() +ensures s.IsXoverSpec() ==> !s.IncPathSpec().IsXoverSpec() +decreases +func (s Base) NotIsXoverAfterIncPath() {} + ghost decreases pure func (b Base) ReverseSpec() Base { @@ -208,12 +213,10 @@ pure func (b Base) ReverseSegLen() [3]uint8 { } ghost -requires b.StronglyValid() -ensures b.ReverseSpec().StronglyValid() +requires b.Valid() +ensures b.ReverseSpec().Valid() decreases -pure func (b Base) ReversingBaseStronglyValidSegLenHasValidSegLen() Lemma { - return Lemma{} -} +func (b Base) ReversingBaseValidSegLenHasValidSegLen() { } ghost requires b.Mem() @@ -321,21 +324,6 @@ pure func (s MetaHdr) EqAbsHeader(ub []byte) bool { s == DecodedFrom(binary.BigEndian.Uint32(ub[:MetaLen])) } -ghost -opaque -requires MetaLen <= idx && idx <= len(ub) -requires acc(sl.Bytes(ub, 0, len(ub)), R55) -requires acc(sl.Bytes(ub[:idx], 0, idx), R55) -ensures s.EqAbsHeader(ub) == s.EqAbsHeader(ub[:idx]) -decreases -pure func (s MetaHdr) EqAbsHeaderForSublice(ub []byte, idx int) Lemma { - return let _ := Asserting(ub[:MetaLen] === ub[:idx][:MetaLen]) in - unfolding acc(sl.Bytes(ub, 0, len(ub)), R56) in - unfolding acc(sl.Bytes(ub[:idx], 0, idx), R56) in - let _ := Asserting(s.EqAbsHeader(ub) == (s == DecodedFrom(binary.BigEndian.Uint32(ub[:MetaLen])))) in - Lemma{} -} - /** Lemma proven in /VerifiedSCION/verification/utils/bitwise/proofs.dfy **/ ghost requires m.InBounds() diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go index cc64c724e..27eb6e8d3 100644 --- a/pkg/slayers/path/scion/decoded.go +++ b/pkg/slayers/path/scion/decoded.go @@ -213,11 +213,11 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { // @ p.Mem(ubuf) && // @ p == s && // @ typeOf(p) == type[*Decoded] && -// @ (old(s.GetBase(ubuf).StronglyValid()) ==> s.GetBase(ubuf).StronglyValid())) +// @ (old(s.GetBase(ubuf).Valid()) ==> s.GetBase(ubuf).Valid())) // @ ensures r != nil ==> r.ErrorMem() && s.Mem(ubuf) // @ decreases func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { - //@ ghost isValid := s.GetBase(ubuf).StronglyValid() + //@ ghost isValid := s.GetBase(ubuf).Valid() //@ ghost base := s.GetBase(ubuf) //@ ghost metaHdrAferReversingSegLen := MetaHdr { //@ CurrINF: base.PathMeta.CurrINF, @@ -282,8 +282,8 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { s.PathMeta.CurrINF = uint8(s.NumINF) - s.PathMeta.CurrINF - 1 s.PathMeta.CurrHF = uint8(s.NumHops) - s.PathMeta.CurrHF - 1 //@ assert s.Base == base.ReverseSpec() - //@ ghost if isValid { base.ReversingBaseStronglyValidSegLenHasValidSegLen() } - //@ assert isValid ==> s.Base.StronglyValid() + //@ ghost if isValid { base.ReversingBaseValidSegLenHasValidSegLen() } + //@ assert isValid ==> s.Base.Valid() //@ fold s.Base.Mem() //@ fold s.Mem(ubuf) return s, nil diff --git 
a/pkg/slayers/path/scion/decoded_spec.gobra b/pkg/slayers/path/scion/decoded_spec.gobra index 58c3aa191..34e98ca0c 100644 --- a/pkg/slayers/path/scion/decoded_spec.gobra +++ b/pkg/slayers/path/scion/decoded_spec.gobra @@ -113,7 +113,7 @@ requires d.Mem(ubuf) ensures e == nil ==> ( d.Mem(ubuf) && d.LenSpec(ubuf) == old(d.LenSpec(ubuf)) && - (old(d.GetBase(ubuf).StronglyValid()) ==> d.GetBase(ubuf).StronglyValid())) + (old(d.GetBase(ubuf).Valid()) ==> d.GetBase(ubuf).Valid())) ensures e != nil ==> d.NonInitMem() && e.ErrorMem() decreases func (d *Decoded) IncPath(ghost ubuf []byte) (e error) { diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index 95059b614..ca3595a5e 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -139,7 +139,7 @@ func (s *Raw) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, err error) { // @ ensures err == nil ==> ( // @ let newUb := s.RawBufferMem(ubuf) in // @ d.Mem(newUb) && -// @ (old(s.GetBase(ubuf).StronglyValid()) ==> d.GetBase(newUb).StronglyValid())) +// @ (old(s.GetBase(ubuf).Valid()) ==> d.GetBase(newUb).Valid())) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { @@ -147,9 +147,9 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { //@ unfold acc(s.Base.Mem(), R6) //@ ghost var base Base = s.Base //@ ghost var pathMeta MetaHdr = s.Base.PathMeta - //@ ghost validIdxs := s.GetBase(ubuf).StronglyValid() + //@ ghost validIdxs := s.GetBase(ubuf).Valid() //@ assert validIdxs ==> s.Base.PathMeta.InBounds() - //@ assert validIdxs ==> base.StronglyValid() + //@ assert validIdxs ==> base.Valid() //@ assert s.Raw[:MetaLen] === ubuf[:MetaLen] // (VerifiedSCION) In this method, many slice operations are done in two @@ -207,7 +207,7 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { //@ ghost if validIdxs { //@ s.PathMeta.SerializeAndDeserializeLemma(b0, b1, b2, b3) //@ assert pathMeta == decoded.GetMetaHdr(s.Raw) - //@ assert decoded.GetBase(s.Raw).StronglyValid() + //@ assert decoded.GetBase(s.Raw).Valid() //@ } //@ sl.Unslice_Bytes(ubuf, 0, len(s.Raw), HalfPerm) //@ sl.Unslice_Bytes(ubuf, 0, len(s.Raw), HalfPerm) @@ -265,17 +265,16 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ tail := ubuf[MetaLen:] //@ unfold acc(sl.Bytes(tail, 0, len(tail)), R50) //@ oldHfIdxSeg := oldCurrHfIdx-oldPrevSegLen - //@ WidenCurrSeg(ubuf, oldOffset + MetaLen, oldCurrInfIdx, oldHfIdxSeg, - //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) - //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSegs, MetaLen, MetaLen, len(ubuf)) - //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSegs, MetaLen, MetaLen, len(ubuf)) - //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSegs, MetaLen, MetaLen, len(ubuf)) - //@ LenCurrSeg(tail, oldOffset, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) + //@ WidenCurrSeg(ubuf, oldOffset + MetaLen, oldCurrInfIdx, oldHfIdxSeg, oldSegLen, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ LenCurrSeg(tail, oldOffset, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) //@ oldAbsPkt := reveal s.absPkt(ubuf) //@ sl.SplitRange_Bytes(ubuf, 0, MetaLen, HalfPerm) //@ unfold acc(s.Base.Mem(), R2) err := s.PathMeta.SerializeTo(s.Raw[:MetaLen]) - //@ assert 
s.Base.StronglyValid() + //@ assert s.Base.Valid() //@ assert s.PathMeta.InBounds() //@ v := s.Raw[:MetaLen] //@ b0 := sl.GetByte(v, 0, MetaLen, 0) @@ -284,7 +283,7 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ b3 := sl.GetByte(v, 0, MetaLen, 3) //@ s.PathMeta.SerializeAndDeserializeLemma(b0, b1, b2, b3) //@ assert s.PathMeta.EqAbsHeader(v) - //@ assert RawBytesToBase(v).StronglyValid() + //@ assert RawBytesToBase(v).Valid() //@ sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) //@ ValidPktMetaHdrSublice(ubuf, MetaLen) //@ assert s.EqAbsHeader(ubuf) == s.PathMeta.EqAbsHeader(ubuf) @@ -441,7 +440,7 @@ func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @ //@ ghost if idx == currInfIdx { //@ CurrSegEquality(ubuf, offset, currInfIdx, hfIdxSeg, segLen) //@ LeftSegEquality(ubuf, currInfIdx+1, segLens) - //@ MidSegEquality(ubuf, currInfIdx+2, segLens) + //@ MidSegEquality(ubuf, currInfIdx+2, segLens) //@ RightSegEquality(ubuf, currInfIdx-1, segLens) //@ } //@ reveal s.absPkt(ubuf) @@ -582,7 +581,7 @@ func (s *Raw) IsPenultimateHop( /*@ ghost ubuf []byte @*/ ) bool { // IsLastHop returns whether the current hop is the last hop on the path. // @ preserves acc(s.Mem(ubuf), R40) -// @ ensures res == s.IsLastHopSpec(ubuf) +// @ ensures res == s.IsLastHopSpec(ubuf) // @ decreases func (s *Raw) IsLastHop( /*@ ghost ubuf []byte @*/ ) (res bool) { //@ unfold acc(s.Mem(ubuf), R40) @@ -591,3 +590,16 @@ func (s *Raw) IsLastHop( /*@ ghost ubuf []byte @*/ ) (res bool) { //@ defer fold acc(s.Base.Mem(), R40) return int(s.PathMeta.CurrHF) == (s.NumHops - 1) } + +// CurrINFMatchesCurrHF returns whether the the path's current hopfield +// is in the path's current segment. +// @ preserves acc(s.Mem(ub), R40) +// @ ensures res == s.GetBase(ub).CurrInfMatchesCurrHFSpec() +// @ decreases +func (s *Raw) CurrINFMatchesCurrHF( /*@ ghost ub []byte @*/ ) (res bool) { + // @ unfold acc(s.Mem(ub), R40) + // @ defer fold acc(s.Mem(ub), R40) + // @ unfold acc(s.Base.Mem(), R40) + // @ defer fold acc(s.Base.Mem(), R40) + return s.PathMeta.CurrINF == s.infIndexForHF(s.PathMeta.CurrHF) +} diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index 5fd0ba9a6..7b9066698 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -181,7 +181,8 @@ ghost requires acc(r.Mem(ub), _) decreases pure func (r *Raw) GetCurrHF(ghost ub []byte) uint8 { - return unfolding acc(r.Mem(ub), _) in (unfolding acc(r.Base.Mem(), _) in r.PathMeta.CurrHF) + return unfolding acc(r.Mem(ub), _) in + (unfolding acc(r.Base.Mem(), _) in r.PathMeta.CurrHF) } ghost @@ -436,9 +437,8 @@ pure func validPktMetaHdr(raw []byte) bool { let seg3 := int(metaHdr.SegLen[2]) in let segs := io.CombineSegLens(seg1, seg2, seg3) in let base := RawBytesToBase(raw) in - 0 < metaHdr.SegLen[0] && - base.StronglyValid() && - base.CurrInfMatchesCurrHF() && + 0 < metaHdr.SegLen[0] && + base.Valid() && PktLen(segs, MetaLen) <= len(raw) } @@ -463,7 +463,7 @@ func ValidPktMetaHdrSublice(raw []byte, idx int) { ghost requires acc(s.Mem(ub), R54) requires acc(sl.Bytes(ub, 0, len(ub)), R55) -requires s.GetBase(ub).FullyValid() +requires s.GetBase(ub).Valid() requires s.GetBase(ub).EqAbsHeader(ub) ensures acc(sl.Bytes(ub, 0, len(ub)), R55) ensures acc(s.Mem(ub), R54) diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index 00069e6a0..73e838c08 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -189,9 +189,9 @@ pure 
func (s *SCION) ValidPathMetaData(ghost ub []byte) bool { return unfolding acc(s.Mem(ub), _) in let ubPath := s.UBPath(ub) in (typeOf(s.Path) == type[*scion.Raw] ==> - s.Path.(*scion.Raw).GetBase(ubPath).StronglyValid()) && + s.Path.(*scion.Raw).GetBase(ubPath).Valid()) && (typeOf(s.Path) == type[*epic.Path] ==> - s.Path.(*epic.Path).GetBase(ubPath).StronglyValid()) + s.Path.(*epic.Path).GetBase(ubPath).Valid()) } // TODO: simplify the body of the predicate when let expressions @@ -470,7 +470,7 @@ pure func ValidPktMetaHdr(raw []byte) bool { let segs := io.CombineSegLens(seg1, seg2, seg3) in let base := scion.Base{metaHdr, segs.NumInfoFields(), segs.TotalHops()} in 0 < metaHdr.SegLen[0] && - base.FullyValid() && + base.Valid() && scion.PktLen(segs, start + scion.MetaLen) <= length } diff --git a/router/dataplane.go b/router/dataplane.go index db8b812b5..e06dfaa73 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1916,6 +1916,9 @@ func (p *scionPacketProcessor) packSCMP( // @ requires p.path === p.scionLayer.GetPath(ub) // @ requires acc(&p.hopField) && acc(&p.infoField) // @ requires acc(sl.Bytes(ub, 0, len(ub)), R1) +// Preconditions for IO: +// @ requires p.scionLayer.EqAbsHeader(ub) +// @ requires p.scionLayer.ValidScionInitSpec(ub) // @ ensures acc(sl.Bytes(ub, 0, len(ub)), R1) // @ ensures acc(&p.d, R50) // @ ensures acc(p.scionLayer.Mem(ub), R6) @@ -1923,20 +1926,16 @@ func (p *scionPacketProcessor) packSCMP( // @ ensures p.path === p.scionLayer.GetPath(ub) // @ ensures acc(&p.hopField) && acc(&p.infoField) // @ ensures respr === processResult{} -// @ ensures reserr == nil ==> ( +// @ ensures reserr == nil ==> // @ let ubPath := p.scionLayer.UBPath(ub) in // @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ p.path.GetCurrHF(ubPath) < p.path.GetNumHops(ubPath)) +// @ p.path.GetBase(ubPath).Valid() // @ ensures acc(p.scionLayer.Mem(ub), R6) // @ ensures p.d.validResult(respr, false) -// @ ensures reserr == nil ==> ( -// @ let ubPath := p.scionLayer.UBPath(ub) in -// @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ p.path.GetCurrINF(ubPath) < p.path.GetNumINF(ubPath)) // @ ensures reserr != nil ==> reserr.ErrorMem() -// contracts for IO-spec -// @ requires p.scionLayer.EqAbsHeader(ub) && p.scionLayer.ValidScionInitSpec(ub) -// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// Postconditions for IO: +// @ ensures reserr == nil ==> +// @ slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) // @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) @@ -1971,9 +1970,24 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // TODO(lukedirtwalker) parameter problem invalid path? return processResult{}, err } - // (VerifiedSCION) This assumption will be dropped after clarifying - // https://github.com/scionproto/scion/issues/4531 - // @ TemporaryAssumeForIO(p.path.GetBase(ubPath).CurrInfMatchesCurrHF()) + // Segments without the Peering flag must consist of at least two HFs: + // https://github.com/scionproto/scion/issues/4524 + // (VerifiedSCION) The version verified here is prior to the support of peering + // links, so we do not check the Peering flag here. 
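+	// A SegLen entry of 1 would denote a singleton segment, i.e., a segment
+	// whose only hop field is simultaneously its first and its last one.
+	// Rejecting such paths here, together with the CurrINFMatchesCurrHF check
+	// below, is what allows this function to establish the Valid() criterion,
+	// which requires SegLen[i] != 1, for every successfully parsed path.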
+ hasSingletonSegment := + // @ unfolding acc(p.path.Mem(ubPath), _) in + // @ unfolding acc(p.path.Base.Mem(), _) in + p.path.PathMeta.SegLen[0] == 1 || + p.path.PathMeta.SegLen[1] == 1 || + p.path.PathMeta.SegLen[2] == 1 + if hasSingletonSegment { + // @ establishMemMalformedPath() + return processResult{}, malformedPath + } + if !p.path.CurrINFMatchesCurrHF( /*@ ubPath @*/ ) { + // @ establishMemMalformedPath() + return processResult{}, malformedPath + } // @ p.EstablishEqAbsHeader(ub, startP, endP) // @ p.path.EstablishValidPktMetaHdr(ubPath) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) @@ -2264,9 +2278,12 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ // @ requires len(oldPkt.CurrSeg.Future) > 0 // @ requires p.EqAbsHopField(oldPkt) // @ requires p.EqAbsInfoField(oldPkt) -// @ requires p.segmentChange ==> oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 -// @ requires !p.segmentChange ==> AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) -// @ requires p.segmentChange ==> AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ requires p.segmentChange ==> +// @ oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 +// @ requires !p.segmentChange ==> +// @ AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ requires p.segmentChange ==> +// @ AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID)) // @ ensures reserr == nil ==> p.NoBouncingPkt(oldPkt) // @ ensures reserr == nil && !p.segmentChange ==> // @ AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) @@ -2586,28 +2603,30 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( } } -// @ requires acc(&p.path, R20) -// @ requires p.scionLayer.Mem(ub) -// @ requires p.path === p.scionLayer.GetPath(ub) -// @ requires sl.Bytes(ub, 0, len(ub)) -// @ requires acc(&p.infoField) -// @ requires acc(&p.hopField, R20) -// @ ensures acc(&p.infoField) -// @ ensures acc(&p.hopField, R20) -// @ ensures sl.Bytes(ub, 0, len(ub)) -// @ ensures acc(&p.path, R20) -// @ ensures reserr == nil ==> p.scionLayer.Mem(ub) -// @ ensures reserr != nil ==> p.scionLayer.NonInitMem() -// @ ensures reserr != nil ==> reserr.ErrorMem() -// contracts for IO-spec -// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ requires len(absPkt(ub).CurrSeg.Future) > 0 -// @ requires p.EqAbsHopField(absPkt(ub)) -// @ requires p.EqAbsInfoField(absPkt(ub)) -// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) >= 0 -// @ ensures reserr == nil ==> absPkt(ub) == AbsProcessEgress(old(absPkt(ub))) -// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) +// @ requires acc(&p.path, R20) +// @ requires p.scionLayer.Mem(ub) +// @ requires p.path === p.scionLayer.GetPath(ub) +// @ requires sl.Bytes(ub, 0, len(ub)) +// @ requires acc(&p.infoField) +// @ requires acc(&p.hopField, R20) +// @ requires !p.GetIsXoverSpec(ub) +// Preconditions for IO: +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires len(absPkt(ub).CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(absPkt(ub)) +// @ requires p.EqAbsInfoField(absPkt(ub)) +// @ ensures acc(&p.infoField) +// @ ensures acc(&p.hopField, R20) +// @ ensures sl.Bytes(ub, 0, len(ub)) +// @ ensures acc(&p.path, R20) +// @ ensures reserr == nil ==> 
p.scionLayer.Mem(ub) +// @ ensures reserr != nil ==> p.scionLayer.NonInitMem() +// @ ensures reserr != nil ==> reserr.ErrorMem() +// Postconditions for IO: +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) >= 0 +// @ ensures reserr == nil ==> absPkt(ub) == AbsProcessEgress(old(absPkt(ub))) +// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) // @ decreases func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) @@ -2646,9 +2665,6 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr return serrors.WrapStr("update info field", err) } } - // (VerifiedSCION) This assumption will be dropped after clarifying - // https://github.com/scionproto/scion/issues/4524. - //@ TemporaryAssumeForIO(!p.path.GetBase(ubPath).IsXoverSpec()) if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) @@ -2674,37 +2690,50 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr return nil } -// @ requires acc(&p.path, R20) -// @ requires p.scionLayer.Mem(ub) -// @ requires p.path == p.scionLayer.GetPath(ub) -// @ requires sl.Bytes(ub, 0, len(ub)) -// @ preserves acc(&p.segmentChange) -// @ preserves acc(&p.hopField) -// @ preserves acc(&p.infoField) -// @ ensures sl.Bytes(ub, 0, len(ub)) -// @ ensures acc(&p.path, R20) -// @ ensures reserr == nil ==> p.scionLayer.Mem(ub) -// @ ensures reserr == nil ==> p.scionLayer.UBPath(ub) === old(p.scionLayer.UBPath(ub)) -// @ ensures reserr == nil ==> p.scionLayer.GetPath(ub) == old(p.scionLayer.GetPath(ub)) -// @ ensures reserr != nil ==> p.scionLayer.NonInitMem() -// @ ensures p.segmentChange -// @ ensures respr === processResult{} -// @ ensures reserr != nil ==> reserr.ErrorMem() -// contract for IO-spec -// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ requires p.GetIsXoverSpec(ub) -// @ ensures reserr == nil ==> len(old(absPkt(ub)).CurrSeg.Future) == 1 -// @ ensures reserr == nil ==> old(absPkt(ub)).LeftSeg != none[io.IO_seg2] -// @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).Future) > 0 -// @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).History) == 0 -// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 -// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) -// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) -// @ ensures reserr == nil ==> absPkt(ub) == AbsDoXover(old(absPkt(ub))) -// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) +// @ requires acc(&p.path, R20) +// @ requires p.scionLayer.Mem(ub) +// @ requires p.path == p.scionLayer.GetPath(ub) +// @ requires sl.Bytes(ub, 0, len(ub)) +// @ requires acc(&p.segmentChange) +// @ requires acc(&p.hopField) +// @ requires acc(&p.infoField) +// Preconditions for IO: +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires p.GetIsXoverSpec(ub) +// @ requires let ubPath := p.scionLayer.UBPath(ub) in +// @ (unfolding acc(p.scionLayer.Mem(ub), _) in p.path.GetBase(ubPath)) == currBase +// @ requires currBase.Valid() +// @ ensures acc(&p.segmentChange) +// @ ensures acc(&p.hopField) +// 
@ ensures acc(&p.infoField) +// @ ensures sl.Bytes(ub, 0, len(ub)) +// @ ensures acc(&p.path, R20) +// @ ensures reserr == nil ==> p.scionLayer.Mem(ub) +// @ ensures reserr == nil ==> p.scionLayer.UBPath(ub) === old(p.scionLayer.UBPath(ub)) +// @ ensures reserr == nil ==> p.scionLayer.GetPath(ub) == old(p.scionLayer.GetPath(ub)) +// @ ensures reserr != nil ==> p.scionLayer.NonInitMem() +// @ ensures p.segmentChange +// @ ensures respr === processResult{} +// @ ensures reserr != nil ==> reserr.ErrorMem() +// Postconditions for IO: +// @ ensures reserr == nil ==> len(old(absPkt(ub)).CurrSeg.Future) == 1 +// @ ensures reserr == nil ==> old(absPkt(ub)).LeftSeg != none[io.IO_seg2] +// @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).Future) > 0 +// @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).History) == 0 +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) +// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) +// @ ensures reserr == nil ==> absPkt(ub) == AbsDoXover(old(absPkt(ub))) +// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) +// @ ensures reserr == nil ==> +// @ let ubPath := p.scionLayer.UBPath(ub) in +// @ (unfolding acc(p.scionLayer.Mem(ub), _) in +// @ p.path === p.scionLayer.GetPath(ub) && +// @ p.path.GetBase(ubPath) == currBase.IncPathSpec() && +// @ currBase.IncPathSpec().Valid()) // @ decreases -func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost currBase scion.Base @*/ ) (respr processResult, reserr error) { p.segmentChange = true // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) @@ -3333,8 +3362,9 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ ghost ubPath := p.scionLayer.UBPath(ub) if p.path.IsXover( /*@ ubPath @*/ ) { // @ assert p.GetIsXoverSpec(ub) + // @ ghost currBase := p.path.GetBase(ubPath) // @ fold acc(p.scionLayer.Mem(ub), R3) - if r, err := p.doXover( /*@ ub @*/ ); err != nil { + if r, err := p.doXover( /*@ ub, currBase @*/ ); err != nil { // @ fold p.d.validResult(processResult{}, false) return r, err /*@, false, absReturnErr(r) @*/ } @@ -3353,6 +3383,8 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ assert AbsVerifyCurrentMACConstraint(nextPkt, dp) // @ unfold acc(p.scionLayer.Mem(ub), R3) } + // @ assert p.path.GetBase(ubPath).Valid() + // @ p.path.GetBase(ubPath).NotIsXoverAfterIncPath() // @ fold acc(p.scionLayer.Mem(ub), R3) // @ assert p.segmentChange ==> nextPkt.RightSeg != none[io.IO_seg2] if r, err := p.validateEgressID( /*@ nextPkt, dp @*/ ); err != nil { @@ -3390,10 +3422,8 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ nextPkt = absPkt(ub) // @ ghost if(slayers.IsSupportedPkt(ub)) { // @ ghost if(!p.segmentChange) { - // enter/exit event // @ ExternalEnterOrExitEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) // @ } else { - // xover event // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) // @ } // @ } @@ -3982,7 +4012,7 @@ func (p *scionPacketProcessor) prepareSCMP( _, external := 
p.d.external[p.ingressID] if external { // @ requires revPath.Mem(rawPath) - // @ requires revPath.GetBase(rawPath).StronglyValid() + // @ requires revPath.GetBase(rawPath).Valid() // @ ensures revPath.Mem(rawPath) // @ decreases // @ outline( diff --git a/router/dataplane_test.go b/router/dataplane_test.go index 4ae857cb9..1623157d2 100644 --- a/router/dataplane_test.go +++ b/router/dataplane_test.go @@ -749,8 +749,9 @@ func TestProcessPkt(t *testing.T) { dpath := &scion.Decoded{ Base: scion.Base{ PathMeta: scion.MetaHdr{ - CurrHF: 2, - SegLen: [3]uint8{2, 2, 0}, + CurrINF: 0, + CurrHF: 1, + SegLen: [3]uint8{2, 2, 0}, }, NumINF: 2, NumHops: 4, @@ -762,20 +763,23 @@ func TestProcessPkt(t *testing.T) { {SegID: 0x222, ConsDir: false, Timestamp: util.TimeToSecs(now)}, }, HopFields: []path.HopField{ - {ConsIngress: 0, ConsEgress: 1}, // IA 110 {ConsIngress: 31, ConsEgress: 0}, // Src - {ConsIngress: 0, ConsEgress: 51}, // Dst + {ConsIngress: 0, ConsEgress: 51}, // IA 110 {ConsIngress: 3, ConsEgress: 0}, // IA 110 + {ConsIngress: 0, ConsEgress: 1}, // Dst }, } - dpath.HopFields[2].Mac = computeMAC(t, key, dpath.InfoFields[0], dpath.HopFields[2]) - dpath.HopFields[3].Mac = computeMAC(t, key, dpath.InfoFields[1], dpath.HopFields[3]) + dpath.HopFields[1].Mac = computeMAC(t, key, dpath.InfoFields[0], dpath.HopFields[1]) + dpath.HopFields[2].Mac = computeMAC(t, key, dpath.InfoFields[1], dpath.HopFields[2]) if !afterProcessing { - dpath.InfoFields[0].UpdateSegID(dpath.HopFields[2].Mac) + dpath.InfoFields[0].UpdateSegID(dpath.HopFields[1].Mac) return toMsg(t, spkt, dpath) } - require.NoError(t, dpath.IncPath()) + + dpath.PathMeta.CurrHF++ + dpath.PathMeta.CurrINF++ + ret := toMsg(t, spkt, dpath) ret.Addr = &net.UDPAddr{IP: net.ParseIP("10.0.200.200").To4(), Port: 30043} ret.Flags, ret.NN, ret.N, ret.OOB = 0, 0, 0, nil From 2d69d42fdb7728cd3731dfbbd7264c3dc5f0ebf4 Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Mon, 15 Jul 2024 14:48:13 +0200 Subject: [PATCH 51/57] Drop SetHopfield related assumptions (#368) * proof of sethopfield and io-assumptions * fix syntax errors * fix termination measure * fix verification errors * simplifications and comments * fix syntax error * feedback * fix verification error * renaming * space between arithmetic operands * increase timeout of path/scion * fix verification error * test: parallelizeBranches for dependencies * test: increase timeout for scion package to 20 min * test: increase timeout for scion package to 1h * use parallelizeBranches only for scion package * stronger precondition for setHopfield * Revert "stronger precondition for setHopfield" * test: scion pkg without parallelizeBranches * Revert "test: scion pkg without parallelizeBranches" * fix merging mistake * assume postconditions in setHopfield * add comment to IO assumptions * increase timeout for scion package * revert timeout increase * feedback --- .github/workflows/gobra.yml | 3 +- pkg/slayers/path/hopfield.go | 28 +-- .../path/scion/info_hop_setter_lemmas.gobra | 193 ++++++++++++++++-- pkg/slayers/path/scion/raw.go | 42 +++- pkg/slayers/path/scion/raw_spec.gobra | 53 +++-- router/dataplane.go | 67 +++--- router/io-spec-abstract-transitions.gobra | 27 +-- router/io-spec-lemmas.gobra | 16 +- verification/io/io_spec_definitions.gobra | 145 +++++++------ 9 files changed, 389 insertions(+), 185 deletions(-) diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml index 6c1467bad..d88022b09 100644 --- a/.github/workflows/gobra.yml +++ 
b/.github/workflows/gobra.yml @@ -431,5 +431,4 @@ jobs: useZ3API: ${{ env.useZ3API }} disableNL: '0' viperBackend: ${{ env.viperBackend }} - unsafeWildcardOptimization: '0' - + unsafeWildcardOptimization: '0' \ No newline at end of file diff --git a/pkg/slayers/path/hopfield.go b/pkg/slayers/path/hopfield.go index c26b6d5e1..57452ac26 100644 --- a/pkg/slayers/path/hopfield.go +++ b/pkg/slayers/path/hopfield.go @@ -79,8 +79,8 @@ type HopField struct { // @ preserves acc(sl.Bytes(raw, 0, HopLen), R45) // @ ensures h.Mem() // @ ensures err == nil -// @ ensures unfolding h.Mem() in -// @ BytesToIO_HF(raw, 0, 0, HopLen) == h.ToIO_HF() +// @ ensures BytesToIO_HF(raw, 0, 0, HopLen) == +// @ unfolding acc(h.Mem(), R10) in h.ToIO_HF() // @ decreases func (h *HopField) DecodeFromBytes(raw []byte) (err error) { if len(raw) < HopLen { @@ -114,16 +114,13 @@ func (h *HopField) DecodeFromBytes(raw []byte) (err error) { // @ preserves acc(h.Mem(), R10) // @ preserves sl.Bytes(b, 0, HopLen) // @ ensures err == nil +// @ ensures BytesToIO_HF(b, 0, 0, HopLen) == +// @ unfolding acc(h.Mem(), R10) in h.ToIO_HF() // @ decreases func (h *HopField) SerializeTo(b []byte) (err error) { if len(b) < HopLen { return serrors.New("buffer for HopField too short", "expected", MacLen, "actual", len(b)) } - //@ requires len(b) >= HopLen - //@ preserves acc(h.Mem(), R11) - //@ preserves sl.Bytes(b, 0, HopLen) - //@ decreases - //@ outline( //@ unfold sl.Bytes(b, 0, HopLen) //@ unfold acc(h.Mem(), R11) b[0] = 0 @@ -139,24 +136,17 @@ func (h *HopField) SerializeTo(b []byte) (err error) { //@ assert &b[4:6][0] == &b[4] && &b[4:6][1] == &b[5] binary.BigEndian.PutUint16(b[4:6], h.ConsEgress) //@ assert forall i int :: { &b[i] } 0 <= i && i < HopLen ==> acc(&b[i]) - //@ fold sl.Bytes(b, 0, HopLen) - //@ fold acc(h.Mem(), R11) - //@ ) - //@ requires len(b) >= HopLen - //@ preserves acc(h.Mem(), R11) - //@ preserves sl.Bytes(b, 0, HopLen) - //@ decreases - //@ outline( - //@ unfold sl.Bytes(b, 0, HopLen) - //@ unfold acc(h.Mem(), R11) //@ assert forall i int :: { &h.Mac[:][i] } 0 <= i && i < len(h.Mac) ==> //@ &h.Mac[i] == &h.Mac[:][i] //@ assert forall i int :: { &b[6:6+MacLen][i] }{ &b[i+6] } 0 <= i && i < MacLen ==> //@ &b[6:6+MacLen][i] == &b[i+6] - copy(b[6:6+MacLen], h.Mac[:] /*@, R11 @*/) + copy(b[6:6+MacLen], h.Mac[:] /*@, R47 @*/) + //@ assert forall i int :: {&h.Mac[:][i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == b[6:6+MacLen][i] + //@ assert forall i int :: {&h.Mac[i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == h.Mac[i] + //@ EqualBytesImplyEqualMac(b[6:6+MacLen], h.Mac) //@ fold sl.Bytes(b, 0, HopLen) + //@ assert h.ToIO_HF() == BytesToIO_HF(b, 0, 0, HopLen) //@ fold acc(h.Mem(), R11) - //@ ) return nil } diff --git a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra index 09e421953..04aa00308 100644 --- a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra +++ b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra @@ -45,7 +45,7 @@ pure func InfofieldByteSlice(raw []byte, currInfIdx int) ([]byte) { return let infOffset := currInfIdx == 4 ? 
path.InfoFieldOffset(0, MetaLen) :
		path.InfoFieldOffset(currInfIdx, MetaLen) in
-	raw[infOffset:infOffset+path.InfoLen]
+	raw[infOffset:infOffset + path.InfoLen]
}

// HopfieldsStartIdx returns the index of the first byte of the hopfields of a segment
@@ -61,8 +61,8 @@ pure func HopfieldsStartIdx(currInfIdx int, segs io.SegLens) int {
	return let numInf := segs.NumInfoFields() in
	let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
	(currInfIdx == 0 || currInfIdx == 4) ? infOffset :
-		currInfIdx == 1 ? infOffset+segs.Seg1Len*path.HopLen :
-		infOffset+(segs.Seg1Len+segs.Seg2Len)*path.HopLen
+		currInfIdx == 1 ? infOffset + segs.Seg1Len * path.HopLen :
+		infOffset + (segs.Seg1Len + segs.Seg2Len) * path.HopLen
}

// HopfieldsEndIdx returns the index of the last byte of the hopfields of a segment
@@ -77,9 +77,9 @@
decreases
pure func HopfieldsEndIdx(currInfIdx int, segs io.SegLens) int {
	return let numInf := segs.NumInfoFields() in
	let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
-	(currInfIdx == 0 || currInfIdx == 4) ? infOffset+segs.Seg1Len*path.HopLen :
-	currInfIdx == 1 ? infOffset+(segs.Seg1Len+segs.Seg2Len)*path.HopLen :
-	infOffset+(segs.Seg1Len+segs.Seg2Len+segs.Seg3Len)*path.HopLen
+	(currInfIdx == 0 || currInfIdx == 4) ? infOffset + segs.Seg1Len * path.HopLen :
+	currInfIdx == 1 ? infOffset + (segs.Seg1Len + segs.Seg2Len) * path.HopLen :
+	infOffset + (segs.Seg1Len + segs.Seg2Len + segs.Seg3Len) * path.HopLen
}

// HopfieldsByteSlice returns the byte slice of the hopfields of a segment
@@ -107,9 +107,9 @@
requires segs.Valid()
requires PktLen(segs, MetaLen) <= len(raw)
requires acc(sl.Bytes(raw, 0, len(raw)), p)
ensures acc(sl.Bytes(raw[:HopfieldsStartIdx(0, segs)], 0, HopfieldsStartIdx(0, segs)), p)
-ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 0, segs), 0, segs.Seg1Len*path.HopLen), p)
-ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 1, segs), 0, segs.Seg2Len*path.HopLen), p)
-ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 2, segs), 0, segs.Seg3Len*path.HopLen), p)
+ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 0, segs), 0, segs.Seg1Len * path.HopLen), p)
+ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 1, segs), 0, segs.Seg2Len * path.HopLen), p)
+ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 2, segs), 0, segs.Seg3Len * path.HopLen), p)
ensures acc(sl.Bytes(raw[HopfieldsEndIdx(2, segs):], 0, len(raw[HopfieldsEndIdx(2, segs):])), p)
decreases
func SliceBytesIntoSegments(raw []byte, segs io.SegLens, p perm) {
@@ -152,7 +152,7 @@ func CombineBytesFromSegments(raw []byte, segs io.SegLens, p perm) {
ghost
requires 0 < p
requires segs.Valid()
-requires PktLen(segs, MetaLen) <= len(raw)
+requires MetaLen + numInf * path.InfoLen <= len(raw)
requires numInf == segs.NumInfoFields()
requires acc(sl.Bytes(raw, 0, len(raw)), p)
ensures acc(sl.Bytes(raw[:MetaLen], 0, MetaLen), p)
@@ -184,7 +184,7 @@ func SliceBytesIntoInfoFields(raw []byte, numInf int, segs io.SegLens, p perm) {
ghost
requires 0 < p
requires segs.Valid()
-requires PktLen(segs, MetaLen) <= len(raw)
+requires MetaLen + numInf * path.InfoLen <= len(raw)
requires numInf == segs.NumInfoFields()
requires acc(sl.Bytes(raw[:MetaLen], 0, MetaLen), p)
requires acc(sl.Bytes(InfofieldByteSlice(raw, 0), 0, path.InfoLen), p)
@@ -220,7 +220,7 @@
ghost
opaque
requires 0 < SegLen
requires 0 <= currHfIdx && currHfIdx <= SegLen
-requires SegLen*path.HopLen == len(hopfields)
+requires SegLen * path.HopLen == len(hopfields)
requires acc(sl.Bytes(hopfields, 0, len(hopfields)), R56)
decreases
pure func CurrSegWithInfo(hopfields []byte, currHfIdx int, 
SegLen int, inf io.AbsInfoField) io.IO_seg3 { @@ -240,7 +240,7 @@ requires (currInfIdx == 1 && segs.Seg2Len > 0) || let start := HopfieldsStartIdx(currInfIdx, segs) in let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && - len(hopfields) == end-start && + len(hopfields) == end - start && acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) decreases pure func LeftSegWithInfo( @@ -267,7 +267,7 @@ requires (currInfIdx == 0 && segs.Seg2Len > 0) || let start := HopfieldsStartIdx(currInfIdx, segs) in let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && - len(hopfields) == end-start && + len(hopfields) == end - start && acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) decreases pure func RightSegWithInfo( @@ -294,7 +294,7 @@ requires (segs.Seg2Len > 0 && segs.Seg3Len > 0 && let start := HopfieldsStartIdx(currInfIdx, segs) in let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && - len(hopfields) == end-start && + len(hopfields) == end - start && acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) decreases pure func MidSegWithInfo( @@ -318,10 +318,10 @@ requires offset + path.HopLen * SegLen <= len(raw) requires 0 <= currHfIdx && currHfIdx <= SegLen requires 0 <= currInfIdx && currInfIdx < 3 preserves acc(sl.Bytes(raw, 0, len(raw)), R50) -preserves acc(sl.Bytes(raw[offset:offset+SegLen*path.HopLen], 0, SegLen*path.HopLen), R50) +preserves acc(sl.Bytes(raw[offset:offset + SegLen * path.HopLen], 0, SegLen * path.HopLen), R50) preserves acc(sl.Bytes(InfofieldByteSlice(raw, currInfIdx), 0, path.InfoLen), R50) ensures let inf := path.BytesToAbsInfoField(InfofieldByteSlice(raw, currInfIdx), 0) in - CurrSegWithInfo(raw[offset:offset+SegLen*path.HopLen], currHfIdx, SegLen, inf) == + CurrSegWithInfo(raw[offset:offset + SegLen * path.HopLen], currHfIdx, SegLen, inf) == CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) decreases func CurrSegEquality(raw []byte, offset int, currInfIdx int, currHfIdx int, SegLen int) { @@ -333,11 +333,11 @@ func CurrSegEquality(raw []byte, offset int, currInfIdx int, currHfIdx int, SegL assert reveal path.BytesToAbsInfoField(raw, infOffset) == reveal path.BytesToAbsInfoField(infoBytes, 0) reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) - reveal CurrSegWithInfo(raw[offset:offset+SegLen*path.HopLen], currHfIdx, SegLen, inf) + reveal CurrSegWithInfo(raw[offset:offset + SegLen * path.HopLen], currHfIdx, SegLen, inf) fold acc(sl.Bytes(raw, 0, len(raw)), R56) fold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) widenSegment(raw, offset, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, - inf.Peer, SegLen, offset, offset+SegLen*path.HopLen) + inf.Peer, SegLen, offset, offset + SegLen * path.HopLen) } // UpdateCurrSegInfo proves that updating the infofield from inf1 to inf2 does not alter the hopfields @@ -345,7 +345,7 @@ func CurrSegEquality(raw []byte, offset int, currInfIdx int, currHfIdx int, SegL ghost requires 0 < SegLen requires 0 <= currHfIdx && currHfIdx <= SegLen -requires SegLen*path.HopLen == len(raw) +requires SegLen * path.HopLen == len(raw) preserves acc(sl.Bytes(raw, 0, len(raw)), R50) ensures CurrSegWithInfo(raw, currHfIdx, SegLen, inf1).UpdateCurrSeg(inf2) == CurrSegWithInfo(raw, currHfIdx, SegLen, inf2) @@ -543,4 +543,157 @@ func MidSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { } else { reveal MidSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } +} + +// `BytesStoreCurrSeg(hopfields, currHfIdx, segLen, inf)` holds iff `hopfields` contains the 
+// serialization of the hopfields of the current segment, which has been traversed until the +// `currHfIdx`-th hop (out of `segLen` hops in total). +ghost +requires 0 <= currHfIdx && currHfIdx < segLen +requires segLen * path.HopLen == len(hopfields) +requires acc(sl.Bytes(hopfields, 0, len(hopfields)), R50) +requires let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R50) && + acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R50) && + acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R50) +decreases +pure func BytesStoreCurrSeg(hopfields []byte, currHfIdx int, segLen int, inf io.AbsInfoField) bool { + return let currseg := CurrSegWithInfo(hopfields, currHfIdx, segLen, inf) in + let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + len(currseg.Future) > 0 && + currseg.Future[0] == path.BytesToIO_HF(hopfields[currHfStart:currHfEnd], 0, 0, path.HopLen) && + currseg.Future[1:] == hopFields(hopfields[currHfEnd:], 0, 0, (segLen - currHfIdx - 1)) && + currseg.Past == segPast(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) && + currseg.History == segHistory(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) && + currseg.AInfo == inf.AInfo && + currseg.UInfo == inf.UInfo && + currseg.ConsDir == inf.ConsDir && + currseg.Peer == inf.Peer +} + +// `EstablishBytesStoreCurrSeg` shows that the raw bytes containing all hopfields +// can be split into three slices, one that exclusively contains all past hopfields, one +// that exclusively contains all future ones and another one for the current hopfield. +// This helps in proving that the future and past hopfields remain unchanged when the +// current hopfield is modified. 
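+//
+// As a concrete sketch (illustrative values only; a SCION hop field, path.HopLen,
+// is 12 bytes): with segLen == 4 and currHfIdx == 1, the three slices are
+// hopfields[:12] (past), hopfields[12:24] (current), and hopfields[24:48] (future),
+// which together cover the segment's hopfield bytes exactly.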
+ghost
+requires 0 <= currHfIdx && currHfIdx < segLen
+requires segLen * path.HopLen == len(hopfields)
+preserves acc(sl.Bytes(hopfields, 0, len(hopfields)), R49)
+preserves let currHfStart := currHfIdx * path.HopLen in
+	let currHfEnd := currHfStart + path.HopLen in
+	acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R49) &&
+	acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R49) &&
+	acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R49)
+ensures BytesStoreCurrSeg(hopfields, currHfIdx, segLen, inf)
+decreases
+func EstablishBytesStoreCurrSeg(hopfields []byte, currHfIdx int, segLen int, inf io.AbsInfoField) {
+	currseg := reveal CurrSegWithInfo(hopfields, currHfIdx, segLen, inf)
+	currHfStart := currHfIdx * path.HopLen
+	currHfEnd := currHfStart + path.HopLen
+	unfold acc(sl.Bytes(hopfields, 0, len(hopfields)), R56)
+	unfold acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R56)
+	hf := hopFields(hopfields, 0, 0, segLen)
+	hopFieldsBytePositionsLemma(hopfields, 0, 0, segLen, R54)
+	reveal hopFieldsBytePositions(hopfields, 0, 0, segLen, hf)
+	assert len(currseg.Future) > 0
+	assert currseg.Future[0] == path.BytesToIO_HF(hopfields[currHfStart:currHfEnd], 0, 0, path.HopLen)
+	splitHopFieldsInPastAndFuture(hopfields, currHfIdx, segLen)
+	assert currseg.Past == segPast(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx))
+	assert currseg.Future[0] == hf[currHfIdx]
+	assert hf[currHfIdx:][1:] == hf[currHfIdx + 1:]
+	assert currseg.Future == hf[currHfIdx:]
+	assert currseg.Future[1:] == hopFields(hopfields[currHfEnd:], 0, 0, (segLen - currHfIdx - 1))
+	assert currseg.History == segHistory(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx))
+	fold acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R56)
+	fold acc(sl.Bytes(hopfields, 0, len(hopfields)), R56)
+}
+
+// `splitHopFieldsInPastAndFuture` shows that the raw bytes containing all hopfields
+// can be split into two slices, one that exclusively contains all past hopfields and another
+// that exclusively contains all future ones. This helps in proving that the future and past
+// hopfields remain unchanged when the current hopfield is modified. 
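+//
+// For example (illustrative indices only, again assuming path.HopLen == 12): with
+// segLen == 3 and currHfIdx == 1, the lemma yields
+// hopFields(hopfields, 0, 0, 3)[:1] == hopFields(hopfields[:12], 0, 0, 1) and
+// hopFields(hopfields, 0, 0, 3)[2:] == hopFields(hopfields[24:], 0, 0, 1), i.e. the
+// abstract past and future hopfields are determined by disjoint byte ranges.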
+ghost
+requires 0 < segLen
+requires 0 <= currHfIdx && currHfIdx < segLen
+requires segLen * path.HopLen == len(hopfields)
+preserves acc(sl.Bytes(hopfields, 0, len(hopfields)), R50)
+preserves let currHfStart := currHfIdx * path.HopLen in
+	let currHfEnd := currHfStart + path.HopLen in
+	acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R50) &&
+	acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R50)
+ensures let currHfStart := currHfIdx * path.HopLen in
+	let currHfEnd := currHfStart + path.HopLen in
+	hopFields(hopfields, 0, 0, segLen)[:currHfIdx] ==
+	hopFields(hopfields[:currHfStart], 0, 0, currHfIdx) &&
+	hopFields(hopfields, 0, 0, segLen)[currHfIdx + 1:] ==
+	hopFields(hopfields[currHfEnd:], 0, 0, segLen - currHfIdx - 1)
+decreases
+func splitHopFieldsInPastAndFuture(hopfields []byte, currHfIdx int, segLen int) {
+	currHfStart := currHfIdx * path.HopLen
+	currHfEnd := currHfStart + path.HopLen
+	hf := hopFields(hopfields, 0, 0, segLen)
+	hopFieldsBytePositionsLemma(hopfields, 0, 0, segLen, R54)
+	reveal hopFieldsBytePositions(hopfields, 0, 0, segLen, hf)
+
+	hfPast := hopFields(hopfields, 0, 0, currHfIdx)
+	hopFieldsBytePositionsLemma(hopfields, 0, 0, currHfIdx, R54)
+	reveal hopFieldsBytePositions(hopfields, 0, 0, currHfIdx, hfPast)
+	widenHopFields(hopfields, 0, 0, currHfIdx, 0, currHfStart, R52)
+
+	hfFuture := hopFields(hopfields, currHfEnd, 0, segLen - currHfIdx - 1)
+	hopFieldsBytePositionsLemma(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, R54)
+	reveal hopFieldsBytePositions(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, hfFuture)
+	widenHopFields(hopfields, currHfEnd, 0, segLen - currHfIdx - 1,
+		currHfEnd, segLen * path.HopLen, R52)
+}
+
+// `SplitHopfields` splits the permission to the raw bytes of a segment into the permission
+// to the subslice containing all past hopfields, to the subslice containing the current hopfield,
+// and to another containing all future hopfields.
+ghost
+requires 0 < p
+requires 0 <= currHfIdx && currHfIdx < segLen
+requires segLen * path.HopLen == len(hopfields)
+requires acc(sl.Bytes(hopfields, 0, len(hopfields)), p)
+ensures let currHfStart := currHfIdx * path.HopLen in
+	let currHfEnd := currHfStart + path.HopLen in
+	acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), p) &&
+	acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), p) &&
+	acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), p)
+decreases
+func SplitHopfields(hopfields []byte, currHfIdx int, segLen int, p perm) {
+	currHfStart := currHfIdx * path.HopLen
+	currHfEnd := currHfStart + path.HopLen
+	sl.SplitByIndex_Bytes(hopfields, 0, len(hopfields), currHfStart, p)
+	sl.SplitByIndex_Bytes(hopfields, currHfStart, len(hopfields), currHfEnd, p)
+	sl.Reslice_Bytes(hopfields, 0, currHfStart, p)
+	sl.Reslice_Bytes(hopfields, currHfStart, currHfEnd, p)
+	sl.Reslice_Bytes(hopfields, currHfEnd, len(hopfields), p)
+}
+
+// `CombineHopfields` combines the permissions to the slices of bytes storing the past hopfields,
+// current hopfield, and future hopfields of a segment into a single permission to the slice
+// containing all hopfields of that segment. 
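+//
+// A typical (hypothetical) usage pattern brackets an update of the current hopfield:
+// SplitHopfields(hopfields, currHfIdx, segLen, p), then serializing the new hopfield
+// into hopfields[currHfIdx*path.HopLen : (currHfIdx+1)*path.HopLen], and finally
+// CombineHopfields(hopfields, currHfIdx, segLen, p) to restore the combined permission.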
+ghost +requires 0 < p +requires 0 <= currHfIdx && currHfIdx < segLen +requires segLen * path.HopLen == len(hopfields) +requires let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), p) && + acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), p) && + acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), p) +ensures acc(sl.Bytes(hopfields, 0, len(hopfields)), p) +decreases +func CombineHopfields(hopfields []byte, currHfIdx int, segLen int, p perm) { + currHfStart := currHfIdx * path.HopLen + currHfEnd := currHfStart + path.HopLen + sl.Unslice_Bytes(hopfields, currHfEnd, len(hopfields), p) + sl.Unslice_Bytes(hopfields, currHfStart, currHfEnd, p) + sl.Unslice_Bytes(hopfields, 0, currHfStart, p) + sl.CombineAtIndex_Bytes(hopfields, currHfStart, len(hopfields), currHfEnd, p) + sl.CombineAtIndex_Bytes(hopfields, 0, len(hopfields), currHfStart, p) } \ No newline at end of file diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index ca3595a5e..4ee0008c6 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -224,7 +224,7 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // pres for IO: // @ requires s.GetBase(ubuf).EqAbsHeader(ubuf) // @ requires validPktMetaHdr(ubuf) -// @ requires len(s.absPkt(ubuf).CurrSeg.Future) > 0 +// @ requires s.absPkt(ubuf).PathNotFullyTraversed() // @ requires s.GetBase(ubuf).IsXoverSpec() ==> // @ s.absPkt(ubuf).LeftSeg != none[io.IO_seg3] // @ ensures sl.Bytes(ubuf, 0, len(ubuf)) @@ -529,16 +529,34 @@ func (s *Raw) GetCurrentHopField( /*@ ghost ubuf []byte @*/ ) (res path.HopField // SetHopField updates the HopField at a given index. // @ requires 0 <= idx -// @ preserves acc(s.Mem(ubuf), R20) -// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) -// @ ensures r != nil ==> r.ErrorMem() +// @ requires acc(s.Mem(ubuf), R20) +// @ requires sl.Bytes(ubuf, 0, len(ubuf)) +// pres for IO: +// @ requires validPktMetaHdr(ubuf) +// @ requires s.GetBase(ubuf).EqAbsHeader(ubuf) +// @ requires s.absPkt(ubuf).PathNotFullyTraversed() +// @ ensures acc(s.Mem(ubuf), R20) +// @ ensures sl.Bytes(ubuf, 0, len(ubuf)) +// @ ensures r != nil ==> r.ErrorMem() +// posts for IO: +// @ ensures r == nil ==> +// @ validPktMetaHdr(ubuf) && +// @ s.GetBase(ubuf).EqAbsHeader(ubuf) +// @ ensures r == nil && idx == int(old(s.GetCurrHF(ubuf))) ==> +// @ let oldPkt := old(s.absPkt(ubuf)) in +// @ let newPkt := oldPkt.UpdateHopField(hop.ToIO_HF()) in +// @ s.absPkt(ubuf) == newPkt // @ decreases +// @ #backend[exhaleMode(1)] func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) (r error) { - //@ share hop + // (VerifiedSCION) Due to an incompleteness (https://github.com/viperproject/gobra/issues/770), + // we introduce a temporary variable to be able to call `path.AbsMacArrayCongruence()`. 
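+	// `tmpHopField` is a copy of `hop`; the call to `path.AbsMacArrayCongruence` just
+	// below relates the Mac arrays of the two copies, which gives
+	// tmpHopField.ToIO_HF() == hop.ToIO_HF(), so the IO postconditions of this method
+	// can still be stated in terms of `hop` while `tmpHopField` is serialized.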
+ tmpHopField /*@@@*/ := hop + //@ path.AbsMacArrayCongruence(hop.Mac, tmpHopField.Mac) // (VerifiedSCION) Cannot assert bounds of uint: // https://github.com/viperproject/gobra/issues/192 - //@ assume 0 <= hop.ConsIngress && 0 <= hop.ConsEgress - //@ fold hop.Mem() + //@ assume 0 <= tmpHopField.ConsIngress && 0 <= tmpHopField.ConsEgress + //@ fold acc(tmpHopField.Mem(), R9) //@ unfold acc(s.Mem(ubuf), R20) //@ unfold acc(s.Base.Mem(), R20) if idx >= s.NumHops { @@ -552,11 +570,19 @@ func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), writePerm) //@ assert sl.Bytes(s.Raw, 0, len(s.Raw)) //@ sl.SplitRange_Bytes(s.Raw, hopOffset, hopOffset+path.HopLen, writePerm) - ret := hop.SerializeTo(s.Raw[hopOffset : hopOffset+path.HopLen]) + ret := tmpHopField.SerializeTo(s.Raw[hopOffset : hopOffset+path.HopLen]) //@ sl.CombineRange_Bytes(s.Raw, hopOffset, hopOffset+path.HopLen, writePerm) //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), writePerm) //@ fold acc(s.Base.Mem(), R20) //@ fold acc(s.Mem(ubuf), R20) + // (VerifiedSCION) The proof for these assumptions is provided in PR #361 + // (https://github.com/viperproject/VerifiedSCION/pull/361), which will + // be merged once the performance issues are resolved. + //@ TemporaryAssumeForIO(validPktMetaHdr(ubuf) && s.GetBase(ubuf).EqAbsHeader(ubuf)) + //@ TemporaryAssumeForIO(idx == int(old(s.GetCurrHF(ubuf))) ==> + //@ let oldPkt := old(s.absPkt(ubuf)) in + //@ let newPkt := oldPkt.UpdateHopField(hop.ToIO_HF()) in + //@ s.absPkt(ubuf) == newPkt) return ret } diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index 7b9066698..9bf4537db 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -233,31 +233,21 @@ pure func hopFields( } ghost -requires -1 <= currHfIdx && currHfIdx < len(hopfields) -ensures len(res) == currHfIdx + 1 -decreases currHfIdx + 1 -pure func segPast(hopfields seq[io.IO_HF], currHfIdx int) (res seq[io.IO_HF]) { - return currHfIdx == -1 ? - seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHfIdx]} ++ segPast(hopfields, currHfIdx - 1) +ensures len(res) == len(hopfields) +decreases len(hopfields) +pure func segPast(hopfields seq[io.IO_HF]) (res seq[io.IO_HF]) { + return len(hopfields) == 0 ? seq[io.IO_HF]{} : + seq[io.IO_HF]{hopfields[len(hopfields)-1]} ++ segPast( + hopfields[:len(hopfields)-1]) } ghost -requires 0 <= currHfIdx && currHfIdx <= len(hopfields) -ensures len(res) == len(hopfields) - currHfIdx -decreases len(hopfields) - currHfIdx -pure func segFuture(hopfields seq[io.IO_HF], currHfIdx int) (res seq[io.IO_HF]) { - return currHfIdx == len(hopfields) ? seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHfIdx]} ++ segFuture(hopfields, currHfIdx + 1) -} - -ghost -requires -1 <= currHfIdx && currHfIdx < len(hopfields) -ensures len(res) == currHfIdx + 1 -decreases currHfIdx + 1 -pure func segHistory(hopfields seq[io.IO_HF], currHfIdx int) (res seq[io.IO_ahi]) { - return currHfIdx == -1 ? seq[io.IO_ahi]{} : - seq[io.IO_ahi]{hopfields[currHfIdx].Toab()} ++ segHistory(hopfields, currHfIdx - 1) +ensures len(res) == len(hopfields) +decreases len(hopfields) +pure func segHistory(hopfields seq[io.IO_HF]) (res seq[io.IO_ahi]) { + return len(hopfields) == 0 ? 
seq[io.IO_ahi]{} : + seq[io.IO_ahi]{hopfields[len(hopfields)-1].Toab()} ++ segHistory( + hopfields[:len(hopfields)-1]) } ghost @@ -284,9 +274,9 @@ pure func segment(raw []byte, UInfo : uinfo, ConsDir : consDir, Peer : peer, - Past : segPast(hopfields, currHfIdx - 1), - Future : segFuture(hopfields, currHfIdx), - History : segHistory(hopfields, currHfIdx - 1), + Past : segPast(hopfields[:currHfIdx]), + Future : hopfields[currHfIdx:], + History : segHistory(hopfields[:currHfIdx]), } } @@ -408,7 +398,7 @@ requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func RawBytesToMetaHdr(raw []byte) MetaHdr { return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in - let hdr := binary.BigEndian.Uint32(raw[:MetaLen]) in + let hdr := binary.BigEndian.Uint32(raw[:MetaLen]) in DecodedFrom(hdr) } @@ -489,7 +479,7 @@ func (s *Raw) EstablishValidPktMetaHdr(ghost ub []byte) { ghost requires oldPkt.LeftSeg != none[io.IO_seg2] -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() decreases pure func AbsXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { return io.IO_Packet2 { @@ -501,7 +491,7 @@ pure func AbsXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { } ghost -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() decreases pure func AbsIncPath(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { return io.IO_Packet2 { @@ -652,7 +642,7 @@ func (s *Raw) XoverLemma(ubuf []byte) { ghost opaque -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func (s *Raw) EqAbsHopField(pkt io.IO_pkt2, hop io.IO_HF) bool { return let currHF := pkt.CurrSeg.Future[0] in @@ -675,7 +665,7 @@ preserves acc(s.Mem(ubuf), R53) preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R53) preserves validPktMetaHdr(ubuf) preserves s.GetBase(ubuf).EqAbsHeader(ubuf) -preserves len(s.absPkt(ubuf).CurrSeg.Future) > 0 +preserves s.absPkt(ubuf).PathNotFullyTraversed() preserves s.GetBase(ubuf).ValidCurrInfSpec() preserves s.GetBase(ubuf).ValidCurrHfSpec() preserves s.CorrectlyDecodedInf(ubuf, info) @@ -754,6 +744,9 @@ decreases func IncCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen int) { currseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0) incseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) + hf := hopFields(raw, offset, 0, segLen) + hfPast := hf[:currHfIdx+1] + assert hfPast[:len(hfPast)-1] == hf[:currHfIdx] assert currseg.AInfo == incseg.AInfo assert currseg.UInfo == incseg.UInfo assert currseg.ConsDir == incseg.ConsDir diff --git a/router/dataplane.go b/router/dataplane.go index e06dfaa73..7b81f812e 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1936,7 +1936,7 @@ func (p *scionPacketProcessor) packSCMP( // Postconditions for IO: // @ ensures reserr == nil ==> // @ slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed() // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) // @ ensures old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) @@ -2058,7 +2058,7 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr // @ ensures reserr == nil && !p.infoField.ConsDir ==> ( // @ p.ingressID == 0 || p.hopField.ConsEgress == p.ingressID) // contracts for IO-spec -// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires oldPkt.PathNotFullyTraversed() // @ 
requires p.EqAbsHopField(oldPkt)
// @ requires p.EqAbsInfoField(oldPkt)
// @ ensures reserr == nil ==>
@@ -2275,7 +2275,7 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @
// @ requires dp.Valid()
// @ requires p.d.WellConfigured()
// @ requires p.d.DpAgreesWithSpec(dp)
-// @ requires len(oldPkt.CurrSeg.Future) > 0
+// @ requires oldPkt.PathNotFullyTraversed()
// @ requires p.EqAbsHopField(oldPkt)
// @ requires p.EqAbsInfoField(oldPkt)
// @ requires p.segmentChange ==>
@@ -2391,7 +2391,7 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh
// @ requires acc(&p.ingressID, R21)
// preconditions for IO:
// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ requires len(absPkt(ub).CurrSeg.Future) > 0
+// @ requires absPkt(ub).PathNotFullyTraversed()
// @ requires acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55)
// @ requires p.LastHopLen(ub)
// @ requires p.EqAbsHopField(absPkt(ub))
@@ -2406,7 +2406,7 @@
// postconditions for IO:
// @ ensures acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55)
// @ ensures err == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ ensures err == nil ==> len(absPkt(ub).CurrSeg.Future) > 0
+// @ ensures err == nil ==> absPkt(ub).PathNotFullyTraversed()
// @ ensures err == nil ==>
// @ 	absPkt(ub) == AbsUpdateNonConsDirIngressSegID(old(absPkt(ub)), path.ifsToIO_ifs(p.ingressID))
// @ ensures err == nil ==> p.LastHopLen(ub)
@@ -2520,7 +2520,7 @@ func (p *scionPacketProcessor) currentHopPointer( /*@ ghost ubScionL []byte @*/
// @ ensures sl.Bytes(p.cachedMac, 0, len(p.cachedMac))
// @ ensures reserr != nil ==> reserr.ErrorMem()
// contracts for IO-spec
-// @ requires len(oldPkt.CurrSeg.Future) > 0
+// @ requires oldPkt.PathNotFullyTraversed()
// @ requires p.EqAbsHopField(oldPkt)
// @ requires p.EqAbsInfoField(oldPkt)
// @ ensures reserr == nil ==> AbsVerifyCurrentMACConstraint(oldPkt, dp)
@@ -2612,7 +2612,7 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (
// @ requires !p.GetIsXoverSpec(ub)
// Preconditions for IO:
// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ requires len(absPkt(ub).CurrSeg.Future) > 0
+// @ requires absPkt(ub).PathNotFullyTraversed()
// @ requires p.EqAbsHopField(absPkt(ub))
// @ requires p.EqAbsInfoField(absPkt(ub))
// @ ensures acc(&p.infoField)
@@ -2721,7 +2721,7 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr
// @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).Future) > 0
// @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).History) == 0
// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0
+// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed()
// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub))
// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub))
// @ ensures reserr == nil ==> absPkt(ub) == AbsDoXover(old(absPkt(ub)))
@@ -2776,13 +2776,21 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost currBase scio
// @ assert slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
// @ assert absPkt(ub) == reveal AbsDoXover(old(absPkt(ub)))
	var err error
-	if p.hopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ); err != nil {
+	// (VerifiedSCION) Due to an 
incompleteness (https://github.com/viperproject/gobra/issues/770), + // we introduce a temporary variable to be able to call `path.AbsMacArrayCongruence()`. + var tmpHopField path.HopField + if tmpHopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ); err != nil { // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold acc(p.scionLayer.Mem(ub), 1-R55) // @ p.scionLayer.DowngradePerm(ub) // TODO parameter problem invalid path return processResult{}, err } + p.hopField = tmpHopField + // @ path.AbsMacArrayCongruence(p.hopField.Mac, tmpHopField.Mac) + // @ assert p.hopField.ToIO_HF() == tmpHopField.ToIO_HF() + // @ assert reveal p.path.CorrectlyDecodedHf(ubPath, tmpHopField) + // @ assert reveal p.path.CorrectlyDecodedHf(ubPath, p.hopField) if p.infoField, err = p.path.GetCurrentInfoField( /*@ ubPath @*/ ); err != nil { // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold acc(p.scionLayer.Mem(ub), 1-R55) @@ -2791,8 +2799,13 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost currBase scio return processResult{}, err } // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) - // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub))) + // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) + // @ absPktFutureLemma(ub) + // @ p.path.DecodingLemma(ubPath, p.infoField, p.hopField) + // @ assert reveal p.path.EqAbsInfoField(p.path.absPkt(ubPath), p.infoField.ToAbsInfoField()) + // @ assert reveal p.path.EqAbsHopField(p.path.absPkt(ubPath), p.hopField.ToIO_HF()) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) + // @ assert reveal p.EqAbsInfoField(absPkt(ub)) // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return processResult{}, nil } @@ -2832,7 +2845,7 @@ func (p *scionPacketProcessor) ingressInterface( /*@ ghost ubPath []byte @*/ ) u // @ ensures acc(&p.infoField, R21) // @ ensures acc(&p.hopField, R21) // contracts for IO-spec -// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires oldPkt.PathNotFullyTraversed() // @ requires p.EqAbsInfoField(oldPkt) // @ requires p.EqAbsHopField(oldPkt) // @ ensures p.EqAbsInfoField(oldPkt) @@ -2862,7 +2875,7 @@ func (p *scionPacketProcessor) egressInterface( /*@ ghost oldPkt io.IO_pkt2 @*/ // @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec -// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires oldPkt.PathNotFullyTraversed() // @ requires p.EqAbsInfoField(oldPkt) // @ requires p.EqAbsHopField(oldPkt) // @ ensures reserr != nil && respr.OutPkt != nil ==> @@ -2927,12 +2940,12 @@ func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/ // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) // @ requires p.DstIsLocalIngressID(ub) // @ requires p.LastHopLen(ub) -// @ requires len(absPkt(ub).CurrSeg.Future) > 0 +// @ requires absPkt(ub).PathNotFullyTraversed() // @ requires p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> p.DstIsLocalIngressID(ub) // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) // @ ensures reserr == nil ==> p.LastHopLen(ub) -// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed() // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> absPkt(ub) == old(absPkt(ub)) // @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == 
slayers.IsSupportedPkt(ub)
@@ -2940,6 +2953,9 @@ func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/
// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
// @ decreases
func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) {
+	// @ reveal p.EqAbsHopField(absPkt(ub))
+	// @ assert let fut := absPkt(ub).CurrSeg.Future in
+	// @ 	fut == seq[io.IO_HF]{p.hopField.ToIO_HF()} ++ fut[1:]
	// @ ghost ubPath := p.scionLayer.UBPath(ub)
	// @ ghost startP := p.scionLayer.PathStartIdx(ub)
	// @ ghost endP := p.scionLayer.PathEndIdx(ub)
@@ -2974,20 +2990,16 @@
		return processResult{}, serrors.WrapStr("update hop field", err)
	}
	// @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm)
-	// @ assert p.DstIsLocalIngressID(ub)
-	// @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.GetBase(ubPath).EqAbsHeader(ubPath)) // postcondition of SetHopfield
	// @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen)
	// @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54)
	// @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54)
	// @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP)
	// @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP)
	// @ absPktFutureLemma(ub)
-	// @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) // postcondition of SetHopfield
-	// @ TemporaryAssumeForIO(absPkt(ub) == old(absPkt(ub)))
-	// @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm)
-	// @ assert slayers.ValidPktMetaHdr(ub)
+	// @ assert reveal p.EqAbsHopField(absPkt(ub))
	// @ assert reveal p.LastHopLen(ub)
	// @ assert p.scionLayer.EqAbsHeader(ub)
+	// @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm)

	// @ ghost var ubLL []byte
	// @ ghost if &p.scionLayer === p.lastLayer {
@@ -3040,11 +3052,11 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) {
// @ ensures reserr != nil ==> reserr.ErrorMem()
// contracts for IO-spec
// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ requires len(absPkt(ub).CurrSeg.Future) > 0
+// @ requires absPkt(ub).PathNotFullyTraversed()
// @ requires p.EqAbsHopField(absPkt(ub))
// @ requires p.EqAbsInfoField(absPkt(ub))
// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)
-// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0
+// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed()
// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub))
// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub))
// @ ensures reserr == nil ==> absPkt(ub) == old(absPkt(ub))
@@ -3053,6 +3065,9 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) {
// @ 	absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported
// @ decreases
func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) {
+	// @ reveal p.EqAbsHopField(absPkt(ub))
+	// @ assert let fut := absPkt(ub).CurrSeg.Future in
+	// @ 	fut == seq[io.IO_HF]{p.hopField.ToIO_HF()} ++ fut[1:]
	// @ ghost ubPath := p.scionLayer.UBPath(ub)
	// @ ghost startP := p.scionLayer.PathStartIdx(ub)
	// @ ghost endP := p.scionLayer.PathEndIdx(ub)
@@ -3091,17 +3106,15 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho
		return processResult{}, serrors.WrapStr("update hop 
field", err) } // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.GetBase(ubPath).EqAbsHeader(ubPath)) // postcondition of SetHopfield // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ absPktFutureLemma(ub) - // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) // postcondition of SetHopfield - // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub))) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) + // @ assert reveal p.EqAbsInfoField(absPkt(ub)) // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ TemporaryAssumeForIO(absPkt(ub) == old(absPkt(ub))) // @ ghost var ubLL []byte // @ ghost if &p.scionLayer === p.lastLayer { diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra index b238a4ddd..8aa346ff3 100644 --- a/router/io-spec-abstract-transitions.gobra +++ b/router/io-spec-abstract-transitions.gobra @@ -18,6 +18,7 @@ package router import ( "github.com/scionproto/scion/pkg/slayers/path" + "github.com/scionproto/scion/pkg/slayers/path/scion" "github.com/scionproto/scion/pkg/slayers" . "verification/utils/definitions" io "verification/io" @@ -26,7 +27,7 @@ import ( ) ghost -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func CurrSegIO_ifs(pkt io.IO_pkt2, dir bool) option[io.IO_ifs] { return let currseg := pkt.CurrSeg in @@ -35,8 +36,8 @@ pure func CurrSegIO_ifs(pkt io.IO_pkt2, dir bool) option[io.IO_ifs] { ghost opaque -requires len(oldPkt.CurrSeg.Future) > 0 -ensures len(newPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() +ensures newPkt.PathNotFullyTraversed() ensures len(newPkt.CurrSeg.Future) == len(oldPkt.CurrSeg.Future) decreases pure func AbsUpdateNonConsDirIngressSegID(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs]) (newPkt io.IO_pkt2) { @@ -50,7 +51,7 @@ pure func AbsUpdateNonConsDirIngressSegID(oldPkt io.IO_pkt2, ingressID option[io ghost opaque -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func AbsValidateIngressIDConstraint(pkt io.IO_pkt2, ingressID option[io.IO_ifs]) bool { return let currseg := pkt.CurrSeg in @@ -71,7 +72,7 @@ pure func AbsValidateIngressIDConstraintXover(pkt io.IO_pkt2, ingressID option[i ghost opaque -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func AbsEgressInterfaceConstraint(pkt io.IO_pkt2, egressID option[io.IO_ifs]) bool { return let currseg := pkt.CurrSeg in @@ -81,7 +82,7 @@ pure func AbsEgressInterfaceConstraint(pkt io.IO_pkt2, egressID option[io.IO_ifs ghost opaque requires dp.Valid() -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func AbsValidateEgressIDConstraint(pkt io.IO_pkt2, enter bool, dp io.DataPlaneSpec) bool { return let currseg := pkt.CurrSeg in @@ -90,7 +91,7 @@ pure func AbsValidateEgressIDConstraint(pkt io.IO_pkt2, enter bool, dp io.DataPl ghost opaque -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() ensures len(newPkt.CurrSeg.Future) >= 0 decreases pure func AbsProcessEgress(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { @@ -108,7 +109,7 @@ requires oldPkt.LeftSeg != none[io.IO_seg2] requires len(oldPkt.CurrSeg.Future) == 1 requires 
len(get(oldPkt.LeftSeg).Future) > 0 requires len(get(oldPkt.LeftSeg).History) == 0 -ensures len(newPkt.CurrSeg.Future) > 0 +ensures newPkt.PathNotFullyTraversed() ensures newPkt.RightSeg != none[io.IO_seg2] ensures len(get(newPkt.RightSeg).Past) > 0 decreases @@ -124,7 +125,7 @@ pure func AbsDoXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { ghost opaque requires dp.Valid() -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() requires pkt.RightSeg != none[io.IO_seg2] requires len(get(pkt.RightSeg).Past) > 0 decreases @@ -137,7 +138,7 @@ pure func AbsValidateEgressIDConstraintXover(pkt io.IO_pkt2, dp io.DataPlaneSpec ghost opaque -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func AbsVerifyCurrentMACConstraint(pkt io.IO_pkt2, dp io.DataPlaneSpec) bool { return let currseg := pkt.CurrSeg in @@ -154,7 +155,7 @@ ghost requires dp.Valid() requires ingressID != none[io.IO_ifs] requires egressID == none[io.IO_ifs] -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) requires AbsValidateIngressIDConstraint(oldPkt, ingressID) @@ -184,7 +185,7 @@ ghost requires dp.Valid() requires egressID != none[io.IO_ifs] requires get(egressID) in domain(dp.GetNeighborIAs()) -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) requires AbsValidateIngressIDConstraint(oldPkt, ingressID) requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), dp) @@ -216,7 +217,7 @@ func ExternalEnterOrExitEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], ne ghost requires dp.Valid() requires ingressID != none[io.IO_ifs] -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) requires AbsValidateIngressIDConstraint(oldPkt, ingressID) requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), dp) diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index e6733d2ce..ba26a992a 100644 --- a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -32,7 +32,7 @@ preserves acc(sl.Bytes(raw, 0, len(raw)), R55) ensures slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw) ==> absIO_val(raw, ingressID).isIO_val_Pkt2 && absIO_val(raw, ingressID).IO_val_Pkt2_2 == absPkt(raw) && - len(absPkt(raw).CurrSeg.Future) > 0 + absPkt(raw).PathNotFullyTraversed() decreases func absIO_valLemma(raw []byte, ingressID uint16) { if(slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw)){ @@ -48,7 +48,7 @@ requires acc(sl.Bytes(raw, 0, len(raw)), R56) requires slayers.ValidPktMetaHdr(raw) ensures acc(sl.Bytes(raw, 0, len(raw)), R56) ensures slayers.ValidPktMetaHdr(raw) -ensures len(absPkt(raw).CurrSeg.Future) > 0 +ensures absPkt(raw).PathNotFullyTraversed() decreases func absPktFutureLemma(raw []byte) { reveal slayers.ValidPktMetaHdr(raw) @@ -70,11 +70,11 @@ func absPktFutureLemma(raw []byte) { offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) pkt := reveal absPkt(raw) assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen) - assert len(pkt.CurrSeg.Future) > 0 + assert pkt.PathNotFullyTraversed() } ghost -requires len(oldPkt.CurrSeg.Future) > 0 +requires 
oldPkt.PathNotFullyTraversed() requires AbsValidateIngressIDConstraint(oldPkt, ingressID) requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) ensures AbsValidateIngressIDConstraint(newPkt, ingressID) @@ -166,7 +166,7 @@ ghost opaque requires acc(&p.d, R55) && acc(p.d.Mem(), _) requires acc(&p.ingressID, R55) -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func (p *scionPacketProcessor) NoBouncingPkt(pkt io.IO_pkt2) bool { return let currseg := pkt.CurrSeg in @@ -178,7 +178,7 @@ pure func (p *scionPacketProcessor) NoBouncingPkt(pkt io.IO_pkt2) bool { ghost requires acc(&p.d, R55) && acc(p.d.Mem(), _) requires acc(&p.ingressID, R55) -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() requires AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) requires (egressID in p.d.getDomExternal()) || p.ingressID != 0 ensures acc(&p.d, R55) && acc(p.d.Mem(), _) @@ -193,7 +193,7 @@ func (p *scionPacketProcessor) EstablishNoBouncingPkt(pkt io.IO_pkt2, egressID u ghost requires acc(&p.d, R55) && acc(p.d.Mem(), _) requires acc(&p.ingressID, R55) -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() requires AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) requires p.NoBouncingPkt(pkt) requires !(egressID in p.d.getDomExternal()) @@ -369,7 +369,7 @@ func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end ghost opaque requires acc(&p.hopField, R55) -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func (p* scionPacketProcessor) EqAbsHopField(pkt io.IO_pkt2) bool { return let absHop := p.hopField.ToIO_HF() in diff --git a/verification/io/io_spec_definitions.gobra b/verification/io/io_spec_definitions.gobra index 9f78b8969..e7dc18e96 100644 --- a/verification/io/io_spec_definitions.gobra +++ b/verification/io/io_spec_definitions.gobra @@ -23,18 +23,18 @@ ghost requires len(currseg.Future) > 0 decreases pure func establishGuardTraversedseg(currseg IO_seg3, direction bool) IO_seg3 { - return let uinfo := direction ? - upd_uinfo(currseg.UInfo, currseg.Future[0]) : - currseg.UInfo in - IO_seg3_ { - AInfo: currseg.AInfo, - UInfo: uinfo, - ConsDir: currseg.ConsDir, - Peer: currseg.Peer, - Past: currseg.Past, - Future: currseg.Future, - History: currseg.History, - } + return let uinfo := direction ? + upd_uinfo(currseg.UInfo, currseg.Future[0]) : + currseg.UInfo in + IO_seg3_ { + AInfo: currseg.AInfo, + UInfo: uinfo, + ConsDir: currseg.ConsDir, + Peer: currseg.Peer, + Past: currseg.Past, + Future: currseg.Future, + History: currseg.History, + } } // Establishes the traversed segment for packets that are incremented (external). @@ -42,91 +42,114 @@ ghost requires len(currseg.Future) > 0 decreases pure func establishGuardTraversedsegInc(currseg IO_seg3, direction bool) IO_seg3 { - return let uinfo := direction ? - upd_uinfo(currseg.UInfo, currseg.Future[0]) : - currseg.UInfo in - IO_seg3_ { - AInfo: currseg.AInfo, - UInfo: uinfo, - ConsDir: currseg.ConsDir, - Peer: currseg.Peer, - Past: seq[IO_HF]{currseg.Future[0]} ++ currseg.Past, - Future: currseg.Future[1:], - History: seq[IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History, - } + return let uinfo := direction ? 
+ upd_uinfo(currseg.UInfo, currseg.Future[0]) : + currseg.UInfo in + IO_seg3_ { + AInfo: currseg.AInfo, + UInfo: uinfo, + ConsDir: currseg.ConsDir, + Peer: currseg.Peer, + Past: seq[IO_HF]{currseg.Future[0]} ++ currseg.Past, + Future: currseg.Future[1:], + History: seq[IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History, + } } ghost +requires len(seg.Future) > 0 decreases -pure func (seg IO_seg3) UpdateCurrSeg( info AbsInfoField) IO_seg3 { - return IO_seg3_ { - info.AInfo, - info.UInfo, - info.ConsDir, - info.Peer, - seg.Past, - seg.Future, - seg.History, - } +pure func (seg IO_seg3) UpdateCurrHf(hf IO_HF) IO_seg3 { + return IO_seg3_ { + seg.AInfo, + seg.UInfo, + seg.ConsDir, + seg.Peer, + seg.Past, + seq[IO_HF]{hf} ++ seg.Future[1:], + seg.History, + } +} + +ghost +requires pkt.PathNotFullyTraversed() +decreases +pure func (pkt IO_pkt2) UpdateHopField(hf IO_HF) IO_pkt2 { + return let newCurrSeg := pkt.CurrSeg.UpdateCurrHf(hf) in + IO_Packet2{newCurrSeg, pkt.LeftSeg, pkt.MidSeg, pkt.RightSeg} +} + +ghost +decreases +pure func (seg IO_seg3) UpdateCurrSeg(info AbsInfoField) IO_seg3 { + return IO_seg3_ { + info.AInfo, + info.UInfo, + info.ConsDir, + info.Peer, + seg.Past, + seg.Future, + seg.History, + } } ghost decreases pure func (pkt IO_pkt2) UpdateInfoField(info AbsInfoField) IO_pkt2 { - return let newCurrSeg := pkt.CurrSeg.UpdateCurrSeg(info) in - IO_Packet2{newCurrSeg, pkt.LeftSeg, pkt.MidSeg, pkt.RightSeg} + return let newCurrSeg := pkt.CurrSeg.UpdateCurrSeg(info) in + IO_Packet2{newCurrSeg, pkt.LeftSeg, pkt.MidSeg, pkt.RightSeg} } // This type simplifies the infoField, making it easier // to use than the IO_seg3 from the IO-spec. type AbsInfoField adt { - AbsInfoField_ { - AInfo IO_ainfo - UInfo set[IO_msgterm] - ConsDir bool - Peer bool - } + AbsInfoField_ { + AInfo IO_ainfo + UInfo set[IO_msgterm] + ConsDir bool + Peer bool + } } // The segment lengths of a packet are frequently used together. // This type combines them into a single structure to simplify // their specification. type SegLens adt { - SegLens_ { - Seg1Len int - Seg2Len int - Seg3Len int - } + SegLens_ { + Seg1Len int + Seg2Len int + Seg3Len int + } } ghost decreases pure func (s SegLens) Valid() bool { - return s.Seg1Len > 0 && - s.Seg2Len >= 0 && - s.Seg3Len >= 0 + return s.Seg1Len > 0 && + s.Seg2Len >= 0 && + s.Seg3Len >= 0 } ghost decreases pure func CombineSegLens(seg1Len int, seg2Len int, seg3Len int) SegLens { - return SegLens_ { - seg1Len, - seg2Len, - seg3Len, - } + return SegLens_ { + seg1Len, + seg2Len, + seg3Len, + } } ghost decreases pure func (s SegLens) NumInfoFields() int { - return s.Seg3Len > 0 ? 3 : (s.Seg2Len > 0 ? 2 : (s.Seg1Len > 0 ? 1 : 0)) + return s.Seg3Len > 0 ? 3 : (s.Seg2Len > 0 ? 2 : (s.Seg1Len > 0 ? 1 : 0)) } ghost decreases pure func (s SegLens) TotalHops() int { - return s.Seg1Len + s.Seg2Len + s.Seg3Len + return s.Seg1Len + s.Seg2Len + s.Seg3Len } ghost @@ -141,4 +164,10 @@ ensures res <= currHF decreases pure func (s SegLens) LengthOfPrevSeg(currHF int) (res int) { return s.Seg1Len > currHF ? 0 : ((s.Seg1Len + s.Seg2Len) > currHF ? 
s.Seg1Len : s.Seg1Len + s.Seg2Len) -} \ No newline at end of file +} + +ghost +decreases +pure func (pkt IO_pkt2) PathNotFullyTraversed() bool { + return len(pkt.CurrSeg.Future) > 0 +} From 3a6f9f4db2388da4e23fd61c16952cf9ff801c27 Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Tue, 23 Jul 2024 08:08:47 +0200 Subject: [PATCH 52/57] Drop IO-assumptions in processOHP (#369) * move assumption from processOHP to updateSCIONLayer * proof of assumptions in processOHP * test: moreJoins for SerializeTo in slayers * fix verification error * feedback * add moreJoins to SerializeTo() --- pkg/slayers/scion.go | 81 +++++++++++-------- pkg/slayers/scion_spec.gobra | 41 +++++++++- router/dataplane.go | 68 ++++++++-------- .../github.com/google/gopacket/writer.gobra | 14 +++- verification/utils/seqs/seqs.gobra | 12 ++- verification/utils/slices/slices_test.gobra | 2 - 6 files changed, 141 insertions(+), 77 deletions(-) diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index 6ffb1b741..fdf5a4232 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -228,13 +228,16 @@ func (s *SCION) NetworkFlow() (res gopacket.Flow) { // @ ensures e == nil && s.HasOneHopPath(ubuf) ==> // @ (unfolding acc(s.Mem(ubuf), R55) in CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) <= len(ubuf) // @ ensures e != nil ==> e.ErrorMem() +// post for IO: +// @ ensures e == nil && old(s.EqPathType(ubuf)) ==> +// @ IsSupportedRawPkt(b.View()) == old(IsSupportedPkt(ubuf)) // @ decreases func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions /* @ , ghost ubuf []byte @*/) (e error) { - // @ unfold acc(s.Mem(ubuf), R0) - // @ defer fold acc(s.Mem(ubuf), R0) - // @ sl.SplitRange_Bytes(ubuf, int(CmnHdrLen+s.AddrHdrLen(nil, true)), int(s.HdrLen*LineLen), writePerm) - // @ ghost defer sl.CombineRange_Bytes(ubuf, int(CmnHdrLen+s.AddrHdrLenSpecInternal()), int(s.HdrLen*LineLen), writePerm) + // @ unfold acc(s.Mem(ubuf), R1) + // @ defer fold acc(s.Mem(ubuf), R1) + // @ sl.SplitRange_Bytes(ubuf, int(CmnHdrLen+s.AddrHdrLen(nil, true)), int(s.HdrLen*LineLen), R10) scnLen := CmnHdrLen + s.AddrHdrLen( /*@ nil, true @*/ ) + s.Path.Len( /*@ ubuf[CmnHdrLen+s.AddrHdrLen(nil, true) : s.HdrLen*LineLen] @*/ ) + // @ sl.CombineRange_Bytes(ubuf, int(CmnHdrLen+s.AddrHdrLenSpecInternal()), int(s.HdrLen*LineLen), R10) if scnLen > MaxHdrLen { return serrors.New("header length exceeds maximum", "max", MaxHdrLen, "actual", scnLen) @@ -255,60 +258,70 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // @ ghost uSerBufN := b.UBuf() // @ assert buf === uSerBufN[:scnLen] // @ b.ExchangePred() - // @ sl.SplitRange_Bytes(uSerBufN, 0, scnLen, writePerm) + // @ unfold acc(sl.Bytes(uSerBufN, 0, len(uSerBufN)), writePerm) // Serialize common header. 
firstLine := uint32(s.Version&0xF)<<28 | uint32(s.TrafficClass)<<20 | s.FlowID&0xFFFFF - // @ sl.SplitRange_Bytes(buf, 0, 4, writePerm) - // @ unfold acc(sl.Bytes(buf[:4], 0, 4), writePerm) binary.BigEndian.PutUint32(buf[:4], firstLine) - // @ fold acc(sl.Bytes(buf[:4], 0, 4), writePerm) - // @ sl.CombineRange_Bytes(buf, 0, 4, writePerm) - // @ unfold acc(sl.Bytes(buf, 0, len(buf)), writePerm) buf[4] = uint8(s.NextHdr) buf[5] = s.HdrLen - // @ fold acc(sl.Bytes(buf, 0, len(buf)), writePerm) - // @ sl.SplitRange_Bytes(buf, 6, 8, writePerm) - // @ unfold acc(sl.Bytes(buf[6:8], 0, 2), writePerm) + // @ assert &buf[6:8][0] == &buf[6] && &buf[6:8][1] == &buf[7] binary.BigEndian.PutUint16(buf[6:8], s.PayloadLen) - // @ fold acc(sl.Bytes(buf[6:8], 0, 2), writePerm) - // @ sl.CombineRange_Bytes(buf, 6, 8, writePerm) - // @ unfold acc(sl.Bytes(buf, 0, len(buf)), writePerm) buf[8] = uint8(s.PathType) buf[9] = uint8(s.DstAddrType&0x7)<<4 | uint8(s.SrcAddrType&0x7) - // @ fold acc(sl.Bytes(buf, 0, len(buf)), writePerm) - // @ sl.SplitRange_Bytes(buf, 10, 12, writePerm) - // @ unfold acc(sl.Bytes(buf[10:12], 0, 2), writePerm) + // @ assert &buf[10:12][0] == &buf[10] && &buf[10:12][1] == &buf[11] binary.BigEndian.PutUint16(buf[10:12], 0) - // @ fold acc(sl.Bytes(buf[10:12], 0, 2), writePerm) - // @ sl.CombineRange_Bytes(buf, 10, 12, writePerm) - - // @ ghost sPath := s.Path - // @ ghost pathSlice := ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen] - // @ sl.CombineRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLenSpecInternal(), int(s.HdrLen*LineLen), R10) + // @ fold acc(sl.Bytes(uSerBufN, 0, len(uSerBufN)), writePerm) + // @ ghost if s.EqPathType(ubuf) { + // @ assert reveal s.EqPathTypeWithBuffer(ubuf, uSerBufN) + // @ s.IsSupportedPktLemma(ubuf, uSerBufN) + // @ } // Serialize address header. - // @ sl.SplitRange_Bytes(buf, CmnHdrLen, len(buf), writePerm) + // @ sl.SplitRange_Bytes(uSerBufN, CmnHdrLen, scnLen, HalfPerm) + // @ sl.Reslice_Bytes(uSerBufN, 0, CmnHdrLen, R54) + // @ IsSupportedPktSubslice(uSerBufN, CmnHdrLen) + // @ sl.SplitRange_Bytes(uSerBufN, CmnHdrLen, scnLen, HalfPerm) // @ sl.SplitRange_Bytes(ubuf, CmnHdrLen, len(ubuf), R10) if err := s.SerializeAddrHdr(buf[CmnHdrLen:] /*@ , ubuf[CmnHdrLen:] @*/); err != nil { - // @ sl.CombineRange_Bytes(buf, CmnHdrLen, len(buf), writePerm) + // @ sl.Unslice_Bytes(uSerBufN, 0, CmnHdrLen, R54) + // @ sl.CombineRange_Bytes(uSerBufN, CmnHdrLen, scnLen, writePerm) // @ sl.CombineRange_Bytes(ubuf, CmnHdrLen, len(ubuf), R10) - // @ sl.SplitRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLenSpecInternal(), int(s.HdrLen*LineLen), R10) - // @ sl.CombineRange_Bytes(uSerBufN, 0, scnLen, writePerm) // @ b.RestoreMem(uSerBufN) return err } offset := CmnHdrLen + s.AddrHdrLen( /*@ nil, true @*/ ) - // @ sl.CombineRange_Bytes(buf, CmnHdrLen, len(buf), writePerm) + // @ sl.CombineRange_Bytes(uSerBufN, CmnHdrLen, scnLen, HalfPerm) // @ sl.CombineRange_Bytes(ubuf, CmnHdrLen, len(ubuf), R10) - // @ sl.SplitRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLenSpecInternal(), int(s.HdrLen*LineLen), R10) + // @ IsSupportedPktSubslice(uSerBufN, CmnHdrLen) + // @ sl.Unslice_Bytes(uSerBufN, 0, CmnHdrLen, R54) + // @ sl.CombineRange_Bytes(uSerBufN, CmnHdrLen, scnLen, HalfPerm) + // Serialize path header. 
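	// The ghost steps around the nested SerializeTo call below follow a
	// recurring pattern: both the serialization buffer (uSerBufN) and the
	// input packet (ubuf) are split around the relevant byte range,
	// IsSupportedPktSubslice transfers the IsSupportedPkt information into
	// and out of the subslices, and the ranges are recombined afterwards,
	// so that the callee only receives permissions to the bytes it writes.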
- // @ sl.SplitRange_Bytes(buf, offset, len(buf), writePerm) + // @ ghost startP := int(CmnHdrLen+s.AddrHdrLenSpecInternal()) + // @ ghost endP := int(s.HdrLen*LineLen) + // @ ghost pathSlice := ubuf[startP : endP] + // @ sl.SplitRange_Bytes(uSerBufN, offset, scnLen, HalfPerm) + // @ sl.SplitRange_Bytes(ubuf, startP, endP, HalfPerm) + // @ sl.Reslice_Bytes(uSerBufN, 0, offset, R54) + // @ sl.Reslice_Bytes(ubuf, 0, startP, R54) + // @ IsSupportedPktSubslice(uSerBufN, offset) + // @ IsSupportedPktSubslice(ubuf, startP) + // @ sl.SplitRange_Bytes(uSerBufN, offset, scnLen, HalfPerm) + // @ sl.SplitRange_Bytes(ubuf, startP, endP, HalfPerm) tmp := s.Path.SerializeTo(buf[offset:] /*@, pathSlice @*/) - // @ sl.CombineRange_Bytes(buf, offset, len(buf), writePerm) - // @ sl.CombineRange_Bytes(uSerBufN, 0, scnLen, writePerm) + // @ sl.CombineRange_Bytes(uSerBufN, offset, scnLen, HalfPerm) + // @ sl.CombineRange_Bytes(ubuf, startP, endP, HalfPerm) + // @ IsSupportedPktSubslice(uSerBufN, offset) + // @ IsSupportedPktSubslice(ubuf, startP) + // @ sl.Unslice_Bytes(uSerBufN, 0, offset, R54) + // @ sl.Unslice_Bytes(ubuf, 0, startP, R54) + // @ sl.CombineRange_Bytes(uSerBufN, offset, scnLen, HalfPerm) + // @ sl.CombineRange_Bytes(ubuf, startP, endP, HalfPerm) + // @ reveal IsSupportedPkt(uSerBufN) // @ b.RestoreMem(uSerBufN) + // @ reveal IsSupportedRawPkt(b.View()) return tmp } diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index 73e838c08..37e82f182 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -31,6 +31,7 @@ import ( sl "verification/utils/slices" "verification/io" "encoding/binary" + "verification/utils/seqs" ) pred PathPoolMem(pathPool []path.Path, pathPoolRaw path.Path) { @@ -486,6 +487,17 @@ pure func IsSupportedPkt(raw []byte) bool { nextHdr != L4SCMP } +ghost +opaque +decreases +pure func IsSupportedRawPkt(raw seq[byte]) bool { + return CmnHdrLen <= len(raw) && + let pathType := path.Type(raw[8]) in + let nextHdr := L4ProtocolType(raw[4]) in + pathType == scion.PathType && + nextHdr != L4SCMP +} + ghost requires CmnHdrLen <= idx && idx <= len(raw) preserves acc(sl.Bytes(raw, 0, len(raw)), R55) @@ -501,6 +513,21 @@ func IsSupportedPktSubslice(raw []byte, idx int) { fold acc(sl.Bytes(raw[:idx], 0, idx), R56) } +ghost +preserves acc(s.Mem(ub), R55) +preserves acc(sl.Bytes(ub, 0, len(ub)), R55) +preserves acc(sl.Bytes(buffer, 0, len(buffer)), R55) +preserves s.EqPathType(ub) +preserves s.EqPathTypeWithBuffer(ub, buffer) +ensures IsSupportedPkt(ub) == IsSupportedPkt(buffer) +decreases +func (s *SCION) IsSupportedPktLemma(ub []byte, buffer []byte) { + reveal s.EqPathType(ub) + reveal s.EqPathTypeWithBuffer(ub, buffer) + reveal IsSupportedPkt(ub) + reveal IsSupportedPkt(buffer) +} + ghost requires acc(sl.Bytes(ub, 0, len(ub)), _) requires CmnHdrLen <= len(ub) @@ -558,9 +585,19 @@ requires acc(s.Mem(ub), _) requires acc(sl.Bytes(ub, 0, len(ub)), _) decreases pure func (s *SCION) EqPathType(ub []byte) bool { + return reveal s.EqPathTypeWithBuffer(ub, ub) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires acc(sl.Bytes(buffer, 0, len(buffer)), _) +decreases +pure func (s *SCION) EqPathTypeWithBuffer(ub []byte, buffer []byte) bool { return unfolding acc(s.Mem(ub), _) in - path.Type(GetPathType(ub)) == s.PathType && - L4ProtocolType(GetNextHdr(ub)) == s.NextHdr + CmnHdrLen <= len(buffer) && + path.Type(GetPathType(buffer)) == s.PathType && + L4ProtocolType(GetNextHdr(buffer)) == s.NextHdr } ghost diff --git a/router/dataplane.go 
b/router/dataplane.go index 7b81f812e..0eab409f4 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -3515,23 +3515,20 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ p.scionLayer.ExtractAcc(ubScionL) s := p.scionLayer // @ ghost ubPath := p.scionLayer.UBPath(ubScionL) - // @ unfold acc(p.scionLayer.Mem(ubScionL), 1-R15) - // @ apply acc(&p.scionLayer, R16) --* acc(p.scionLayer.Mem(ubScionL), R15) // @ unfold acc(p.scionLayer.Mem(ubScionL), R15) + // @ defer fold acc(p.scionLayer.Mem(ubScionL), R15) + // @ apply acc(&p.scionLayer, R16) --* acc(p.scionLayer.Mem(ubScionL), R15) // @ assert s.Path === p.scionLayer.Path - // @ assert s.Path.Mem(ubPath) ohp, ok := s.Path.(*onehop.Path) if !ok { // TODO parameter problem -> invalid path // @ establishMemMalformedPath() - // @ fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, malformedPath /*@ , false, absReturnErr(processResult{}) @*/ } if /*@ unfolding acc(s.Path.Mem(ubPath), R50) in @*/ !ohp.Info.ConsDir { // TODO parameter problem -> invalid path // @ establishMemMalformedPath() - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr( "OneHop path in reverse construction direction is not allowed", @@ -3544,7 +3541,6 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / if !p.d.localIA.Equal(s.SrcIA) { // @ establishCannotRoute() // TODO parameter problem -> invalid path - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad source IA", cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/), @@ -3555,48 +3551,48 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / if !ok { // @ establishCannotRoute() // TODO parameter problem invalid interface - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WithCtx(cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/)) /*@ , false, absReturnErr(processResult{}) @*/ } if !neighborIA.Equal(s.DstIA) { // @ establishCannotRoute() - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad destination IA", cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/), "neighborIA", neighborIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(processResult{}) @*/ } - // @ unfold s.Path.Mem(ubPath) - // @ unfold ohp.FirstHop.Mem() - // @ preserves acc(&ohp.Info, R15) && acc(&ohp.FirstHop, R15) - // @ preserves acc(&p.macBuffers.scionInput, R15) - // @ preserves acc(&p.mac, R15) && p.mac != nil && p.mac.Mem() + // @ unfold acc(ohp.Mem(ubPath), R50) + // @ defer fold acc(ohp.Mem(ubPath), R50) + // @ unfold acc(ohp.FirstHop.Mem(), R54) + // @ defer fold acc(ohp.FirstHop.Mem(), R54) + // @ preserves acc(&ohp.Info, R55) && acc(&ohp.FirstHop, R55) + // @ preserves acc(&p.macBuffers.scionInput, R55) + // @ preserves acc(&p.mac, R55) && p.mac != nil && p.mac.Mem() // @ preserves sl.Bytes(p.macBuffers.scionInput, 0, 
len(p.macBuffers.scionInput)) // @ decreases // @ outline ( mac /*@@@*/ := path.MAC(p.mac, ohp.Info, ohp.FirstHop, p.macBuffers.scionInput) // (VerifiedSCION) introduced separate copy to avoid exposing quantified permissions outside the scope of this outline block. macCopy := mac - // @ fold acc(sl.Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R20) - // @ fold acc(sl.Bytes(mac[:], 0, len(mac)), R20) + // @ fold acc(sl.Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R56) + // @ fold acc(sl.Bytes(mac[:], 0, len(mac)), R56) compRes := subtle.ConstantTimeCompare(ohp.FirstHop.Mac[:], mac[:]) == 0 - // @ unfold acc(sl.Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R20) + // @ unfold acc(sl.Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R56) // @ ) if compRes { - // @ defer fold p.scionLayer.Mem(ubScionL) - // @ defer fold s.Path.Mem(ubPath) - // @ defer fold ohp.FirstHop.Mem() // TODO parameter problem -> invalid MAC // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.New("MAC", "expected", fmt.Sprintf("%x", macCopy), "actual", fmt.Sprintf("%x", ohp.FirstHop.Mac), "type", "ohp") /*@ , false, absReturnErr(processResult{}) @*/ } + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) + // @ unfold acc(p.scionLayer.Mem(ubScionL), 1-R15) + // @ unfold acc(s.Path.Mem(ubPath), 1-R50) ohp.Info.UpdateSegID(ohp.FirstHop.Mac /*@, ohp.FirstHop.ToIO_HF() @*/) - // @ fold ohp.FirstHop.Mem() - // @ fold s.Path.Mem(ubPath) - // @ fold p.scionLayer.Mem(ubScionL) + // @ fold acc(s.Path.Mem(ubPath), 1-R50) + // @ fold acc(p.scionLayer.Mem(ubScionL), 1-R15) + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) // (VerifiedSCION) the second parameter was changed from 's' to 'p.scionLayer' due to the // changes made to 'updateSCIONLayer'. @@ -3604,12 +3600,6 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.d.validResult(processResult{}, false) return processResult{}, err /*@ , false, absReturnErr(processResult{}) @*/ } - // @ unfold p.scionLayer.Mem(ubScionL) - // @ defer fold p.scionLayer.Mem(ubScionL) - // @ unfold s.Path.Mem(ubPath) - // @ defer fold s.Path.Mem(ubPath) - // @ unfold ohp.FirstHop.Mem() - // @ defer fold ohp.FirstHop.Mem() // OHP should always be directed to the correct BR. 
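	// In the success case below, the packet leaves on the external batch
	// connection registered for the first hop's ConsEgress interface. In
	// outline (a sketch; the concrete lookup sits in the unchanged context
	// of this hunk):
	//
	//   if c, ok := p.d.external[ohp.FirstHop.ConsEgress]; ok {
	//       return processResult{EgressID: ohp.FirstHop.ConsEgress,
	//           OutConn: c, OutPkt: p.rawPkt}, nil
	//   }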
// @ p.d.getExternalMem() // @ ghost if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } @@ -3618,7 +3608,6 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ assert ohp.FirstHop.ConsEgress in p.d.getDomExternal() // @ p.d.InDomainExternalInForwardingMetrics(ohp.FirstHop.ConsEgress) // @ fold p.d.validResult(processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, false) - // @ TemporaryAssumeForIO(!slayers.IsSupportedPkt(p.rawPkt)) return processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, nil /*@ , false, reveal absIO_val(respr.OutPkt, respr.EgressID) @*/ } @@ -3628,12 +3617,10 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / return processResult{}, serrors.WithCtx(cannotRoute, "type", "ohp", "egress", ohp.FirstHop.ConsEgress, "consDir", ohp.Info.ConsDir) /*@ , false, absReturnErr(processResult{}) @*/ } - // OHP entering our IA // @ p.d.getLocalIA() if !p.d.localIA.Equal(s.DstIA) { // @ establishCannotRoute() - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad destination IA", cannotRoute, "type", "ohp", "ingress", p.ingressID, @@ -3643,13 +3630,13 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / neighborIA := p.d.neighborIAs[p.ingressID] if !neighborIA.Equal(s.SrcIA) { // @ establishCannotRoute() - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad source IA", cannotRoute, "type", "ohp", "ingress", p.ingressID, "neighborIA", neighborIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(processResult{}) @*/ } - + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) + // @ unfold acc(p.scionLayer.Mem(ubScionL), 1-R15) // @ unfold s.Path.Mem(ubPath) // @ unfold ohp.SecondHop.Mem() ohp.SecondHop = path.HopField{ @@ -3665,10 +3652,11 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / ohp.SecondHop.Mac = path.MAC(p.mac, ohp.Info, ohp.SecondHop, p.macBuffers.scionInput) // @ fold ohp.SecondHop.Mem() // @ fold s.Path.Mem(ubPath) + // @ fold acc(p.scionLayer.Mem(ubScionL), 1-R15) + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) // (VerifiedSCION) the second parameter was changed from 's' to 'p.scionLayer' due to the // changes made to 'updateSCIONLayer'. - // @ fold p.scionLayer.Mem(ubScionL) if err := updateSCIONLayer(p.rawPkt, &p.scionLayer /* s */, p.buffer); err != nil { // @ fold p.d.validResult(processResult{}, false) return processResult{}, err /*@ , false, absReturnErr(processResult{}) @*/ @@ -3686,7 +3674,6 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ p.d.getInternal() // @ assert p.d.internal != nil ==> acc(p.d.internal.Mem(), _) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, addrAliases) - // @ TemporaryAssumeForIO(!slayers.IsSupportedPkt(p.rawPkt)) return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@ , addrAliases, reveal absIO_val(respr.OutPkt, 0) @*/ } @@ -3773,10 +3760,16 @@ func addEndhostPort(dst *net.IPAddr) (res *net.UDPAddr) { // the scion.Raw path. 
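// A typical call site, as in processOHP above, re-serializes the header
// in place once the one-hop path has been mutated:
//
//   if err := updateSCIONLayer(p.rawPkt, &p.scionLayer, p.buffer); err != nil {
//       return processResult{}, err
//   }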
// @ requires acc(s.Mem(rawPkt), R00) // @ requires s.HasOneHopPath(rawPkt) +// @ requires sl.Bytes(rawPkt, 0, len(rawPkt)) // @ preserves buffer != nil && buffer.Mem() -// @ preserves sl.Bytes(rawPkt, 0, len(rawPkt)) +// pres for IO: +// @ requires s.EqPathType(rawPkt) +// @ requires !slayers.IsSupportedPkt(rawPkt) +// @ ensures sl.Bytes(rawPkt, 0, len(rawPkt)) // @ ensures acc(s.Mem(rawPkt), R00) // @ ensures res != nil ==> res.ErrorMem() +// post for IO: +// @ ensures res == nil ==> !slayers.IsSupportedPkt(rawPkt) // @ decreases // (VerifiedSCION) the type of 's' was changed from slayers.SCION to *slayers.SCION. This makes // specs a lot easier and, makes the implementation faster as well by avoiding passing large data-structures @@ -3788,10 +3781,12 @@ func updateSCIONLayer(rawPkt []byte, s *slayers.SCION, buffer gopacket.Serialize if err := s.SerializeTo(buffer, gopacket.SerializeOptions{} /*@ , rawPkt @*/); err != nil { return err } + // @ reveal slayers.IsSupportedRawPkt(buffer.View()) // TODO(lukedirtwalker): We should add a method to the scion layers // which can write into the existing buffer, see also the discussion in // https://fsnets.slack.com/archives/C8ADBBG0J/p1592805884250700 rawContents := buffer.Bytes() + // @ assert !(reveal slayers.IsSupportedPkt(rawContents)) // @ s.InferSizeOHP(rawPkt) // @ assert len(rawContents) <= len(rawPkt) // @ unfold sl.Bytes(rawPkt, 0, len(rawPkt)) @@ -3805,6 +3800,7 @@ func updateSCIONLayer(rawPkt []byte, s *slayers.SCION, buffer gopacket.Serialize // @ fold sl.Bytes(rawPkt, 0, len(rawPkt)) // @ fold acc(sl.Bytes(rawContents, 0, len(rawContents)), R20) // @ buffer.RestoreMem(rawContents) + // @ assert !(reveal slayers.IsSupportedPkt(rawPkt)) return nil } diff --git a/verification/dependencies/github.com/google/gopacket/writer.gobra b/verification/dependencies/github.com/google/gopacket/writer.gobra index 7cee58d03..413e26b8b 100644 --- a/verification/dependencies/github.com/google/gopacket/writer.gobra +++ b/verification/dependencies/github.com/google/gopacket/writer.gobra @@ -8,8 +8,9 @@ package gopacket -import . "github.com/scionproto/scion/verification/utils/definitions" -import sl "github.com/scionproto/scion/verification/utils/slices" +import . 
"verification/utils/definitions" +import sl "verification/utils/slices" +import "verification/utils/seqs" type SerializableLayer interface { pred Mem(ubuf []byte) @@ -44,11 +45,18 @@ type SerializeBuffer interface { decreases UBuf() []byte + ghost + pure + requires acc(Mem(), _) + decreases + View() (ghost res seq[byte]) + ghost requires Mem() ensures res === old(UBuf()) ensures sl.Bytes(res, 0, len(res)) ensures MemWithoutUBuf(res) + ensures old(View()) == seqs.ToSeqByte(res) decreases ExchangePred() (res []byte) @@ -56,6 +64,7 @@ type SerializeBuffer interface { requires MemWithoutUBuf(ub) requires sl.Bytes(ub, 0, len(ub)) ensures Mem() && UBuf() === ub + ensures View() == old(seqs.ToSeqByte(ub)) decreases RestoreMem(ghost ub []byte) @@ -63,6 +72,7 @@ type SerializeBuffer interface { ensures res === old(UBuf()) ensures sl.Bytes(res, 0, len(res)) ensures MemWithoutUBuf(res) + ensures old(View()) == seqs.ToSeqByte(res) decreases Bytes() (res []byte) diff --git a/verification/utils/seqs/seqs.gobra b/verification/utils/seqs/seqs.gobra index 6860a3762..af788fbe8 100644 --- a/verification/utils/seqs/seqs.gobra +++ b/verification/utils/seqs/seqs.gobra @@ -16,6 +16,8 @@ package seqs +import sl "verification/utils/slices" + ghost pure requires 0 <= n @@ -36,4 +38,12 @@ requires size >= 0 ensures len(res) == size ensures forall i int :: { res[i] } 0 <= i && i < size ==> res[i] == nil decreases _ -pure func NewSeqByteSlice(size int) (res seq[[]byte]) \ No newline at end of file +pure func NewSeqByteSlice(size int) (res seq[[]byte]) + +ghost +requires acc(sl.Bytes(ub, 0, len(ub)), _) +ensures len(res) == len(ub) +ensures forall i int :: { res[i] } 0 <= i && i < len(ub) ==> + res[i] == sl.GetByte(ub, 0, len(ub), i) +decreases _ +pure func ToSeqByte(ub []byte) (res seq[byte]) \ No newline at end of file diff --git a/verification/utils/slices/slices_test.gobra b/verification/utils/slices/slices_test.gobra index 2d3a10827..399161928 100644 --- a/verification/utils/slices/slices_test.gobra +++ b/verification/utils/slices/slices_test.gobra @@ -16,8 +16,6 @@ package slices -import "github.com/scionproto/scion/verification/utils/seqs" - /** Bytes **/ func Bytes_test() { From 275e0eb6be566103d94eeb390492a086a24b2f97 Mon Sep 17 00:00:00 2001 From: mlimbeck Date: Wed, 24 Jul 2024 14:36:29 +0200 Subject: [PATCH 53/57] fix sytnax errors --- router/dataplane.go | 101 +++++++++++++++++++++++--------------------- 1 file changed, 54 insertions(+), 47 deletions(-) diff --git a/router/dataplane.go b/router/dataplane.go index 1243500fd..7d87238f8 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1595,11 +1595,11 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) // @ } // @ } - // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) - v1, v2 /*@ , addrAliasesPkt @*/ := p.processEPIC( /*@ p.rawPkt, ub == nil, llStart, llEnd @*/ ) + // @ assert sl.Bytes(p.rawPkt, 0, len(p.rawPkt)) + v1, v2 /*@ , addrAliasesPkt, newAbsPkt @*/ := p.processEPIC( /*@ p.rawPkt, ub == nil, llStart, llEnd, ioLock, ioSharedArg, dp @*/ ) // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, v2 == nil, hasHbhLayer, hasE2eLayer) // @ fold p.sInit() - return v1, v2 /*@, addrAliasesPkt, io.IO_val_Unit{} @*/ + return v1, v2 /*@, addrAliasesPkt, newAbsPkt @*/ default: // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) } // @ ResetDecodingLayers(&p.scionLayer, 
&p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) @@ -1797,7 +1797,13 @@ func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil // @ requires acc(&p.rawPkt, R1) && ub === p.rawPkt // @ requires acc(&p.path) // @ requires p.scionLayer.Mem(ub) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) +// pres for IO: +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires dp.Valid() +// @ requires p.scionLayer.EqAbsHeader(ub) && p.scionLayer.EqPathType(ub) && p.scionLayer.ValidScionInitSpec(ub) +// @ requires acc(ioLock.LockP(), _) +// @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) // @ preserves acc(&p.lastLayer, R10) // @ preserves p.lastLayer != nil @@ -1811,23 +1817,23 @@ func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil // @ preserves acc(&p.segmentChange) // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) -// @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) +// @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) // @ ensures acc(&p.d, R5) // @ ensures acc(&p.path) // @ ensures acc(&p.rawPkt, R1) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() -// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), 1 - R15) +// @ ensures acc(sl.Bytes(ub, 0, len(ub)), 1 - R15) // @ ensures p.d.validResult(respr, addrAliasesPkt) // @ ensures addrAliasesPkt ==> ( // @ respr.OutAddr != nil && -// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15))) -// @ ensures !addrAliasesPkt ==> acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) +// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.Bytes(ub, 0, len(ub)), R15))) +// @ ensures !addrAliasesPkt ==> acc(sl.Bytes(ub, 0, len(ub)), R15) // @ ensures respr.OutPkt !== ub && respr.OutPkt != nil ==> -// @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() -func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { +func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { // @ unfold acc(p.scionLayer.Mem(ub), R10) epicPath, ok := p.scionLayer.Path.(*epic.Path) if !ok { @@ -1835,7 +1841,7 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b // @ p.scionLayer.DowngradePerm(ub) // @ establishMemMalformedPath() // @ fold p.d.validResult(respr, false) - return processResult{}, malformedPath /*@ , false @*/ + return processResult{}, malformedPath /*@ , false, io.IO_val_Unit{} @*/ } // @ ghost startP := p.scionLayer.PathStartIdx(ub) @@ -1849,7 +1855,7 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b // @ p.scionLayer.DowngradePerm(ub) // @ establishMemMalformedPath() // @ fold p.d.validResult(respr, false) - return processResult{}, malformedPath /*@ , false @*/ + return 
processResult{}, malformedPath /*@ , false, io.IO_val_Unit{} @*/ } isPenultimate := p.path.IsPenultimateHop( /*@ ubPath[epic.MetadataLen:] @*/ ) @@ -1857,9 +1863,9 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b // @ fold acc(epicPath.Mem(ubPath), R10) // @ fold acc(p.scionLayer.Mem(ub), R10) - result, err /*@ , addrAliases @*/ := p.process( /*@ ub, llIsNil, startLL, endLL @*/ ) + result, err /*@ , addrAliases, newAbsPkt @*/ := p.process( /*@ ub, llIsNil, startLL, endLL, ioLock, ioSharedArg, dp @*/ ) if err != nil { - return result, err /*@ , addrAliases @*/ + return result, err /*@ , addrAliases, newAbsPkt @*/ } // @ TODO() @@ -1868,7 +1874,7 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b if err != nil { // @ p.scionLayer.DowngradePerm(ub) // @ fold p.d.validResult(respr, false) - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, io.IO_val_Unit{} @*/ } timestamp := time.Unix(int64(firstInfo.Timestamp), 0) @@ -1877,7 +1883,7 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b // @ p.scionLayer.DowngradePerm(ub) // @ fold p.d.validResult(respr, false) // TODO(mawyss): Send back SCMP packet - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, io.IO_val_Unit{} @*/ } HVF := epicPath.PHVF @@ -1889,11 +1895,11 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b if err != nil { // @ p.scionLayer.DowngradePerm(ub) // TODO(mawyss): Send back SCMP packet - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, io.IO_val_Unit{} @*/ } } - return result, nil /*@ , false @*/ + return result, nil /*@ , false, io.IO_val_Unit{} @*/ } // scionPacketProcessor processes packets. It contains pre-allocated per-packet @@ -2009,9 +2015,10 @@ func (p *scionPacketProcessor) packSCMP( // @ decreases func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { var err error - - // @ unfold acc(p.scionLayer.Path.Mem(ubPath), R7) - // @ defer fold acc(p.scionLayer.Path.Mem(ubPath), R7) + // @ ghost ubPath := p.scionLayer.UBPath(ub) + // @ ghost ubScionPath := p.scionLayer.UBScionPath(ub) + // @ unfold acc(p.scionLayer.Path.Mem(ubScionPath), R7) + // @ defer fold acc(p.scionLayer.Path.Mem(ubScionPath), R7) // @ ghost startScionP := p.scionLayer.PathScionStartIdx(ub) // @ ghost endScionP := p.scionLayer.PathScionEndIdx(ub) // @ ghost ubScionPath := ub[startScionP:endScionP] @@ -2020,12 +2027,12 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // (VerifiedSCION) Due to an incompleteness (https://github.com/viperproject/gobra/issues/770), // we introduce a temporary variable to be able to call `path.AbsMacArrayCongruence()`. 
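	// Concretely, the decoded hop field is first bound to tmpHopField and
	// only then assigned to p.hopField; path.AbsMacArrayCongruence can then
	// relate the MAC arrays of both values, from which Gobra derives
	// p.hopField.ToIO_HF() == tmpHopField.ToIO_HF() and, via the asserts
	// below, that p.hopField was correctly decoded.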
var tmpHopField path.HopField - tmpHopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ) + tmpHopField, err = p.path.GetCurrentHopField( /*@ ubScionPath @*/ ) p.hopField = tmpHopField // @ path.AbsMacArrayCongruence(p.hopField.Mac, tmpHopField.Mac) // @ assert p.hopField.ToIO_HF() == tmpHopField.ToIO_HF() - // @ assert err == nil ==> reveal p.path.CorrectlyDecodedHf(ubPath, tmpHopField) - // @ assert err == nil ==> reveal p.path.CorrectlyDecodedHf(ubPath, p.hopField) + // @ assert err == nil ==> reveal p.path.CorrectlyDecodedHf(ubScionPath, tmpHopField) + // @ assert err == nil ==> reveal p.path.CorrectlyDecodedHf(ubScionPath, p.hopField) // @ fold p.d.validResult(processResult{}, false) if err != nil { // TODO(lukedirtwalker) parameter problem invalid path? @@ -2041,7 +2048,7 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // (VerifiedSCION) The version verified here is prior to the support of peering // links, so we do not check the Peering flag here. hasSingletonSegment := - // @ unfolding acc(p.path.Mem(ubPath), _) in + // @ unfolding acc(p.path.Mem(ubScionPath), _) in // @ unfolding acc(p.path.Base.Mem(), _) in p.path.PathMeta.SegLen[0] == 1 || p.path.PathMeta.SegLen[1] == 1 || @@ -2050,18 +2057,18 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // @ establishMemMalformedPath() return processResult{}, malformedPath } - if !p.path.CurrINFMatchesCurrHF( /*@ ubPath @*/ ) { + if !p.path.CurrINFMatchesCurrHF( /*@ ubScionPath @*/ ) { // @ establishMemMalformedPath() return processResult{}, malformedPath } - // @ p.EstablishEqAbsHeader(ub, startP, endP) - // @ p.path.EstablishValidPktMetaHdr(ubPath) - // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) + // @ p.EstablishEqAbsHeader(ub, startScionP, endScionP) + // @ p.path.EstablishValidPktMetaHdr(ubScionPath) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) // @ absPktFutureLemma(ub) - // @ p.path.DecodingLemma(ubPath, p.infoField, p.hopField) - // @ assert reveal p.path.EqAbsInfoField(p.path.absPkt(ubPath), + // @ p.path.DecodingLemma(ubScionPath, p.infoField, p.hopField) + // @ assert reveal p.path.EqAbsInfoField(p.path.absPkt(ubScionPath), // @ p.infoField.ToAbsInfoField()) - // @ assert reveal p.path.EqAbsHopField(p.path.absPkt(ubPath), + // @ assert reveal p.path.EqAbsHopField(p.path.absPkt(ubScionPath), // @ p.hopField.ToIO_HF()) // @ assert reveal p.EqAbsHopField(absPkt(ub)) // @ assert reveal p.EqAbsInfoField(absPkt(ub)) @@ -2493,8 +2500,8 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte @*/ ) (err error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost ubScionPath := p.scionLayer.UBScionPath(ub) - // @ ghost startScion := p.scionLayer.PathScionStartIdx(ub) - // @ ghost endScion := p.scionLayer.PathScionEndIdx(ub) + // @ ghost startScionP := p.scionLayer.PathScionStartIdx(ub) + // @ ghost endScionP := p.scionLayer.PathScionEndIdx(ub) // @ unfold acc(p.scionLayer.Mem(ub), R20) // @ defer fold acc(p.scionLayer.Mem(ub), R20) @@ -2515,26 +2522,26 @@ func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte // @ old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) // (VerifiedSCION) the following property is guaranteed by the type system, but Gobra cannot infer it yet // @ assume 0 <= p.path.GetCurrINF(ubScionPath) - // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) - // @ 
sl.SplitByIndex_Bytes(ub, 0, start, slayers.CmnHdrLen, R54) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) - // @ p.AbsPktToSubSliceAbsPkt(ub, start, end) - // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, start) - // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) + // @ p.AbsPktToSubSliceAbsPkt(ub, startScionP, endScionP) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startScionP) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubScionPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubScionPath, @*/); err != nil { // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) - // @ sl.CombineAtIndex_Bytes(ub, 0, start, slayers.CmnHdrLen, R54) - // @ ghost sl.CombineRange_Bytes(ub, start, end, writePerm) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) return serrors.WrapStr("update info field", err) } - // @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) - // @ sl.CombineAtIndex_Bytes(ub, 0, start, slayers.CmnHdrLen, R54) - // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, start) - // @ p.SubSliceAbsPktToAbsPkt(ub, start, end) - // @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startScionP) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) // @ absPktFutureLemma(ub) // @ assert absPkt(ub).CurrSeg.UInfo == // @ old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) @@ -2785,7 +2792,7 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ absPktFutureLemma(ub) // @ assert absPkt(ub) == reveal AbsProcessEgress(old(absPkt(ub))) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { - // @ acc(fold p.scionLayer.Path.Mem(ubPath), 1-R55) + // @ fold acc(p.scionLayer.Path.Mem(ubPath), 1-R55) // @ } // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return nil From 353786e3743f6ae3ba26fbdcc1129b6c55237aff Mon Sep 17 00:00:00 2001 From: mlimbeck Date: Wed, 24 Jul 2024 18:01:21 +0200 Subject: [PATCH 54/57] fix some verification errors --- router/dataplane.go | 113 +++++++++++++++++++----------------- router/io-spec-lemmas.gobra | 37 ++++++------ 2 files changed, 80 insertions(+), 70 deletions(-) diff --git a/router/dataplane.go b/router/dataplane.go index ca0ffa1e9..b6ca1c2c6 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1588,13 +1588,16 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ fold p.sInit() return v1, v2 /*@, addrAliasesPkt, newAbsPkt @*/ case epic.PathType: - // @ sl.CombineRange_Bytes(ub, start, end, writePerm) + // @ sl.CombineRange_Bytes(ub, start, end, HalfPerm) // @ ghost if lastLayerIdx >= 0 { // @ ghost if !offsets[lastLayerIdx].isNil { // @ o := offsets[lastLayerIdx] - // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) + // 
@ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) // @ } // @ } + // @ unfold acc(p.d.Mem(), _) + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) + // @ assert !(reveal slayers.IsSupportedPkt(p.rawPkt)) // @ assert sl.Bytes(p.rawPkt, 0, len(p.rawPkt)) v1, v2 /*@ , addrAliasesPkt, newAbsPkt @*/ := p.processEPIC( /*@ p.rawPkt, ub == nil, llStart, llEnd, ioLock, ioSharedArg, dp @*/ ) // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, v2 == nil, hasHbhLayer, hasE2eLayer) @@ -1798,12 +1801,6 @@ func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil // @ requires acc(&p.path) // @ requires p.scionLayer.Mem(ub) // @ requires sl.Bytes(ub, 0, len(ub)) -// pres for IO: -// @ requires p.d.DpAgreesWithSpec(dp) -// @ requires dp.Valid() -// @ requires p.scionLayer.EqAbsHeader(ub) && p.scionLayer.EqPathType(ub) && p.scionLayer.ValidScionInitSpec(ub) -// @ requires acc(ioLock.LockP(), _) -// @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) // @ preserves acc(&p.lastLayer, R10) // @ preserves p.lastLayer != nil @@ -1814,7 +1811,7 @@ func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil // @ preserves acc(&p.ingressID, R20) // @ preserves acc(&p.infoField) // @ preserves acc(&p.hopField) -// @ preserves acc(&p.segmentChange) +// @ preserves acc(&p.segmentChange) && !p.segmentChange // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) // @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) @@ -1833,7 +1830,20 @@ func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil // @ ensures respr.OutPkt !== ub && respr.OutPkt != nil ==> // @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires p.scionLayer.EqPathType(p.rawPkt) +// @ requires !slayers.IsSupportedPkt(p.rawPkt) +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires dp.Valid() +// @ requires acc(ioLock.LockP(), _) +// @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) && +// @ newAbsPkt.isIO_val_Unsupported +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { + // @ TODO() // @ unfold acc(p.scionLayer.Mem(ub), R10) epicPath, ok := p.scionLayer.Path.(*epic.Path) if !ok { @@ -1997,10 +2007,7 @@ func (p *scionPacketProcessor) packSCMP( // @ ensures p.path === p.scionLayer.GetScionPath(ub) // @ ensures acc(&p.hopField) && acc(&p.infoField) // @ ensures respr === processResult{} -// @ ensures reserr == nil ==> -// @ let ubPath := p.scionLayer.UBPath(ub) in -// @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ p.path.GetBase(ubPath).Valid() +// @ ensures reserr == nil ==> p.scionLayer.ValidPathMetaData(ub) // @ ensures acc(p.scionLayer.Mem(ub), R6) // @ ensures p.d.validResult(respr, false) // @ ensures reserr != nil ==> reserr.ErrorMem() @@ -2015,13 +2022,14 @@ func (p 
*scionPacketProcessor) packSCMP( // @ decreases func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { var err error + // @ unfold acc(p.scionLayer.Mem(ub), R6) + // @ defer fold acc(p.scionLayer.Mem(ub), R6) // @ ghost ubPath := p.scionLayer.UBPath(ub) - // @ ghost ubScionPath := p.scionLayer.UBScionPath(ub) - // @ unfold acc(p.scionLayer.Path.Mem(ubScionPath), R7) - // @ defer fold acc(p.scionLayer.Path.Mem(ubScionPath), R7) + // @ unfold acc(p.scionLayer.Path.Mem(ubPath), R7) + // @ defer fold acc(p.scionLayer.Path.Mem(ubPath), R7) // @ ghost startScionP := p.scionLayer.PathScionStartIdx(ub) // @ ghost endScionP := p.scionLayer.PathScionEndIdx(ub) - // @ ghost ubScionPath := ub[startScionP:endScionP] + // @ ghost ubScionPath := p.scionLayer.UBScionPath(ub) // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, R2) // @ ghost defer sl.CombineRange_Bytes(ub, startScionP, endScionP, R2) // (VerifiedSCION) Due to an incompleteness (https://github.com/viperproject/gobra/issues/770), @@ -2164,11 +2172,11 @@ func (p *scionPacketProcessor) validateIngressID( /*@ ghost oldPkt io.IO_pkt2 @* } // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ requires acc(p.scionLayer.Mem(ubScionL), R19) +// @ requires acc(p.scionLayer.Mem(ub), R19) // @ requires acc(&p.path, R20) -// @ requires p.path === p.scionLayer.GetScionPath(ubScionL) +// @ requires p.path === p.scionLayer.GetScionPath(ub) // @ preserves acc(&p.ingressID, R21) -// @ ensures acc(p.scionLayer.Mem(ubScionL), R19) +// @ ensures acc(p.scionLayer.Mem(ub), R19) // @ ensures acc(&p.path, R20) // @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) @@ -2176,33 +2184,33 @@ func (p *scionPacketProcessor) validateIngressID( /*@ ghost oldPkt io.IO_pkt2 @* // @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec -// @ requires acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R20) -// @ requires slayers.ValidPktMetaHdr(ubScionL) && p.scionLayer.EqAbsHeader(ubScionL) -// @ ensures acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R20) -// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ubScionL) -// @ ensures reserr == nil ==> p.DstIsLocalIngressID(ubScionL) -// @ ensures reserr == nil ==> p.LastHopLen(ubScionL) +// @ requires acc(sl.Bytes(ub, 0, len(ub)), R20) +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures acc(sl.Bytes(ub, 0, len(ub)), R20) +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) +// @ ensures reserr == nil ==> p.DstIsLocalIngressID(ub) +// @ ensures reserr == nil ==> p.LastHopLen(ub) // @ ensures reserr != nil && respr.OutPkt != nil ==> // @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ ) (respr processResult, reserr error) { - // @ ghost ubPath := p.scionLayer.UBPath(ubScionL) - // @ unfold acc(p.scionLayer.Mem(ubScionL), R20) - // @ defer fold acc(p.scionLayer.Mem(ubScionL), R20) - // @ ghost startP := p.scionLayer.PathStartIdx(ubScionL) - // @ ghost endP := p.scionLayer.PathEndIdx(ubScionL) - // @ ghost ubScionPath := p.scionLayer.UBScionPath(ubScionL) - // @ sl.SplitRange_Bytes(ubScionL, startP, endP, R50) - // @ p.AbsPktToSubSliceAbsPkt(ubScionL, startP, endP) - // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ubScionL, startP) - // @ ghost defer sl.CombineRange_Bytes(ubScionL, startP, endP, R50) - // @ unfold 
acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20) - // @ defer fold acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20) - // @ p.d.getLocalIA() +func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { + // @ ghost ubPath := p.scionLayer.UBPath(ub) + // @ unfold acc(p.scionLayer.Mem(ub), R20) + // @ defer fold acc(p.scionLayer.Mem(ub), R20) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { // @ unfold acc(p.scionLayer.Path.Mem(ubPath), R20) // @ defer fold acc(p.scionLayer.Path.Mem(ubPath), R20) // @ } + // @ ghost startScionP := p.scionLayer.PathScionStartIdx(ub) + // @ ghost endScionP := p.scionLayer.PathScionEndIdx(ub) + // @ ghost ubScionPath := p.scionLayer.UBScionPath(ub) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, R50) + // @ p.AbsPktToSubSliceAbsPkt(ub, startScionP, endScionP) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startScionP) + // @ ghost defer sl.CombineRange_Bytes(ub, startScionP, endScionP, R50) + // @ unfold acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R20) + // @ defer fold acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R20) + // @ p.d.getLocalIA() srcIsLocal := (p.scionLayer.SrcIA == p.d.localIA) dstIsLocal := (p.scionLayer.DstIA == p.d.localIA) if p.ingressID == 0 { @@ -2230,20 +2238,20 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ ) } // @ ghost if(p.path.IsLastHopSpec(ubPath)) { // @ p.path.LastHopLemma(ubPath) - // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ubScionL, startP) - // @ p.SubSliceAbsPktToAbsPkt(ubScionL, startP, endP) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startScionP) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) // @ } } // @ fold p.d.validResult(processResult{}, false) - // @ assert (unfolding acc(p.scionLayer.Mem(ubScionL), R55) in - // @ (unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in + // @ assert (unfolding acc(p.scionLayer.Mem(ub), R55) in + // @ (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0 - // @ assert (unfolding acc(p.scionLayer.Mem(ubScionL), R55) in - // @ (unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in + // @ assert (unfolding acc(p.scionLayer.Mem(ub), R55) in + // @ (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.path.IsLastHopSpec(ubPath) - // @ assert reveal p.DstIsLocalIngressID(ubScionL) - // @ assert reveal p.LastHopLen(ubScionL) + // @ assert reveal p.DstIsLocalIngressID(ub) + // @ assert reveal p.LastHopLen(ub) return processResult{}, nil } @@ -2281,9 +2289,7 @@ func (p *scionPacketProcessor) invalidDstIA() (processResult, error) { // @ requires p.path === p.scionLayer.GetScionPath(ub) // @ requires acc(&p.ingressID, R21) // @ requires acc(&p.infoField, R4) && acc(&p.hopField, R4) -// @ requires let ubPath := p.scionLayer.UBPath(ub) in -// @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ p.path.GetBase(ubPath).Valid() +// @ requires p.scionLayer.ValidPathMetaData(ub) // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) // @ requires acc(&p.srcAddr, R20) && acc(p.srcAddr.Mem(), _) // @ preserves acc(sl.Bytes(ub, 0, len(ub)), R4) @@ -2727,7 +2733,6 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ unfold 
acc(p.scionLayer.Mem(ub), 1-R55) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { - // TODO: Markus two unfolds neccessary? // @ unfold acc(p.scionLayer.Path.Mem(ubPath), R55) // @ } // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) @@ -2742,7 +2747,6 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ reveal p.scionLayer.ValidHeaderOffset(ub, startScionP) // @ unfold acc(p.scionLayer.Mem(ub), R55) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { - // TODO: Markus two unfolds neccessary? // @ unfold acc(p.scionLayer.Path.Mem(ubPath), R55) // @ } // we are the egress router and if we go in construction direction we @@ -2780,6 +2784,9 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // TODO parameter problem invalid path return serrors.WrapStr("incrementing path", err) } + // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { + // @ fold acc(p.scionLayer.Path.Mem(ubPath), R55) + // @ } // @ fold acc(p.scionLayer.Mem(ub), R55) // @ assert reveal p.scionLayer.ValidHeaderOffset(ub, startScionP) // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index ba26a992a..2d38dc9cc 100644 --- a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -20,6 +20,7 @@ import ( "sync" "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" + "github.com/scionproto/scion/pkg/slayers/path/epic" "github.com/scionproto/scion/pkg/slayers" "verification/dependencies/encoding/binary" io "verification/io" @@ -154,12 +155,14 @@ func (p* scionPacketProcessor) LocalDstLemma(ub []byte) { ghost requires acc(p.scionLayer.Mem(ub), R55) -requires acc(&p.path, R55) && p.path == p.scionLayer.GetPath(ub) +requires acc(&p.path, R55) && p.path == p.scionLayer.GetScionPath(ub) decreases pure func (p* scionPacketProcessor) GetIsXoverSpec(ub []byte) bool { return let ubPath := p.scionLayer.UBPath(ub) in + let ubScionPath := p.scionLayer.UBScionPath(ub) in unfolding acc(p.scionLayer.Mem(ub), R55) in - p.path.GetBase(ubPath).IsXoverSpec() + unfolding acc(p.scionLayer.Path.Mem(ubPath), _) in + p.path.GetBase(ubScionPath).IsXoverSpec() } ghost @@ -212,17 +215,17 @@ requires acc(p.scionLayer.Mem(ub), R55) requires acc(sl.Bytes(ub, 0, len(ub)), R50) requires acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) -requires p.path === p.scionLayer.GetPath(ub) -requires start == p.scionLayer.PathStartIdx(ub) -requires end == p.scionLayer.PathEndIdx(ub) +requires p.path === p.scionLayer.GetScionPath(ub) +requires start == p.scionLayer.PathScionStartIdx(ub) +requires end == p.scionLayer.PathScionEndIdx(ub) requires p.scionLayer.EqAbsHeader(ub) requires p.scionLayer.ValidScionInitSpec(ub) ensures acc(sl.Bytes(ub, 0, len(ub)), R50) ensures acc(p.scionLayer.Mem(ub), R55) ensures acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) -ensures start == p.scionLayer.PathStartIdx(ub) -ensures end == p.scionLayer.PathEndIdx(ub) +ensures start == p.scionLayer.PathScionStartIdx(ub) +ensures end == p.scionLayer.PathScionEndIdx(ub) ensures p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) ensures p.path.GetBase(ub[start:end]).WeaklyValid() ensures p.scionLayer.ValidHeaderOffset(ub, len(ub)) @@ -248,10 +251,10 @@ requires acc(p.scionLayer.Mem(ub), R55) requires acc(sl.Bytes(ub, 
0, len(ub)), R50) requires acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) -requires p.path === p.scionLayer.GetPath(ub) +requires p.path === p.scionLayer.GetScionPath(ub) requires slayers.ValidPktMetaHdr(ub) -requires start == p.scionLayer.PathStartIdx(ub) -requires end == p.scionLayer.PathEndIdx(ub) +requires start == p.scionLayer.PathScionStartIdx(ub) +requires end == p.scionLayer.PathScionEndIdx(ub) requires p.scionLayer.EqAbsHeader(ub) ensures acc(sl.Bytes(ub, 0, len(ub)), R50) ensures acc(p.scionLayer.Mem(ub), R55) @@ -259,8 +262,8 @@ ensures acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) ensures slayers.ValidPktMetaHdr(ub) ensures p.scionLayer.EqAbsHeader(ub) -ensures start == p.scionLayer.PathStartIdx(ub) -ensures end == p.scionLayer.PathEndIdx(ub) +ensures start == p.scionLayer.PathScionStartIdx(ub) +ensures end == p.scionLayer.PathScionEndIdx(ub) ensures scion.validPktMetaHdr(ub[start:end]) ensures p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) ensures p.scionLayer.ValidHeaderOffset(ub, len(ub)) @@ -310,10 +313,10 @@ requires acc(p.scionLayer.Mem(ub), R55) requires acc(sl.Bytes(ub, 0, len(ub)), R50) requires acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) -requires p.path === p.scionLayer.GetPath(ub) +requires p.path === p.scionLayer.GetScionPath(ub) requires scion.validPktMetaHdr(ub[start:end]) -requires start == p.scionLayer.PathStartIdx(ub) -requires end == p.scionLayer.PathEndIdx(ub) +requires start == p.scionLayer.PathScionStartIdx(ub) +requires end == p.scionLayer.PathScionEndIdx(ub) requires p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) requires p.scionLayer.ValidHeaderOffset(ub, len(ub)) ensures acc(sl.Bytes(ub, 0, len(ub)), R50) @@ -321,8 +324,8 @@ ensures acc(p.scionLayer.Mem(ub), R55) ensures acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) ensures slayers.ValidPktMetaHdr(ub) -ensures start == p.scionLayer.PathStartIdx(ub) -ensures end == p.scionLayer.PathEndIdx(ub) +ensures start == p.scionLayer.PathScionStartIdx(ub) +ensures end == p.scionLayer.PathScionEndIdx(ub) ensures scion.validPktMetaHdr(ub[start:end]) ensures p.scionLayer.EqAbsHeader(ub) ensures absPkt(ub) == p.path.absPkt(ub[start:end]) From 70bd6bda299715a79f9424f726305539380cd44a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Thu, 25 Jul 2024 16:04:27 +0200 Subject: [PATCH 55/57] make `hf_valid` opaque (#372) * make hf_valid opaque * backup * backup * might have to reverse this * backup * backup * Update router/io-spec-abstract-transitions.gobra --- .../github.com/google/gopacket/base.gobra | 4 +- verification/io/dataplane_abstract.gobra | 12 ++-- verification/io/other_defs.gobra | 1 + verification/io/router.gobra | 61 +++++++++++++------ 4 files changed, 52 insertions(+), 26 deletions(-) diff --git a/verification/dependencies/github.com/google/gopacket/base.gobra b/verification/dependencies/github.com/google/gopacket/base.gobra index 6a55f340e..9c0ae5c81 100644 --- a/verification/dependencies/github.com/google/gopacket/base.gobra +++ b/verification/dependencies/github.com/google/gopacket/base.gobra @@ -60,8 +60,8 @@ ensures len(res) != 0 ==> res === ub[start:end] decreases func (p Payload) Payload(ghost ub []byte) (res []byte, ghost start int, ghost end int) { res = 
[]byte(p) - assert unfolding acc(p.Mem(ub), R20) in true - return res, 0, len(p) + assert unfolding acc(p.Mem(ub), R20) in true + return res, 0, len(p) } requires b != nil && b.Mem() diff --git a/verification/io/dataplane_abstract.gobra b/verification/io/dataplane_abstract.gobra index ba717326a..e78c901e9 100644 --- a/verification/io/dataplane_abstract.gobra +++ b/verification/io/dataplane_abstract.gobra @@ -21,16 +21,16 @@ package io // to the interface y of AS a2. type DataPlaneSpec adt { DataPlaneSpec_{ - linkTypes dict[IO_ifs]IO_Link - neighborIAs dict[IO_ifs]IO_as - localIA IO_as - links dict[AsIfsPair]AsIfsPair + linkTypes dict[IO_ifs]IO_Link + neighborIAs dict[IO_ifs]IO_as + localIA IO_as + links dict[AsIfsPair]AsIfsPair } } type AsIfsPair struct { - asid IO_as - ifs IO_ifs + asid IO_as + ifs IO_ifs } ghost diff --git a/verification/io/other_defs.gobra b/verification/io/other_defs.gobra index a5c48ccae..7f600a457 100644 --- a/verification/io/other_defs.gobra +++ b/verification/io/other_defs.gobra @@ -200,6 +200,7 @@ type IO_dp2_state adt { } ghost +opaque decreases pure func (m IO_msgterm) extract_asid() IO_as { return m.MsgTerm_Hash_.MsgTerm_MPair_1.MsgTerm_Key_.Key_macK_ diff --git a/verification/io/router.gobra b/verification/io/router.gobra index f1741bed5..533bdee8d 100644 --- a/verification/io/router.gobra +++ b/verification/io/router.gobra @@ -27,47 +27,72 @@ pure func if2term(ifs option[IO_ifs]) IO_msgterm { case none[IO_ifs]: MsgTerm_Empty{} default: - IO_msgterm(MsgTerm_AS{IO_as(get(ifs))}) + MsgTerm_AS{IO_as(get(ifs))} } } ghost decreases pure func (dp DataPlaneSpec) hf_valid(d bool, ts uint, uinfo set[IO_msgterm], hf IO_HF) bool { + return hf_valid_impl(dp.Asid(), ts, uinfo, hf) +} + +ghost +decreases +pure func hf_valid_impl(asid IO_as, ts uint, uinfo set[IO_msgterm], hf IO_HF) bool { return let inif := hf.InIF2 in let egif := hf.EgIF2 in - let x := hf.HVF in - let l := IO_msgterm(MsgTerm_L{ - seq[IO_msgterm]{ - IO_msgterm(MsgTerm_Num{ts}), - if2term(inif), - if2term(egif), - IO_msgterm(MsgTerm_FS{uinfo})}}) in - x == mac(macKey(asidToKey(dp.Asid())), l) + let hvf := hf.HVF in + let next := nextMsgtermSpec(asid, inif, egif, ts, uinfo) in + hvf == next +} + +ghost +opaque +ensures result.extract_asid() == asid +decreases +pure func nextMsgtermSpec(asid IO_as, inif option[IO_ifs], egif option[IO_ifs], ts uint, uinfo set[IO_msgterm]) (result IO_msgterm) { + return let l := plaintextToMac(inif, egif, ts, uinfo) in + let res := mac(macKey(asidToKey(asid)), l) in + let _ := reveal res.extract_asid() in + res +} + +ghost +decreases +pure func plaintextToMac(inif option[IO_ifs], egif option[IO_ifs], ts uint, uinfo set[IO_msgterm]) IO_msgterm { + return MsgTerm_L { + seq[IO_msgterm]{ + MsgTerm_Num{ts}, + if2term(inif), + if2term(egif), + MsgTerm_FS{uinfo}, + }, + } } ghost decreases pure func macKey(key IO_key) IO_msgterm { - return IO_msgterm(MsgTerm_Key{key}) + return MsgTerm_Key{key} } ghost decreases pure func mac(fst IO_msgterm, snd IO_msgterm) IO_msgterm { - return IO_msgterm( MsgTerm_Hash { - MsgTerm_Hash_ : IO_msgterm( MsgTerm_MPair{ - MsgTerm_MPair_1 : fst, - MsgTerm_MPair_2 : snd, - }), - }) + return MsgTerm_Hash { + MsgTerm_Hash_: MsgTerm_MPair { + MsgTerm_MPair_1: fst, + MsgTerm_MPair_2: snd, + }, + } } // helper function, not defined in IO spec ghost decreases -pure func asidToKey(asid IO_as) IO_key{ - return IO_key(Key_macK{asid}) +pure func asidToKey(asid IO_as) IO_key { + return Key_macK{asid} } ghost From 76b066125337d98f5a65b9edea3901329a2471ce Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Fri, 26 Jul 2024 10:28:34 +0200 Subject: [PATCH 56/57] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a1c68552b..07834b485 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # VerifiedSCION -This package contains the **verified** implementation of the +This package contains the **verified** implementation of the router from the [SCION](http://www.scion-architecture.net) protocol, a future Internet architecture. SCION is the first clean-slate Internet architecture designed to provide route control, failure @@ -10,7 +10,7 @@ isolation, and explicit trust information for end-to-end communication. To find out more about the project, please visit the [official project page](https://www.pm.inf.ethz.ch/research/verifiedscion.html). -> We are currently in the process of migrating the specifications and other annotations from the [original VerifiedSCION repository](https://github.com/jcp19/VerifiedSCION) to this one. This repository contains an up-to-date version of SCION (which we plan to keep updated), as well as improvements resulting from our experience from our first efforts on verifying SCION. +> This repository contains a recent version of SCION (which we plan to keep updated), as well as fixes to the bugs we report as a result of verifying the SCION router from the mainline SCION repository. ## Methodology We focus on verifying the main implementation of SCION, written in the *Go* programming language. From 22bf818d3770889f5698b7402e4f407355966569 Mon Sep 17 00:00:00 2001 From: mlimbeck Date: Mon, 12 Aug 2024 19:34:01 +0200 Subject: [PATCH 57/57] progress with verification errors --- router/dataplane.go | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/router/dataplane.go b/router/dataplane.go index b6ca1c2c6..22d1cd639 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -2236,8 +2236,8 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ub []byte @*/ ) (resp // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.invalidDstIA() } - // @ ghost if(p.path.IsLastHopSpec(ubPath)) { - // @ p.path.LastHopLemma(ubPath) + // @ ghost if(p.path.IsLastHopSpec(ubScionPath)) { + // @ p.path.LastHopLemma(ubScionPath) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startScionP) // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) // @ } @@ -2249,7 +2249,7 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ub []byte @*/ ) (resp // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0 // @ assert (unfolding acc(p.scionLayer.Mem(ub), R55) in // @ (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in - // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.path.IsLastHopSpec(ubPath) + // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.path.IsLastHopSpec(ubScionPath) // @ assert reveal p.DstIsLocalIngressID(ub) // @ assert reveal p.LastHopLen(ub) return processResult{}, nil @@ -2713,7 +2713,7 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( // @ ensures sl.Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) -// @ ensures reserr == nil ==> p.path == p.scionLayer.GetScionPath(ub) // TODO: (Markus) check if neccessary? 
+// @ ensures reserr == nil ==> p.path == p.scionLayer.GetScionPath(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() // @ ensures reserr != nil ==> reserr.ErrorMem() // Postconditions for IO: @@ -2733,7 +2733,7 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ unfold acc(p.scionLayer.Mem(ub), 1-R55) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { - // @ unfold acc(p.scionLayer.Path.Mem(ubPath), R55) + // @ unfold acc(p.scionLayer.Path.Mem(ubPath), 1-R55) // @ } // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) // @ sl.SplitByIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) @@ -2816,7 +2816,12 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) // @ requires p.GetIsXoverSpec(ub) // @ requires let ubPath := p.scionLayer.UBPath(ub) in -// @ (unfolding acc(p.scionLayer.Mem(ub), _) in p.path.GetBase(ubPath)) == currBase +// @ let ubScionPath := p.scionLayer.UBScionPath(ub) in +// @ (unfolding acc(p.scionLayer.Mem(ub), _) in +// @ typeOf(p.scionLayer.Path) == *epic.Path ? +// @ (unfolding acc(p.scionLayer.Path.Mem(ubPath), _) in +// @ p.path.GetBase(ubScionPath)) == currBase : +// @ p.path.GetBase(ubScionPath) == currBase) // @ requires currBase.Valid() // @ ensures acc(&p.segmentChange) // @ ensures acc(&p.hopField) @@ -2841,11 +2846,15 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) // @ ensures reserr == nil ==> absPkt(ub) == AbsDoXover(old(absPkt(ub))) // @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) -// @ ensures reserr == nil ==> -// @ let ubPath := p.scionLayer.UBPath(ub) in +// @ ensures reserr == nil ==> +// @ let ubPath := p.scionLayer.UBPath(ub) in +// @ let ubScionPath := p.scionLayer.UBScionPath(ub) in // @ (unfolding acc(p.scionLayer.Mem(ub), _) in -// @ p.path === p.scionLayer.GetScionPath(ub) && -// @ p.path.GetBase(ubPath) == currBase.IncPathSpec() && +// @ p.path == p.scionLayer.GetScionPath(ub) && +// @ (typeOf(p.scionLayer.Path) == *epic.Path ? +// @ (unfolding acc(p.scionLayer.Path.Mem(ubPath), _) in +// @ p.path.GetBase(ubScionPath)) == currBase.IncPathSpec() : +// @ p.path.GetBase(ubScionPath) == currBase.IncPathSpec()) && // @ currBase.IncPathSpec().Valid()) // @ decreases func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost currBase scion.Base @*/ ) (respr processResult, reserr error) { @@ -2868,7 +2877,7 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost currBase scio // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) // @ p.AbsPktToSubSliceAbsPkt(ub, startScionP, endScionP) // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startScionP) - // @ p.path.XoverLemma(ubPath) + // @ p.path.XoverLemma(ubScionPath) // @ reveal p.EqAbsInfoField(absPkt(ub)) // @ reveal p.EqAbsHopField(absPkt(ub)) // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm)