Merge pull request #43 from Jigsaw-Code/bemasc-replay-min
Add a minimal replay defense
Showing 9 changed files with 454 additions and 52 deletions.
@@ -0,0 +1,102 @@
// Copyright 2020 Jigsaw Operations LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package shadowsocks

import (
    "encoding/binary"
    "sync"
)

// MaxCapacity is the largest allowed size of ReplayCache.
//
// Capacities in excess of 20,000 are not recommended, due to the false
// positive rate of up to 2 * capacity / 2^32 = 1 / 100,000.  If larger
// capacities are desired, the key type should be changed to uint64.
const MaxCapacity = 20_000

type empty struct{}

// ReplayCache allows us to check whether a handshake salt was used within
// the last `capacity` handshakes.  It requires approximately 20*capacity
// bytes of memory (as measured by BenchmarkReplayCache_Creation).
//
// The nil and zero values represent a cache with capacity 0, i.e. no cache.
type ReplayCache struct {
    mutex    sync.Mutex
    capacity int
    active   map[uint32]empty
    archive  map[uint32]empty
}

// NewReplayCache returns a fresh ReplayCache that promises to remember at least
// the most recent `capacity` handshakes.
func NewReplayCache(capacity int) ReplayCache {
    if capacity > MaxCapacity {
        panic("ReplayCache capacity would result in too many false positives")
    }
    return ReplayCache{
        capacity: capacity,
        active:   make(map[uint32]empty, capacity),
        // `archive` is read-only and initially empty.
    }
}

// Trivially reduces the key and salt to a uint32, avoiding collisions
// in case of salts with a shared prefix or suffix.  Salts are normally
// random, but in principle a client might use a counter instead, so
// using only the prefix or suffix is not sufficient.  Including the key
// ID in the hash avoids accidental collisions when the same salt is used
// by different access keys, as might happen in the case of a counter.
//
// Secure hashing is not required, because only authenticated handshakes
// are added to the cache.  A hostile client could produce colliding salts,
// but this would not impact other users.  Each map uses a new random hash
// function, so it is not trivial for a hostile client to mount an
// algorithmic complexity attack with nearly-colliding hashes:
// https://dave.cheney.net/2018/05/29/how-the-go-runtime-implements-maps-efficiently-without-generics
func preHash(id string, salt []byte) uint32 {
    buf := [4]byte{}
    for i := 0; i < len(id); i++ {
        buf[i&0x3] ^= id[i]
    }
    for i, v := range salt {
        buf[i&0x3] ^= v
    }
    return binary.BigEndian.Uint32(buf[:])
}

// Add a handshake with this key ID and salt to the cache.
// Returns false if it is already present.
func (c *ReplayCache) Add(id string, salt []byte) bool {
    if c == nil || c.capacity == 0 {
        // Cache is disabled, so every salt is new.
        return true
    }
    hash := preHash(id, salt)
    c.mutex.Lock()
    defer c.mutex.Unlock()
    if _, ok := c.active[hash]; ok {
        // Fast replay: `salt` is already in the active set.
        return false
    }
    _, inArchive := c.archive[hash]
    if len(c.active) == c.capacity {
        // Discard the archive and move active to archive.
        c.archive = c.active
        c.active = make(map[uint32]empty, c.capacity)
    }
    c.active[hash] = empty{}
    return !inArchive
}
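For context, a caller checks each authenticated handshake against the cache and drops the connection when Add returns false. The sketch below is illustrative only: handleAuthenticatedHandshake, errReplayedSalt, and the shared replayCache variable are hypothetical names and are not part of this change; only ReplayCache, NewReplayCache, MaxCapacity, and Add come from the code above.

// Illustrative sketch only: shows where a server would consult the cache.
// It is written in the same package as the code above purely for brevity.
package shadowsocks

import "errors"

// A single cache shared by all connections on this server (hypothetical).
var replayCache = NewReplayCache(MaxCapacity)

var errReplayedSalt = errors.New("handshake salt was already used")

// handleAuthenticatedHandshake is a made-up hook, called after a client's
// handshake has been authenticated with the access key identified by keyID.
func handleAuthenticatedHandshake(keyID string, salt []byte) error {
    if !replayCache.Add(keyID, salt) {
        // The salt was seen within the last `capacity` handshakes (or, very
        // rarely, its 32-bit hash collided with another entry), so treat the
        // connection as a replay and refuse to proxy it.
        return errReplayedSalt
    }
    // Otherwise proceed with the normal proxy flow.
    return nil
}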
@@ -0,0 +1,137 @@
// Copyright 2020 Jigsaw Operations LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package shadowsocks

import (
    "encoding/binary"
    "testing"
)

const keyID = "the key"

var counter uint32 = 0

func makeSalts(n int) [][]byte {
    salts := make([][]byte, n)
    for i := 0; i < n; i++ {
        salts[i] = make([]byte, 4)
        binary.BigEndian.PutUint32(salts[i], counter)
        counter++
        if counter == 0 {
            panic("Salt counter overflow")
        }
    }
    return salts
}

func TestReplayCache_Active(t *testing.T) {
    salts := makeSalts(2)
    cache := NewReplayCache(10)
    if !cache.Add(keyID, salts[0]) {
        t.Error("First addition to a clean cache should succeed")
    }
    if cache.Add(keyID, salts[0]) {
        t.Error("Duplicate add should fail")
    }
    if !cache.Add(keyID, salts[1]) {
        t.Error("Addition of a new vector should succeed")
    }
    if cache.Add(keyID, salts[1]) {
        t.Error("Second duplicate add should fail")
    }
}

func TestReplayCache_Archive(t *testing.T) {
    salts0 := makeSalts(10)
    salts1 := makeSalts(10)
    cache := NewReplayCache(10)
    // Add vectors to the active set until it hits the limit
    // and spills into the archive.
    for _, s := range salts0 {
        if !cache.Add(keyID, s) {
            t.Error("Addition of a new vector should succeed")
        }
    }

    for _, s := range salts0 {
        if cache.Add(keyID, s) {
            t.Error("Duplicate add should fail")
        }
    }

    // Repopulate the active set.
    for _, s := range salts1 {
        if !cache.Add(keyID, s) {
            t.Error("Addition of a new vector should succeed")
        }
    }

    // Both active and archive are full.  Adding another vector
    // should wipe the archive.
    lastStraw := makeSalts(1)[0]
    if !cache.Add(keyID, lastStraw) {
        t.Error("Addition of a new vector should succeed")
    }
    for _, s := range salts0 {
        if !cache.Add(keyID, s) {
            t.Error("First 10 vectors should have been forgotten")
        }
    }
}

// Benchmark to determine the memory usage of ReplayCache.
// Note that NewReplayCache only allocates the active set,
// so the eventual memory usage will be roughly double.
func BenchmarkReplayCache_Creation(b *testing.B) {
    for i := 0; i < b.N; i++ {
        NewReplayCache(MaxCapacity)
    }
}

func BenchmarkReplayCache_Max(b *testing.B) {
    salts := makeSalts(b.N)
    // Archive replacements will be infrequent.
    cache := NewReplayCache(MaxCapacity)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        cache.Add(keyID, salts[i])
    }
}

func BenchmarkReplayCache_Min(b *testing.B) {
    salts := makeSalts(b.N)
    // Every addition will archive the active set.
    cache := NewReplayCache(1)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        cache.Add(keyID, salts[i])
    }
}

func BenchmarkReplayCache_Parallel(b *testing.B) {
    c := make(chan []byte, b.N)
    for _, s := range makeSalts(b.N) {
        c <- s
    }
    close(c)
    // Exercise both expansion and archiving.
    cache := NewReplayCache(100)
    b.ResetTimer()
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            cache.Add(keyID, <-c)
        }
    })
}
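As a sanity check on the false-positive bound quoted in the MaxCapacity comment (up to 2 * capacity / 2^32), the arithmetic can be reproduced with a few lines of Go. This is a standalone, illustrative sketch and is not part of the change:

// Standalone sketch verifying the false-positive bound for MaxCapacity.
package main

import "fmt"

func main() {
    const capacity = 20_000 // mirrors MaxCapacity above
    // The active and archive maps together hold at most 2*capacity 32-bit
    // hashes, so a fresh random salt is wrongly rejected with probability
    // at most 2*capacity / 2^32.
    rate := float64(2*capacity) / float64(uint64(1)<<32)
    fmt.Printf("worst-case false-positive rate ~ %.2e (about 1 in %.0f)\n", rate, 1/rate)
    // Prints roughly 9.3e-06, i.e. about 1 in 107,000, matching the
    // "1 / 100,000" order of magnitude cited in the MaxCapacity comment.
}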