package art

import (
+	"bytes"
	"fmt"
	"math"
+	"sync/atomic"

	tikverr "github.com/tikv/client-go/v2/error"
	"github.com/tikv/client-go/v2/internal/unionstore/arena"
@@ -44,6 +46,12 @@ type ART struct {
	bufferSizeLimit uint64
	len             int
	size            int
+
+	// lastTraversedNode stores the address (as a uint64) of the last node traversed by
+	// either search or recursiveInsert.
+	// Compared to atomic.Pointer, atomic.Uint64 avoids a heap allocation, so it is more efficient.
+	lastTraversedNode atomic.Uint64
+	hitCount          atomic.Uint64
+	missCount         atomic.Uint64
}

func New() *ART {
@@ -55,6 +63,7 @@ func New() *ART {
	t.allocator.nodeAllocator.freeNode4 = make([]arena.MemdbArenaAddr, 0, 1<<4)
	t.allocator.nodeAllocator.freeNode16 = make([]arena.MemdbArenaAddr, 0, 1<<3)
	t.allocator.nodeAllocator.freeNode48 = make([]arena.MemdbArenaAddr, 0, 1<<2)
+	t.lastTraversedNode.Store(arena.NullU64Addr)
	return &t
}

@@ -102,10 +111,26 @@ func (t *ART) Set(key artKey, value []byte, ops ...kv.FlagsOp) error {
	return nil
}

-// search looks up the leaf with the given key.
+// search wraps searchImpl with a cache.
+func (t *ART) search(key artKey) (arena.MemdbArenaAddr, *artLeaf) {
+	// check the cache first
+	addr, leaf, found := t.checkKeyInCache(key)
+	if found {
+		t.hitCount.Add(1)
+		return addr, leaf
+	}
+	t.missCount.Add(1)
+	addr, leaf = t.searchImpl(key)
+	if !addr.IsNull() {
+		t.updateLastTraversed(addr)
+	}
+	return addr, leaf
+}
+
+// searchImpl looks up the leaf with the given key.
// It returns the memory arena address and the leaf itself if there is a matching leaf,
// and returns arena.NullAddr and nil if the key is not found.
-func (t *ART) search(key artKey) (arena.MemdbArenaAddr, *artLeaf) {
+func (t *ART) searchImpl(key artKey) (arena.MemdbArenaAddr, *artLeaf) {
	current := t.root
	if current == nullArtNode {
		return arena.NullAddr, nil
@@ -154,9 +179,25 @@ func (t *ART) search(key artKey) (arena.MemdbArenaAddr, *artLeaf) {
	}
}

-// recursiveInsert returns the node address of the key.
-// It will insert the key if not exists, returns the newly inserted or existing leaf.
+// recursiveInsert wraps recursiveInsertImpl with a cache.
func (t *ART) recursiveInsert(key artKey) (arena.MemdbArenaAddr, *artLeaf) {
+	addr, leaf, found := t.checkKeyInCache(key)
+	if found {
+		t.hitCount.Add(1)
+		return addr, leaf
+	}
+	t.missCount.Add(1)
+	addr, leaf = t.recursiveInsertImpl(key)
+	if !addr.IsNull() {
+		t.updateLastTraversed(addr)
+	}
+	return addr, leaf
+}
+
+// recursiveInsertImpl returns the node address of the key.
+// It inserts the key if it does not exist, and returns the newly inserted or existing leaf.
+func (t *ART) recursiveInsertImpl(key artKey) (arena.MemdbArenaAddr, *artLeaf) {
+
	// lazy init root node and allocator.
	// this saves memory for read only txns.
	if t.root.addr.IsNull() {
@@ -501,6 +542,7 @@ func (t *ART) Reset() {
	t.len = 0
	t.allocator.nodeAllocator.Reset()
	t.allocator.vlogAllocator.Reset()
+	t.lastTraversedNode.Store(arena.NullU64Addr)
}

// DiscardValues releases the memory used by all values.
@@ -583,10 +625,31 @@ func (t *ART) RemoveFromBuffer(key []byte) {
	panic("unimplemented")
}

+// updateLastTraversed updates the last traversed node atomically.
+// The addr must be a valid leaf address.
+func (t *ART) updateLastTraversed(addr arena.MemdbArenaAddr) {
+	t.lastTraversedNode.Store(addr.AsU64())
+}
+
+// checkKeyInCache retrieves the last traversed node if the key matches.
+func (t *ART) checkKeyInCache(key []byte) (arena.MemdbArenaAddr, *artLeaf, bool) {
+	addrU64 := t.lastTraversedNode.Load()
+	if addrU64 == arena.NullU64Addr {
+		return arena.NullAddr, nil, false
+	}
+
+	addr := arena.U64ToAddr(addrU64)
+	leaf := t.allocator.getLeaf(addr)
+	if !bytes.Equal(leaf.GetKey(), key) {
+		return arena.NullAddr, nil, false
+	}
+	return addr, leaf, true
+}
+
func (t *ART) GetCacheHitCount() uint64 {
-	return 0
+	return t.hitCount.Load()
}

func (t *ART) GetCacheMissCount() uint64 {
-	return 0
+	return t.missCount.Load()
}
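
The idea behind the patch is a single-entry cache: the address of the most recently traversed leaf is kept in an atomic.Uint64 (rather than an atomic.Pointer, to avoid a heap allocation on every update), and both search and recursiveInsert consult it before walking the tree, counting hits and misses. The standalone sketch below is not part of the patch; it only illustrates that pattern. The names (entry, cache, lookup, nullHandle) are hypothetical stand-ins, not the client-go API, and a plain slice scan stands in for the real ART traversal.

package main

import (
	"fmt"
	"sync/atomic"
)

// nullHandle plays the role of arena.NullU64Addr: "nothing cached yet".
const nullHandle = ^uint64(0)

type entry struct {
	key   string
	value int
}

type cache struct {
	entries   []entry       // backing storage; an index acts as the "address"
	last      atomic.Uint64 // handle of the last traversed entry
	hitCount  atomic.Uint64
	missCount atomic.Uint64
}

func newCache(entries []entry) *cache {
	c := &cache{entries: entries}
	c.last.Store(nullHandle)
	return c
}

// lookup first checks whether the last traversed entry matches the key
// (the role of checkKeyInCache); on a miss it falls back to a full scan,
// standing in for the real tree traversal, and records the new handle
// (the role of updateLastTraversed).
func (c *cache) lookup(key string) (int, bool) {
	if h := c.last.Load(); h != nullHandle && c.entries[h].key == key {
		c.hitCount.Add(1)
		return c.entries[h].value, true
	}
	c.missCount.Add(1)
	for i := range c.entries {
		if c.entries[i].key == key {
			c.last.Store(uint64(i))
			return c.entries[i].value, true
		}
	}
	return 0, false
}

func main() {
	c := newCache([]entry{{"a", 1}, {"b", 2}})
	c.lookup("b")                                      // miss: caches "b"
	c.lookup("b")                                      // hit: served from the cached handle
	fmt.Println(c.hitCount.Load(), c.missCount.Load()) // prints: 1 1
}

Workloads that repeatedly touch the same key benefit most from this kind of cache, and the counters exposed through GetCacheHitCount and GetCacheMissCount make that effect observable.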