preimage_cache.go
package validator

import (
"sync"
"sync/atomic"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/pkg/errors"
)

// Dropping the last reference without maintenance leaves an empty preimageEntry in memory.
// A few MB of those should be o.k.
const maintenanceEvery int32 = 100000
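
// preimageCache is a reference-counted, concurrency-safe store of preimages
// keyed by their hash. Entries whose refcount drops to zero keep a small
// placeholder in the map until CacheMaintenance sweeps them out.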
type preimageCache struct {
	cacheMap                  sync.Map
	maintenance               sync.RWMutex
	deletionsSinceMaintenance int32
}
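
// preimageEntry holds one cached preimage and the number of callers currently
// referencing it; Mutex guards both fields.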
type preimageEntry struct {
	Mutex    sync.Mutex
	Refcount int
	Data     []byte
}
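
// PourToCache adds the given preimages to the cache, incrementing the
// reference count of every entry it touches, and returns the list of hashes
// added so the caller can later release them with RemoveFromCache.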
func (p *preimageCache) PourToCache(preimages map[common.Hash][]byte) []common.Hash {
	// Multiple pours can run in parallel, but not while maintenance holds the write lock.
	p.maintenance.RLock()
	defer p.maintenance.RUnlock()
	var newEntry *preimageEntry = nil
	hashlist := make([]common.Hash, 0, len(preimages))
	for hash, val := range preimages {
		if newEntry == nil {
			newEntry = new(preimageEntry)
		}
		// LoadOrStore only consumes newEntry when the hash is not yet present,
		// so the spare entry is reused across iterations.
		actual, found := p.cacheMap.LoadOrStore(hash, newEntry)
		var curEntry *preimageEntry
		if found {
			var ok bool
			curEntry, ok = actual.(*preimageEntry)
			if !ok {
				// Existing value has an unexpected type: overwrite it with the fresh entry.
				p.cacheMap.Store(hash, newEntry)
				curEntry = newEntry
				newEntry = nil
			}
		} else {
			curEntry = newEntry
			newEntry = nil
		}
		curEntry.Mutex.Lock()
		// Only set the data when taking the entry from zero references; otherwise
		// the cache already holds the preimage for this hash.
		if curEntry.Refcount == 0 {
			curEntry.Data = val
		}
		curEntry.Refcount += 1
		curEntry.Mutex.Unlock()
		hashlist = append(hashlist, hash)
	}
	return hashlist
}
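
// RemoveFromCache releases one reference for each hash in hashlist. When an
// entry's refcount reaches zero its data is dropped, and every
// maintenanceEvery such deletions a background CacheMaintenance pass is
// triggered to prune empty entries from the map.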
func (p *preimageCache) RemoveFromCache(hashlist []common.Hash) error {
	// No maintenance lock needed: this only decreases refcounts and never deletes map entries.
	for _, hash := range hashlist {
		actual, found := p.cacheMap.Load(hash)
		if !found {
			return errors.New("preimage not in cache")
		}
		curEntry, ok := actual.(*preimageEntry)
		if !ok {
			return errors.New("preimage cache entry invalid")
		}
		curEntry.Mutex.Lock()
		prevref := curEntry.Refcount
		curEntry.Refcount -= 1
		if curEntry.Refcount == 0 {
			// Drop the data now; the empty entry itself is removed later by CacheMaintenance.
			curEntry.Data = nil
			deletionsNum := atomic.AddInt32(&p.deletionsSinceMaintenance, 1)
			if deletionsNum%maintenanceEvery == 0 {
				// Only maintains an in-memory structure, so no need for StopAndWait.
				go p.CacheMaintenance()
			}
		}
		curEntry.Mutex.Unlock()
		if prevref <= 0 {
			return errors.New("preimage reference underflow")
		}
	}
	return nil
}
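
// CacheMaintenance takes the maintenance write lock (blocking concurrent
// pours) and deletes every map entry whose refcount is zero.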
func (p *preimageCache) CacheMaintenance() {
	p.maintenance.Lock()
	defer p.maintenance.Unlock()
	p.cacheMap.Range(func(key, val interface{}) bool {
		entry, ok := val.(*preimageEntry)
		if !ok {
			log.Error("preimage map: invalid entry")
			return false
		}
		// Lock the entry while reading Refcount: RemoveFromCache does not take the
		// maintenance lock, so it may be decrementing concurrently.
		entry.Mutex.Lock()
		refc := entry.Refcount
		entry.Mutex.Unlock()
		if refc == 0 {
			p.cacheMap.Delete(key)
		}
		return true
	})
}

// FillHashedValues returns the cached data for every hash in hashlist. The
// returned slices are shared with the cache: a top-level CMultipleByteArrays
// built from them must be freed, but the inner byte arrays must **not** be.
func (p *preimageCache) FillHashedValues(hashlist []common.Hash) (map[common.Hash][]byte, error) {
	length := len(hashlist)
	res := make(map[common.Hash][]byte, length)
	for _, hash := range hashlist {
		actual, found := p.cacheMap.Load(hash)
		if !found {
			return nil, errors.New("preimage not in cache")
		}
		curEntry, ok := actual.(*preimageEntry)
		if !ok {
			return nil, errors.New("preimage malformed in cache")
		}
		curEntry.Mutex.Lock()
		curData := curEntry.Data
		curRefCount := curEntry.Refcount
		curEntry.Mutex.Unlock()
		if curRefCount <= 0 {
			return nil, errors.New("preimage cache in bad state")
		}
		res[hash] = curData
	}
	return res, nil
}
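
// The function below is an illustrative usage sketch and is not part of the
// original file. It assumes a hypothetical caller; the hash and value are made
// up, and errors are only logged. It shows the intended lifecycle: PourToCache
// to add references, FillHashedValues to read, RemoveFromCache to release.
func examplePreimageCacheUsage() {
	cache := &preimageCache{}
	preimages := map[common.Hash][]byte{
		common.BytesToHash([]byte("example-key")): []byte("example preimage data"),
	}
	// Pour the preimages in; each entry's refcount becomes at least 1.
	hashes := cache.PourToCache(preimages)
	// Read the data back; the returned slices are shared with the cache.
	values, err := cache.FillHashedValues(hashes)
	if err != nil {
		log.Error("filling hashed values failed", "err", err)
		return
	}
	_ = values
	// Release our references; fully released entries are pruned by a later
	// CacheMaintenance pass.
	if err := cache.RemoveFromCache(hashes); err != nil {
		log.Error("removing preimages from cache failed", "err", err)
	}
}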