diff --git a/iterator.go b/iterator.go
index 0ae92ab9d..0d2cff5bc 100644
--- a/iterator.go
+++ b/iterator.go
@@ -28,7 +28,7 @@ import (
 	"github.com/dgryski/go-farm"
 )
 
-type prefetchStatus uint8
+type prefetchStatus = uint32
 
 const (
 	prefetched prefetchStatus = 1
@@ -89,7 +89,7 @@ func (item *Item) Version() uint64 {
 // instead, or copy it yourself. Value might change once discard or commit is called.
 // Use ValueCopy if you want to do a Set after Get.
 func (item *Item) Value() ([]byte, error) {
-	if item.status == noPrefetch {
+	if item.getStatus() == noPrefetch {
 		if (item.meta & bitValuePointer) == 0 {
 			return item.vptr, nil
 		}
@@ -98,12 +98,16 @@ func (item *Item) Value() ([]byte, error) {
 		return item.db.vlog.Read(vp, item.slice)
 	}
 	item.wg.Wait()
-	if item.status == prefetched {
+	if item.getStatus() == prefetched {
 		return item.val, item.err
 	}
 	return item.yieldItemValue(nil)
 }
 
+func (item *Item) getStatus() uint32 {
+	return atomic.LoadUint32(&item.status)
+}
+
 // ValueCopy returns a copy of the value of the item from the value log, writing it to dst slice.
 // If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and
 // returned. Tip: It might make sense to reuse the returned slice as dst argument for the next call.
@@ -112,7 +116,7 @@ func (item *Item) Value() ([]byte, error) {
 // See Github issue: https://github.com/coocood/badger/issues/315
 func (item *Item) ValueCopy(dst []byte) ([]byte, error) {
 	item.wg.Wait()
-	if item.status == prefetched {
+	if item.getStatus() == prefetched {
 		return y.SafeCopy(dst, item.val), item.err
 	}
 	buf, err := item.yieldItemValue(dst)
@@ -172,7 +176,7 @@ func (item *Item) yieldItemValue(dst []byte) ([]byte, error) {
 func (item *Item) prefetchValue() {
 	val, err := item.yieldItemValue(nil)
 	item.err = err
-	item.status = prefetched
+	atomic.StoreUint32(&item.status, prefetched)
 	if val == nil {
 		return
 	}
diff --git a/level_handler.go b/level_handler.go
index 0520ece3c..b7366893d 100644
--- a/level_handler.go
+++ b/level_handler.go
@@ -134,7 +134,6 @@ func (s *levelHandler) replaceTables(newTables []*table.Table) error {
 	}
 
 	assertTablesOrder(newTables)
-	assertTablesOrder(s.tables)
 
 	s.Lock() // We s.Unlock() below.
 
@@ -167,8 +166,8 @@ func (s *levelHandler) replaceTables(newTables []*table.Table) error {
 	y.Assert(numAdded == copy(t, newTables))
 	t = t[numAdded:]
 	y.Assert(len(s.tables[right:]) == copy(t, s.tables[right:]))
-	s.tables = tables
 	assertTablesOrder(tables)
+	s.tables = tables
 	s.Unlock() // s.Unlock before we DecrRef tables -- that can be slow.
 	return decrRefs(toDecr)
 }
diff --git a/table/mem_table.go b/table/mem_table.go
index db4fbe31f..c3a440d58 100644
--- a/table/mem_table.go
+++ b/table/mem_table.go
@@ -42,7 +42,7 @@ func (mt *MemTable) Get(key []byte) y.ValueStruct {
 		if v, ok := curr.get(key); ok {
 			return v
 		}
-		curr = (*listNode)(curr.next)
+		curr = (*listNode)(atomic.LoadPointer(&curr.next))
 	}
 	return mt.skl.Get(key)
 }
@@ -55,7 +55,7 @@ func (mt *MemTable) NewIterator(reverse bool) y.Iterator {
 	curr := (*listNode)(atomic.LoadPointer(&mt.pendingList))
 	for curr != nil {
 		its = append(its, curr.newIterator(reverse))
-		curr = (*listNode)(curr.next)
+		curr = (*listNode)(atomic.LoadPointer(&curr.next))
 	}
 
 	if len(its) == 0 {
@@ -70,7 +70,7 @@ func (mt *MemTable) MemSize() int64 {
 	curr := (*listNode)(atomic.LoadPointer(&mt.pendingList))
 	for curr != nil {
 		sz += curr.memSize
-		curr = (*listNode)(curr.next)
+		curr = (*listNode)(atomic.LoadPointer(&curr.next))
 	}
 	return mt.skl.MemSize() + sz
 }