swarm/storage/mru: Renamed all comments to Feeds
jpeletier committed Oct 3, 2018
1 parent f1e86ad commit b35622c
Showing 14 changed files with 96 additions and 102 deletions.
4 changes: 2 additions & 2 deletions swarm/storage/mru/cacheentry.go
@@ -30,7 +30,7 @@ const (
defaultRetrieveTimeout = 100 * time.Millisecond
)

// cacheEntry caches resource data and the metadata of its root chunk.
// cacheEntry caches the last known update of a specific Feed.
type cacheEntry struct {
Update
*bytes.Reader
@@ -42,7 +42,7 @@ func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) {
return int64(len(r.Update.data)), nil
}

//returns the resource's topic
//returns the Feed's topic
func (r *cacheEntry) Topic() Topic {
return r.Feed.Topic
}
29 changes: 14 additions & 15 deletions swarm/storage/mru/doc.go
@@ -1,43 +1,42 @@
/*
Package feeds defines Swarm Feeds.
A Mutable Resource is an entity which allows updates to a resource
Swarm Feeds allows a user to build an update feed about a particular topic
without resorting to ENS on each update.
The update scheme is built on swarm chunks with chunk keys following
a predictable, versionable pattern.
A Resource is tied to a unique identifier that is deterministically generated out of
A Feed is tied to a unique identifier that is deterministically generated out of
the chosen topic.
A Resource View is defined as a specific user's point of view about a particular resource.
Thus, a View is a Topic + the user's address (userAddr)
A Feed is defined as the series of updates of a specific user about a particular topic
Actual data updates are also made in the form of swarm chunks. The keys
of the updates are the hash of a concatenation of properties as follows:
updateAddr = H(View, Epoch ID)
updateAddr = H(Feed, Epoch ID)
where H is the SHA3 hash function
View is the combination of Topic and the user address
Feed is the combination of Topic and the user address
Epoch ID is a time slot. See the lookup package for more information.
A user looking up a resource would only need to know the View in order to
another user's updates
A user looking up the latest update in a Feed only needs to know the Topic
and the other user's address.
The resource update data is:
resourcedata = View|Epoch|data
The Feed Update data is:
updatedata = Feed|Epoch|data
the full update data that goes in the chunk payload is:
The full update data that goes in the chunk payload is:
resourcedata|sign(resourcedata)
Structure Summary:
Request: Resource update with signature
ResourceUpdate: headers + data
Request: Feed Update with signature
Update: headers + data
Header: Protocol version and reserved for future use placeholders
ID: Information about how to locate a specific update
View: Author of the update and what is updating
Feed: Represents a user's series of publications about a specific Topic
Topic: Item that the updates are about
User: User who updates the resource
User: User who updates the Feed
Epoch: time slot where the update is stored
*/
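To make the addressing scheme above concrete, the following is a minimal conceptual sketch of updateAddr = H(Feed, Epoch ID). The field widths, byte ordering and the choice of Keccak-256 via golang.org/x/crypto/sha3 are illustrative assumptions for this sketch only, not the package's actual binary serialization.

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// updateAddr sketches updateAddr = H(Feed, Epoch ID): a Feed is the Topic plus
// the publishing user's address, and an Epoch ID is a (base time, level) time
// slot. Sizes and ordering here are illustrative, not the package's wire format.
func updateAddr(topic [32]byte, user [20]byte, epochBaseTime uint64, epochLevel uint8) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(topic[:]) // Feed: the topic...
	h.Write(user[:])  // ...and the user's address
	var t [8]byte
	binary.LittleEndian.PutUint64(t[:], epochBaseTime)
	h.Write(t[:])               // Epoch ID: base time...
	h.Write([]byte{epochLevel}) // ...and level
	return h.Sum(nil)
}

func main() {
	var topic [32]byte
	copy(topic[:], "example topic")
	var user [20]byte // the publisher's 20-byte address
	fmt.Printf("updateAddr: %x\n", updateAddr(topic, user, 4200, 25))
}

Because the address depends only on the Feed and the Epoch ID, updates land at predictable chunk addresses that a reader can recompute without consulting ENS on each update.
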
2 changes: 1 addition & 1 deletion swarm/storage/mru/error.go
@@ -52,7 +52,7 @@ func (e *Error) Code() int {
return e.code
}

// NewError creates a new Mutable Resource Error object with the specified code and custom error message
// NewError creates a new Swarm Feeds Error object with the specified code and custom error message
func NewError(code int, s string) error {
if code < 0 || code >= ErrCnt {
panic("no such error code!")
57 changes: 27 additions & 30 deletions swarm/storage/mru/handler.go
@@ -57,7 +57,7 @@ func init() {
}
}

// NewHandler creates a new Mutable Resource API
// NewHandler creates a new Swarm Feeds API
func NewHandler(params *HandlerParams) *Handler {
fh := &Handler{
cache: make(map[uint64]*cacheEntry),
@@ -74,13 +74,13 @@ func NewHandler(params *HandlerParams) *Handler {
return fh
}

// SetStore sets the store backend for the Mutable Resource API
// SetStore sets the store backend for the Swarm Feeds API
func (h *Handler) SetStore(store *storage.NetStore) {
h.chunkStore = store
}

// Validate is a chunk validation method
// If it looks like a resource update, the chunk address is checked against the userAddr of the update's signature
// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature
// It implements the storage.ChunkValidator interface
func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
dataLength := len(data)
@@ -89,7 +89,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
}

// check if it is a properly formatted update chunk with
// valid signature and proof of ownership of the resource it is trying
// valid signature and proof of ownership of the feed it is trying
// to update

// First, deserialize the chunk
@@ -99,9 +99,9 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
return false
}

// Verify signatures and that the signer actually owns the resource
// Verify signatures and that the signer actually owns the feed
// If it fails, it means either the signature is not valid, data is corrupted
// or someone is trying to update someone else's resource.
// or someone is trying to update someone else's feed.
if err := r.Verify(); err != nil {
log.Debug("Invalid feed update signature", "err", err)
return false
@@ -110,14 +110,14 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
return true
}

// GetContent retrieves the data payload of the last synced update of the Mutable Resource
// GetContent retrieves the data payload of the last synced update of the Feed
func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) {
if feed == nil {
return nil, nil, NewError(ErrInvalidValue, "view is nil")
return nil, nil, NewError(ErrInvalidValue, "feed is nil")
}
feedUpdate := h.get(feed)
if feedUpdate == nil {
return nil, nil, NewError(ErrNotFound, "resource does not exist")
return nil, nil, NewError(ErrNotFound, "feed update not cached")
}
return feedUpdate.lastKey, feedUpdate.data, nil
}
@@ -142,7 +142,7 @@ func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request,
return nil, err
}
// not finding updates means that there is a network error
// or that the resource really does not have updates
// or that the feed really does not have updates
}

request.Feed = *feed
@@ -157,13 +157,10 @@ func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request,
return request, nil
}

// Lookup retrieves a specific or latest version of the resource
// Lookup works differently depending on the configuration of `ID`
// See the `ID` documentation and helper functions:
// `LookupLatest` and `LookupBefore`
// When looking for the latest update, it starts at the next period after the current time.
// upon failure tries the corresponding keys of each previous period until one is found
// (or startTime is reached, in which case there are no updates).
// Lookup retrieves a specific or latest feed update
// Lookup works differently depending on the configuration of `query`
// See the `query` documentation and helper functions:
// `NewQueryLatest` and `NewQuery`
func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) {

timeLimit := query.TimeLimit
@@ -213,17 +210,17 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
return nil, err
}

log.Info(fmt.Sprintf("Resource lookup finished in %d lookups", readCount))
log.Info(fmt.Sprintf("Feed lookup finished in %d lookups", readCount))

request, _ := requestPtr.(*Request)
if request == nil {
return nil, NewError(ErrNotFound, "no updates found")
return nil, NewError(ErrNotFound, "no feed updates found")
}
return h.updateCache(request)

}

// update mutable resource cache map with specified content
// update feed updates cache with specified content
func (h *Handler) updateCache(request *Request) (*cacheEntry, error) {

updateAddr := request.Addr()
@@ -242,10 +239,10 @@ func (h *Handler) updateCache(request *Request) (*cacheEntry, error) {
return feedUpdate, nil
}

// Update adds an actual data update
// Uses the Mutable Resource metadata currently loaded in the resources map entry.
// It is the caller's responsibility to make sure that this data is not stale.
// Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit.
// Update publishes a feed update
// Note that a Feed update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature.
// This results in a max payload of `maxUpdateDataLength` (check update.go for more details)
// An error will be returned if the total length of the chunk payload will exceed this limit.
// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update
// on the network.
func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) {
@@ -280,18 +277,18 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad
return r.idAddr, nil
}

// Retrieves the resource cache value for the given nameHash
func (h *Handler) get(view *Feed) *cacheEntry {
mapKey := view.mapKey()
// Retrieves the feed update cache value for the given nameHash
func (h *Handler) get(feed *Feed) *cacheEntry {
mapKey := feed.mapKey()
h.cacheLock.RLock()
defer h.cacheLock.RUnlock()
feedUpdate := h.cache[mapKey]
return feedUpdate
}

// Sets the resource cache value for the given View
func (h *Handler) set(view *Feed, feedUpdate *cacheEntry) {
mapKey := view.mapKey()
// Sets the feed update cache value for the given Feed
func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) {
mapKey := feed.mapKey()
h.cacheLock.Lock()
defer h.cacheLock.Unlock()
h.cache[mapKey] = feedUpdate
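Taken together, the handler methods renamed in this file form a small publish-and-lookup API. The sketch below strings together only calls visible in this diff (NewTopic, NewFirstRequest, SetData, Sign, Update, NewQueryLatest, Lookup, GetContent). It is written as if it lived in the same package as the handler; the package name mru, the Signer interface and the lookup import path are assumptions, since they do not appear in the hunks above.

package mru

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

// publishAndRead publishes a first update to a feed owned by signer and reads
// it back through Lookup/GetContent. The handler's chunk store is assumed to
// have been set beforehand with SetStore.
func publishAndRead(ctx context.Context, h *Handler, signer Signer, payload []byte) ([]byte, error) {
	topic, _ := NewTopic("example topic", nil) // hypothetical topic name
	feed := Feed{Topic: topic, User: signer.Address()}

	// NewFirstRequest timestamps the update at the current time.
	request := NewFirstRequest(feed.Topic)
	request.SetData(payload)
	request.Sign(signer) // error handling elided in this sketch, mirroring the test below
	if _, err := h.Update(ctx, request); err != nil {
		return nil, err
	}

	// Find the latest update of this feed, starting with no clue about when
	// it was published, then return its data payload.
	if _, err := h.Lookup(ctx, NewQueryLatest(&feed, lookup.NoClue)); err != nil {
		return nil, err
	}
	_, data, err := h.GetContent(&feed)
	return data, err
}
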
54 changes: 27 additions & 27 deletions swarm/storage/mru/handler_test.go
@@ -89,12 +89,12 @@ func TestFeedsHandler(t *testing.T) {
}
defer teardownTest()

// create a new resource
// create a new Feed
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

topic, _ := NewTopic("Mess with Swarm Feeds code and see what ghost catches you", nil)
view := Feed{
feed := Feed{
Topic: topic,
User: signer.Address(),
}
@@ -107,7 +107,7 @@ func TestFeedsHandler(t *testing.T) {
"clyde", // t=4285
}

request := NewFirstRequest(view.Topic) // this timestamps the update at t = 4200 (start time)
request := NewFirstRequest(feed.Topic) // this timestamps the update at t = 4200 (start time)
chunkAddress := make(map[string]storage.Address)
data := []byte(updates[0])
request.SetData(data)
@@ -205,38 +205,38 @@ func TestFeedsHandler(t *testing.T) {
t.Fatal(err)
}

rsrc2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue))
update2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue))
if err != nil {
t.Fatal(err)
}

// last update should be "clyde"
if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) {
t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1])
if !bytes.Equal(update2.data, []byte(updates[len(updates)-1])) {
t.Fatalf("feed update data was %v, expected %v", string(update2.data), updates[len(updates)-1])
}
if rsrc2.Level != 22 {
t.Fatalf("resource epoch level was %d, expected 22", rsrc2.Level)
if update2.Level != 22 {
t.Fatalf("feed update epoch level was %d, expected 22", update2.Level)
}
if rsrc2.Base() != 0 {
t.Fatalf("resource epoch base time was %d, expected 0", rsrc2.Base())
if update2.Base() != 0 {
t.Fatalf("feed update epoch base time was %d, expected 0", update2.Base())
}
log.Debug("Latest lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data)
log.Debug("Latest lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data)

// specific point in time
rsrc, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue))
update, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue))
if err != nil {
t.Fatal(err)
}
// check data
if !bytes.Equal(rsrc.data, []byte(updates[2])) {
t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2])
if !bytes.Equal(update.data, []byte(updates[2])) {
t.Fatalf("feed update data (historical) was %v, expected %v", string(update2.data), updates[2])
}
log.Debug("Historical lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data)
log.Debug("Historical lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data)

// beyond the first should yield an error
rsrc, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue))
update, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue))
if err == nil {
t.Fatalf("expected previous to fail, returned epoch %s data %v", rsrc.Epoch.String(), rsrc.data)
t.Fatalf("expected previous to fail, returned epoch %s data %v", update.Epoch.String(), update.data)
}

}
@@ -266,11 +266,11 @@ func TestSparseUpdates(t *testing.T) {
defer teardownTest()
defer os.RemoveAll(datadir)

// create a new resource
// create a new Feed
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
topic, _ := NewTopic("Very slow updates", nil)
view := Feed{
feed := Feed{
Topic: topic,
User: signer.Address(),
}
@@ -280,7 +280,7 @@ func TestSparseUpdates(t *testing.T) {
var epoch lookup.Epoch
var lastUpdateTime uint64
for T := uint64(0); T < today; T += 5 * Year {
request := NewFirstRequest(view.Topic)
request := NewFirstRequest(feed.Topic)
request.Epoch = lookup.GetNextEpoch(epoch, T)
request.data = generateData(T) // this generates some data that depends on T, so we can check later
request.Sign(signer)
@@ -295,14 +295,14 @@ func TestSparseUpdates(t *testing.T) {
lastUpdateTime = T
}

query := NewQuery(&view, today, lookup.NoClue)
query := NewQuery(&feed, today, lookup.NoClue)

_, err = rh.Lookup(ctx, query)
if err != nil {
t.Fatal(err)
}

_, content, err := rh.GetContent(&view)
_, content, err := rh.GetContent(&feed)
if err != nil {
t.Fatal(err)
}
@@ -321,7 +321,7 @@ func TestSparseUpdates(t *testing.T) {
t.Fatal(err)
}

_, content, err = rh.GetContent(&view)
_, content, err = rh.GetContent(&feed)
if err != nil {
t.Fatal(err)
}
@@ -348,7 +348,7 @@ func TestValidator(t *testing.T) {
}
defer teardownTest()

// create new resource
// create new Feed
topic, _ := NewTopic(subtopicName, nil)
feed := Feed{
Topic: topic,
@@ -382,7 +382,7 @@ func TestValidator(t *testing.T) {
}

// tests that the content address validator correctly checks the data
// tests that resource update chunks are passed through content address validator
// tests that Feed update chunks are passed through content address validator
// there is some redundancy in this test as it also tests content addressed chunks,
// which should be evaluated as invalid chunks by this validator
func TestValidatorInStore(t *testing.T) {
@@ -409,7 +409,7 @@ func TestValidatorInStore(t *testing.T) {
t.Fatal(err)
}

// set up resource handler and add is as a validator to the localstore
// set up Swarm Feeds handler and add it as a validator to the localstore
fhParams := &HandlerParams{}
fh := NewHandler(fhParams)
store.Validators = append(store.Validators, fh)
@@ -425,7 +425,7 @@ func TestValidatorInStore(t *testing.T) {
User: signer.Address(),
}

// create a resource update chunk with correct publickey
// create a feed update chunk with correct publickey
id := ID{
Epoch: lookup.Epoch{Time: 42,
Level: 1,
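One piece of wiring in TestValidatorInStore above is worth calling out: the Feeds handler doubles as a chunk validator, so feed update chunks whose signature or ownership proof does not verify are rejected before they are stored. Below is a minimal sketch of that registration, again written as if in the same package, and assuming the store is a *storage.LocalStore exposing the Validators slice the test appends to.

package mru

import "github.com/ethereum/go-ethereum/swarm/storage"

// attachFeedValidator registers the Swarm Feeds handler as a chunk validator
// on the local store, so incoming feed update chunks are checked by
// Handler.Validate before being accepted.
func attachFeedValidator(store *storage.LocalStore) *Handler {
	fh := NewHandler(&HandlerParams{})
	store.Validators = append(store.Validators, fh)
	return fh
}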