This repository was archived by the owner on Oct 11, 2024. It is now read-only.

Commit 1e9db9e

Work in progress: compute new root hash
1 parent: 5eee227 · commit: 1e9db9e

File tree


core/monitor/verify.go

Lines changed: 45 additions & 56 deletions
@@ -19,24 +19,22 @@
 package monitor

 import (
+	"bytes"
 	"crypto"
 	"errors"
 	"fmt"
+	"math/big"

 	"github.com/golang/glog"

-	// "github.com/google/trillian"
-	// "github.com/google/trillian/merkle"
-	"github.com/google/keytransparency/core/mutator/entry"
+	"github.com/google/trillian/merkle"
+	"github.com/google/trillian/merkle/coniks"
+	"github.com/google/trillian/storage"

 	tcrypto "github.com/google/trillian/crypto"

-	"bytes"
+	"github.com/google/keytransparency/core/mutator/entry"
 	ktpb "github.com/google/keytransparency/core/proto/keytransparency_v1_types"
-	"github.com/google/trillian"
-	"github.com/google/trillian/merkle"
-	"github.com/google/trillian/merkle/coniks"
-	"github.com/google/trillian/storage"
 )

 var (
@@ -87,18 +85,9 @@ func VerifyResponse(logPubKey, mapPubKey crypto.PublicKey, resp *ktpb.GetMutatio
 }

 func verifyMutations(muts []*ktpb.Mutation, expectedRoot []byte, mapID int64) error {
-	// TODO: export applyMutations in CreateEpoch / signer.go?
-	//
-
-	// verify the mutation’s validity against the previous leaf.
-	//
-	// entry.VerifyKeys()
-	// or
-	// entry.Mutate() // does all checks and returns the new leaf as well
-	inclusionMap := make(map[[32]byte]*trillian.MapLeafInclusion)
-	updatedLeafMap := make(map[[32]byte]*trillian.MapLeaf)
+	newLeaves := make([]merkle.HStar2LeafHash, 0, len(muts))
 	mutator := entry.New()
-	oldProofNodes := make(map[string]*storage.Node)
+	oldProofNodes := make(map[string][]byte)
 	hasher := coniks.Default

 	for _, m := range muts {
@@ -115,53 +104,53 @@ func verifyMutations(muts []*ktpb.Mutation, expectedRoot []byte, mapID int64) er
 		}
 		newLeaf, err := mutator.Mutate(leafVal, m.GetUpdate())
 		if err != nil {
-			// TODO(ismail): do not return; collect other errors if any
+			// TODO(ismail): collect all data to reproduce this (expectedRoot, oldLeaf, and mutation)
 			return ErrInvalidMutation
 		}
-		// update and store intermediate hashes for this new leaf
-		// (using old inclusion proof and already updated intermediate leafs)
-		fmt.Println(newLeaf)
-		// the index shouldn't change:
-		var index [32]byte
-		copy(index[:], m.GetProof().GetLeaf().GetIndex()[:32])
-		// TODO(ismail): do we actually need these copies?
-		inclusionMap[index] = m.GetProof()
-
-		updatedLeafMap[index] = &trillian.MapLeaf{
-			Index: index[:],
-			// LeafHash: hasher.HashLeaf(mapID, l.Index, l.LeafValue),
-			LeafValue: newLeaf,
-		}
-		// cache proof nodes:
+
+		index := m.GetProof().GetLeaf().GetIndex()
+
+		newLeafnID := storage.NewNodeIDFromPrefixSuffix(index, storage.Suffix{}, hasher.BitLen())
+		newLeafH := hasher.HashLeaf(mapID, index, newLeaf)
+		newLeaves = append(newLeaves, merkle.HStar2LeafHash{
+			Index:    newLeafnID.BigInt(),
+			LeafHash: newLeafH,
+		})
+
+		sibIDs := newLeafnID.Siblings()
 		for level, proof := range m.GetProof().GetInclusion() {
-			sid := storage.NewNodeIDFromBigInt(level, index, hasher.BitLen())
-			pid := sid.Neighbor()
-			// TODO Do we need the node revision or is this only used internally?
-			pNode := &storage.Node{
-				NodeID:       pid,
-				Hash:         proof,
-				NodeRevision: 0,
-			}
-			if p, ok := oldProofNodes[pid.String()]; ok {
+			pID := sibIDs[level]
+			if p, ok := oldProofNodes[pID.String()]; ok {
 				// sanity check: for each mutation overlapping proof nodes should be
 				// equal:
-				bytes.Equal(p.Hash, proof)
+				if !bytes.Equal(p, proof) {
+					// TODO: this is really odd
+				}
 			} else {
-				oldProofNodes[pid.String()] = pNode
+				oldProofNodes[pID.String()] = proof
 			}
 		}
 	}
-	// TODO write get function that returns and potentially recomputes proof nodes
-	// (if neccessary) and a set method that updates recomputed proof nodes and
-	// call:
-	//
-	//hs2 := merkle.NewHStar2(mapID, hasher)
-	//hs2.HStar2Nodes([]byte{}, hasher.Size(), new []HStar2LeafHash
-
-	// get SparseGetNodeFunc, set SparseSetNodeFunc)
-	//
+	// TODO write get function that returns old proof nodes by index and level
 	// compute the new leaf and store the intermediate hashes locally.
 	// compute the new root using local intermediate hashes from epoch e.
 	// verify rootHash
-	return errors.New("TODO: implement verification logic")
+
+	hs2 := merkle.NewHStar2(mapID, hasher)
+	newRoot, err := hs2.HStar2Nodes([]byte{}, hasher.Size(), newLeaves,
+		func(depth int, index *big.Int) ([]byte, error) {
+			nID := storage.NewNodeIDFromBigInt(depth, index, hasher.BitLen())
+			if p, ok := oldProofNodes[nID.String()]; ok {
+				return p, nil
+			}
+			return nil, nil
+		}, nil)
+	if err != nil {
+		glog.Errorf("hs2.HStar2Nodes(_): %v", err)
+		fmt.Errorf("could not compute new root hash: hs2.HStar2Nodes(_): %v", err)
+	}
+	if !bytes.Equal(newRoot, expectedRoot) {
+		return ErrNotMatchingRoot
+	}
+	return nil
 }
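
The idea behind the new code is easier to see on a toy example. The sketch below is illustrative only: hashLeaf, hashChildren, root, inclusionProof, and rootFromProof are made-up helpers, not keytransparency or Trillian APIs, and SHA-256 over a fixed-depth tree stands in for the CONIKS hasher. It shows the core check for a single mutation: hash the mutated leaf, fold in the sibling hashes taken from the old inclusion proof, and compare the recomputed root against the expected root of the new epoch.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

const depth = 3 // toy sparse map with 2^3 = 8 leaves

// hashLeaf and hashChildren use simple domain separation ('L' / 'N'),
// standing in for the CONIKS hasher used in verify.go.
func hashLeaf(index byte, value []byte) []byte {
	h := sha256.New()
	h.Write([]byte{'L', index})
	h.Write(value)
	return h.Sum(nil)
}

func hashChildren(left, right []byte) []byte {
	h := sha256.New()
	h.Write([]byte{'N'})
	h.Write(left)
	h.Write(right)
	return h.Sum(nil)
}

// root recomputes the full tree from all leaf hashes (the map server's view).
func root(leaves [][]byte) []byte {
	level := leaves
	for len(level) > 1 {
		next := make([][]byte, 0, len(level)/2)
		for i := 0; i < len(level); i += 2 {
			next = append(next, hashChildren(level[i], level[i+1]))
		}
		level = next
	}
	return level[0]
}

// inclusionProof collects the siblings on the path of leaf `index`, ordered
// from the leaf level up towards the root, i.e. what the monitor receives
// for the previous epoch.
func inclusionProof(leaves [][]byte, index int) [][]byte {
	proof := make([][]byte, 0, depth)
	level := leaves
	for len(level) > 1 {
		proof = append(proof, level[index^1])
		next := make([][]byte, 0, len(level)/2)
		for i := 0; i < len(level); i += 2 {
			next = append(next, hashChildren(level[i], level[i+1]))
		}
		level = next
		index /= 2
	}
	return proof
}

// rootFromProof folds the *new* leaf hash together with the *old* proof
// siblings, yielding the root the map should have after the mutation.
func rootFromProof(index int, leafHash []byte, proof [][]byte) []byte {
	h := leafHash
	for _, sibling := range proof {
		if index%2 == 0 {
			h = hashChildren(h, sibling)
		} else {
			h = hashChildren(sibling, h)
		}
		index /= 2
	}
	return h
}

func main() {
	// Previous epoch: 8 leaves.
	leaves := make([][]byte, 1<<depth)
	for i := range leaves {
		leaves[i] = hashLeaf(byte(i), []byte(fmt.Sprintf("value-%d", i)))
	}
	proof := inclusionProof(leaves, 5) // old inclusion proof for leaf 5

	// Apply a mutation to leaf 5 and compute the new root directly.
	leaves[5] = hashLeaf(5, []byte("value-5-mutated"))
	expectedRoot := root(leaves)

	// The monitor's view: only the new leaf hash and the old proof siblings.
	newRoot := rootFromProof(5, leaves[5], proof)
	fmt.Println("roots match:", bytes.Equal(newRoot, expectedRoot))
}

The committed code performs the same check for a whole batch of mutations: because inclusion proofs of different leaves share upper-level nodes, it deduplicates the siblings in oldProofNodes and recomputes the root in one pass with merkle.NewHStar2 / HStar2Nodes rather than walking each proof separately.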
