@@ -31,8 +31,12 @@ import (
 
 	tcrypto "github.com/google/trillian/crypto"
 
+	"bytes"
 	ktpb "github.com/google/keytransparency/core/proto/keytransparency_v1_types"
 	"github.com/google/trillian"
+	"github.com/google/trillian/merkle"
+	"github.com/google/trillian/merkle/coniks"
+	"github.com/google/trillian/storage"
 )
 
 var (
@@ -75,14 +79,14 @@ func VerifyResponse(logPubKey, mapPubKey crypto.PublicKey, resp *ktpb.GetMutatio
 	// logVerifier.VerifyInclusionProof()
 
 	// mapID := resp.GetSmr().GetMapId()
-	if err := verifyMutations(allMuts, resp.GetSmr().GetRootHash()); err != nil {
+	if err := verifyMutations(allMuts, resp.GetSmr().GetRootHash(), resp.GetSmr().GetMapId()); err != nil {
 		return err
 	}
 
 	return errors.New("TODO: implement verification logic")
 }
 
-func verifyMutations(muts []*ktpb.Mutation, expectedRoot []byte) error {
+func verifyMutations(muts []*ktpb.Mutation, expectedRoot []byte, mapID int64) error {
 	// TODO: export applyMutations in CreateEpoch / signer.go?
 	//
 
@@ -94,6 +98,9 @@ func verifyMutations(muts []*ktpb.Mutation, expectedRoot []byte) error {
 	inclusionMap := make(map[[32]byte]*trillian.MapLeafInclusion)
 	updatedLeafMap := make(map[[32]byte]*trillian.MapLeaf)
 	mutator := entry.New()
+	oldProofNodes := make(map[string]*storage.Node)
+	hasher := coniks.Default
+
 	for _, m := range muts {
 		// verify that the provided leaf’s inclusion proof goes to epoch e-1:
 		//
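
The next hunk deletes a commented-out sketch of this check: folding the leaf's inclusion proof into a running hash and comparing the result with the epoch e-1 root. A minimal illustrative version of that fold is shown below. It only reuses calls already referenced in this file (coniks.Default, HashLeaf, HashEmpty, HashChildren, bytes, errors); the helper name is hypothetical, and because the proof ordering and the side of each sibling depend on Trillian's sparse-map proof encoding, those details are taken as caller-supplied functions rather than hard-coded.

```go
// verifyLeafInclusion is an illustrative, hypothetical helper: it folds an
// inclusion proof into a running hash and compares the result against the
// epoch e-1 map root. sibPath and neighborIsRight encapsulate the parts of
// Trillian's proof encoding that are assumptions here.
func verifyLeafInclusion(
	treeID int64,
	index, leafValue, oldRoot []byte,
	proof [][]byte,
	sibPath func(level int) []byte, // path of the sibling node at this level
	neighborIsRight func(level int) bool, // is the sibling the right-hand child?
) error {
	hasher := coniks.Default
	runningHash := hasher.HashLeaf(treeID, index, leafValue)
	for level, sibling := range proof {
		pElement := sibling
		if len(pElement) == 0 {
			// An empty proof entry stands for an empty sibling subtree.
			pElement = hasher.HashEmpty(treeID, sibPath(level), level)
		}
		if neighborIsRight(level) {
			runningHash = hasher.HashChildren(runningHash, pElement)
		} else {
			runningHash = hasher.HashChildren(pElement, runningHash)
		}
	}
	if !bytes.Equal(runningHash, oldRoot) {
		return errors.New("inclusion proof does not reproduce the epoch e-1 root")
	}
	return nil
}
```
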
@@ -125,22 +132,35 @@ func verifyMutations(muts []*ktpb.Mutation, expectedRoot []byte) error {
 			// LeafHash: hasher.HashLeaf(mapID, l.Index, l.LeafValue),
 			LeafValue: newLeaf,
 		}
-
-		//for level, proof := range m.GetProof().GetInclusion() {
-		//	pElement := proof
-		//	if len(pElement) == 0 {
-		//		pElement = hasher.HashEmpty(treeID, sib.Path, level)
-		//	}
-		//	if proofIsRightHandElement {
-		//		runningHash = hasher.HashChildren(runningHash, pElement)
-		//	} else {
-		//		runningHash = hasher.HashChildren(pElement, runningHash)
-		//	}
-		//}
-		//
-
+		// cache proof nodes:
+		for level, proof := range m.GetProof().GetInclusion() {
+			sid := storage.NewNodeIDFromBigInt(level, index, hasher.BitLen())
+			pid := sid.Neighbor()
+			// TODO: do we need the node revision, or is it only used internally?
+			pNode := &storage.Node{
+				NodeID:       pid,
+				Hash:         proof,
+				NodeRevision: 0,
+			}
+			if p, ok := oldProofNodes[pid.String()]; ok {
+				// sanity check: proof nodes shared between mutations must be equal:
+				if !bytes.Equal(p.Hash, proof) {
+					return errors.New("inconsistent proof node " + pid.String())
+				}
+			} else {
+				oldProofNodes[pid.String()] = pNode
+			}
+		}
 	}
+	// TODO: write a get function that returns and potentially recomputes proof
+	// nodes (if necessary) and a set method that updates recomputed proof nodes,
+	// then call:
+	//
+	//hs2 := merkle.NewHStar2(mapID, hasher)
+	//hs2.HStar2Nodes([]byte{}, hasher.Size(), new []HStar2LeafHash
 
+	// get SparseGetNodeFunc, set SparseSetNodeFunc)
+	//
 	// compute the new leaf and store the intermediate hashes locally.
 	// compute the new root using local intermediate hashes from epoch e.
 	// verify rootHash
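
The TODO above describes recomputing the map root with HStar2 over the cached proof nodes plus the updated leaves, then comparing it to the signed map root of epoch e. A rough sketch of that tail of verifyMutations is below, reusing the in-scope mapID, hasher, oldProofNodes, updatedLeafMap, and expectedRoot. The HStar2Nodes, SparseGetNodeFunc, SparseSetNodeFunc, and HStar2LeafHash usage follows the names in the comment, but the exact Trillian signatures and field names are assumptions, the depth argument is taken as hasher.BitLen() rather than the comment's hasher.Size(), and math/big would have to be imported.

```go
	// Updated leaves for epoch e, hashed the same way the map server does.
	newLeaves := make([]merkle.HStar2LeafHash, 0, len(updatedLeafMap))
	for _, l := range updatedLeafMap {
		newLeaves = append(newLeaves, merkle.HStar2LeafHash{
			Index:    new(big.Int).SetBytes(l.Index),
			LeafHash: hasher.HashLeaf(mapID, l.Index, l.LeafValue),
		})
	}

	// get serves cached proof nodes from epoch e-1; nodes that never appeared
	// in any inclusion proof are treated as empty subtrees.
	get := func(depth int, index *big.Int) ([]byte, error) {
		nid := storage.NewNodeIDFromBigInt(depth, index, hasher.BitLen())
		if n, ok := oldProofNodes[nid.String()]; ok {
			return n.Hash, nil
		}
		return nil, nil
	}
	// set records the recomputed intermediate hashes for epoch e.
	set := func(depth int, index *big.Int, hash []byte) error {
		nid := storage.NewNodeIDFromBigInt(depth, index, hasher.BitLen())
		oldProofNodes[nid.String()] = &storage.Node{NodeID: nid, Hash: hash}
		return nil
	}

	// Recompute the root over the full map depth and compare it against the
	// signed map root of epoch e.
	hs2 := merkle.NewHStar2(mapID, hasher)
	newRoot, err := hs2.HStar2Nodes([]byte{}, hasher.BitLen(), newLeaves, get, set)
	if err != nil {
		return err
	}
	if !bytes.Equal(newRoot, expectedRoot) {
		return errors.New("recomputed root does not match the signed map root")
	}
	return nil
```
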