forked from bitcoin-core/btcdeb
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmerkle.h
1423 lines (1249 loc) · 49.4 KB
/
merkle.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright (c) 2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_MERKLE
#define BITCOIN_MERKLE
#include <stdint.h>
#include <array>
#include <iterator>
#include <vector>
#include <primitives/transaction.h>
// #include <primitives/block.h>
#include <serialize.h>
#include <uint256.h>
uint256 ComputeMerkleRoot(const std::vector<uint256>& leaves, bool* mutated = nullptr);
std::vector<uint256> ComputeMerkleBranch(const std::vector<uint256>& leaves, uint32_t position);
uint256 ComputeMerkleRootFromBranch(const uint256& leaf, const std::vector<uint256>& branch, uint32_t position);
/*
* Has similar API semantics, but produces Merkle roots and validates
* branches 3x as fast, and without the mutation vulnerability. Cannot
* be substituted for the non-fast variants because the hash values are
* different. ComputeFastMerkleBranch returns a pair with the second
* element being the path used to validate the branch.
*
* Because the fast Merkle branch does not do unnecessary hash operations,
* the path used to validate a branch is derived from but not necessarily
* the same as the original position in the list. ComputeFastMerkleBranch
* calculates the path by dropping high-order zeros from the binary
* representation of the position until the path is the same length or
* less as the number of Merkle branches.
*
* To understand why this works, consider a list of 303 elements from
* which a fast Merkle tree is constructed, and we request the branch to
* the 292nd element. The binary encoded positions of the last and
* desired elements are as follows:
*
* 0b 1 0 0 1 0 1 1 1 0 # decimal 302 (zero-indexed)
*
* 0b 1 0 0 1 0 0 0 1 1 # decimal 291
*
* The root of the Merkle tree has a left branch that contains 2^8 = 256
* elements, and a right branch that contains the remaining 47. The first
* level of the right branch contains 2^5 = 32 nodes on the left side, and
* the remaining 15 nodes on the right side. The next level contains 2^3 =
* 8 nodes on the left, and the remaining 7 on the right. This pattern
* repeats on the right hand side of the tree: each layer has the largest
* remaining power of two on the left, and the residual on the right.
*
* Notice specifically that the sizes of the sub-trees correspond to the
* set bits in the zero-based index of the final element. For each 1 at
* index n, there is a branch with 2^n elements on the left and the
* remaining amount on the right.
*
* So, for an element whose path traverses the right side of the tree, the
* intervening levels (e.g. 2^7 and 2^6) are missing. These correspond to
* zeros in the binary expansion, and they are removed from the path
* description. However once the path takes a left-turn into the tree (a
* zero where a one is present in the expansion of the last element), the
* sub-tree is full and no more 0's can be pruned out.
*
* So the path for element 292 becomes:
*
* 0b 1 - - 1 - 0 0 1 1 # decimal 291
*
* = 0b 1 1 0 0 1 1
*
* = 51
*/
uint256 ComputeFastMerkleRoot(const std::vector<uint256>& leaves);
std::pair<std::vector<uint256>, uint32_t> ComputeFastMerkleBranch(const std::vector<uint256>& leaves, uint32_t position);
uint256 ComputeFastMerkleRootFromBranch(const uint256& leaf, const std::vector<uint256>& branch, uint32_t path);
/*
* Compute the Merkle root of the transactions in a block.
* *mutated is set to true if a duplicated subtree was found.
*/
// uint256 BlockMerkleRoot(const CBlock& block, bool* mutated = nullptr);
/*
* Compute the Merkle root of the witness transactions in a block.
* *mutated is set to true if a duplicated subtree was found.
*/
// uint256 BlockWitnessMerkleRoot(const CBlock& block, bool* mutated = nullptr);
/*
* Compute the Merkle branch for the tree of transactions in a block, for a
* given position.
* This can be verified using ComputeMerkleRootFromBranch.
*/
// std::vector<uint256> BlockMerkleBranch(const CBlock& block, uint32_t position);
/*
* Each link of a Merkle tree can have one of three values in a proof
* object:
*
* DESCEND: This link connects to another sub-tree, which must be
* processed. The root of this sub-tree is the hash value of the
* link.
*
* VERIFY: The hash value of this link must be provided at
* validation time. Computation of the Merkle root and comparison
* with a reference value provides a batch confirmation as to
* whether ALL the provided VERIFY hashes are correct.
*
* SKIP: The hash value of this link is provided as part of the
* proof.
*/
enum class MerkleLink : unsigned char { DESCEND, VERIFY, SKIP };
/*
* An internal node can have up to eight different structures, the
* product of the 3 possible MerkleLink states the left and right
* branches can have, with the exception of the {SKIP, SKIP} state
* which would be pruned as a SKIP hash in the parent node.
*
* This means nodes can be represented as a 3-bit integer, and packed
* 8 nodes to each 3 byte sequence. The MerkleNode class uses an
* unsigned char to represent the unpacked code, whereas the
* MerkleNodeReference class is used to access a 3-bit code value
* within a packed representation.
*/
struct MerkleNode
{
    typedef unsigned char code_type;

protected:
    /* The packed 3-bit code for this node's {left, right} link pair. */
    code_type m_code;

    /* Lookup tables mapping a code back to its left / right MerkleLink. */
    static const std::array<MerkleLink, 8> m_left_from_code;
    static const std::array<MerkleLink, 8> m_right_from_code;

    /* Computes the 3-bit code for a {left, right} MerkleLink pair. */
    static code_type _get_code(MerkleLink left, MerkleLink right);

public:
    explicit MerkleNode(MerkleLink left, MerkleLink right) : m_code(_get_code(left, right)) { }
    explicit MerkleNode(code_type code) : m_code(code) { }

    /* Note that code 0 decodes to a {VERIFY, SKIP} node. */
    MerkleNode() : m_code(0) { }

    /* Plain value semantics; the compiler-generated versions suffice. */
    MerkleNode(const MerkleNode&) = default;
    MerkleNode(MerkleNode&&) = default;
    MerkleNode& operator=(const MerkleNode&) = default;
    MerkleNode& operator=(MerkleNode&&) = default;

    /*
     * Ideally this would be an operator int() / operator=() pair,
     * but C++ does not allow an assignment operator to be marked
     * explicit. That would defeat much of the protection strong
     * typing gives us: any integer or Boolean value could silently
     * be interpreted as a code and assigned to a MerkleNode, likely
     * producing a memory access violation for values outside 0..7.
     */
    inline code_type GetCode() const
    { return m_code; }
    inline MerkleNode& SetCode(code_type code)
    {
        m_code = code;
        return *this;
    }

    /*
     * The left/right getters and setters go through the lookup
     * tables to recompute the code. The encoding would also permit
     * arithmetic and shifts, but a table lookup is of comparable
     * efficiency.
     */
    inline MerkleLink GetLeft() const
    { return m_left_from_code[m_code]; }
    inline MerkleNode& SetLeft(MerkleLink left)
    {
        m_code = _get_code(left, m_right_from_code[m_code]);
        return *this;
    }
    inline MerkleLink GetRight() const
    { return m_right_from_code[m_code]; }
    inline MerkleNode& SetRight(MerkleLink right)
    {
        m_code = _get_code(m_left_from_code[m_code], right);
        return *this;
    }

    /* Equality and ordering are defined on the underlying codes. */
    inline bool operator==(MerkleNode other) const
    { return m_code == other.m_code; }
    inline bool operator!=(MerkleNode other) const
    { return m_code != other.m_code; }
    inline bool operator<(MerkleNode other) const
    { return m_code < other.m_code; }
    inline bool operator<=(MerkleNode other) const
    { return m_code <= other.m_code; }
    inline bool operator>=(MerkleNode other) const
    { return m_code >= other.m_code; }
    inline bool operator>(MerkleNode other) const
    { return m_code > other.m_code; }

    /* Needs access to m_{left,right}_from_code and _get_code() */
    friend struct MerkleNodeReference;
};
/*
* Now we begin constructing the necessary infrastructure for
* supporting an STL-like container for packed 3-bit code
* representations of MerkleNode values. This parallels the way
* that std::vector<bool> is specialized, with the added complication
* of a non-power-of-2 packed size.
*/
/*
* First we build a "reference" class which is able to address the
* location of a packed 3-bit value, and to read and write that value
* without affecting its neighbors.
*
* Then we will make use of this MerkleNode reference type to
* construct an STL-compatible iterator class (technically two, since
* the class const_iterator is not a const instance of the class
* iterator, for reasons).
*/
struct MerkleNodeReference
{
    /*
     * Nodes are stored in a tightly packed 3-bit encoding, the code,
     * which fits eight node specifications into every 3-byte group:
     *
     *     node index:  00011122 23334445 55666777
     *                   byte 0   byte 1   byte 2
     *      bit index:  76543210 76543210 76543210
     *
     * A reference to a particular node therefore consists of a
     * pointer to the start of its 3-byte group plus the node's index
     * (0 through 7) within that group.
     */
    typedef unsigned char base_type;
    typedef unsigned char offset_type;

protected:
    base_type* m_base;    /* start of the 3-byte group */
    offset_type m_offset; /* node index within the group, 0..7 */

    /*
     * Only the friended iterator and container code may build a
     * reference from its raw parts, hence a protected constructor.
     */
    MerkleNodeReference(base_type* base, offset_type offset) : m_base(base), m_offset(offset) { }

    /*
     * This type emulates a reference, not a pointer; an unbound,
     * default-constructed reference would be meaningless.
     */
    MerkleNodeReference() = delete;

public:
    /*
     * Copy/move construction clones the reference itself: the new
     * object aliases the same packed MerkleNode value. The defaults
     * do exactly that.
     */
    MerkleNodeReference(const MerkleNodeReference& other) = default;
    MerkleNodeReference(MerkleNodeReference&& other) = default;

    /*
     * Assignment, by contrast, is NOT defaulted: to behave like a
     * reference it must copy the underlying value across, leaving
     * both objects aliasing their original storage locations. This
     * is deliberately different from the copy constructor, which
     * copies the reference itself.
     */
    inline MerkleNodeReference& operator=(const MerkleNodeReference& other)
    { return SetCode(other.GetCode()); }
    inline MerkleNodeReference& operator=(MerkleNodeReference&& other)
    { return SetCode(other.GetCode()); }

public:
    /* Read the packed 3-bit code value. */
    MerkleNode::code_type GetCode() const;
    /* Overwrite the packed 3-bit code value. */
    MerkleNodeReference& SetCode(MerkleNode::code_type code);

    /* Per-side access to the MerkleLink values. */
    inline MerkleLink GetLeft() const
    { return MerkleNode::m_left_from_code[GetCode()]; }
    inline MerkleNodeReference& SetLeft(MerkleLink left)
    { return SetCode(MerkleNode::_get_code(left, GetRight())); }
    inline MerkleLink GetRight() const
    { return MerkleNode::m_right_from_code[GetCode()]; }
    inline MerkleNodeReference& SetRight(MerkleLink right)
    { return SetCode(MerkleNode::_get_code(GetLeft(), right)); }

    /* Comparisons operate on the referenced code values. */
    inline bool operator==(const MerkleNodeReference& other) const
    { return GetCode() == other.GetCode(); }
    inline bool operator==(MerkleNode other) const
    { return GetCode() == other.GetCode(); }
    inline bool operator!=(const MerkleNodeReference& other) const
    { return GetCode() != other.GetCode(); }
    inline bool operator!=(MerkleNode other) const
    { return GetCode() != other.GetCode(); }
    inline bool operator<(const MerkleNodeReference& other) const
    { return GetCode() < other.GetCode(); }
    inline bool operator<(MerkleNode other) const
    { return GetCode() < other.GetCode(); }
    inline bool operator<=(const MerkleNodeReference& other) const
    { return GetCode() <= other.GetCode(); }
    inline bool operator<=(MerkleNode other) const
    { return GetCode() <= other.GetCode(); }
    inline bool operator>=(const MerkleNodeReference& other) const
    { return GetCode() >= other.GetCode(); }
    inline bool operator>=(MerkleNode other) const
    { return GetCode() >= other.GetCode(); }
    inline bool operator>(const MerkleNodeReference& other) const
    { return GetCode() > other.GetCode(); }
    inline bool operator>(MerkleNode other) const
    { return GetCode() > other.GetCode(); }

    /* Conversion to/from class MerkleNode */
    inline MerkleNodeReference& operator=(const MerkleNode& other)
    { return SetCode(other.GetCode()); }
    inline operator MerkleNode() const
    { return MerkleNode(GetCode()); }

protected:
    /* Needs C(base,offset) and access to m_base and m_offset */
    friend struct MerkleNodeIteratorBase;
    /* Needs C(base,offset) */
    template<class T, class Alloc> friend class std::vector;
};
/*
 * Non-member comparison overloads for the case where the MerkleNode
 * operand appears on the left; each forwards to the mirrored member
 * operator on MerkleNodeReference with the operands swapped.
 */
inline bool operator==(MerkleNode lhs, const MerkleNodeReference& rhs)
{ return rhs == lhs; }
inline bool operator!=(MerkleNode lhs, const MerkleNodeReference& rhs)
{ return rhs != lhs; }
inline bool operator<(MerkleNode lhs, const MerkleNodeReference& rhs)
{ return rhs > lhs; }
inline bool operator<=(MerkleNode lhs, const MerkleNodeReference& rhs)
{ return rhs >= lhs; }
inline bool operator>=(MerkleNode lhs, const MerkleNodeReference& rhs)
{ return rhs <= lhs; }
inline bool operator>(MerkleNode lhs, const MerkleNodeReference& rhs)
{ return rhs < lhs; }
/*
* Now we construct an STL-compatible iterator object. If you are not
* familiar with writing STL iterators, this might be difficult to
* review. I will not explain how this works in detail, but I
* encourage reviewers to compare this with how std::vector<bool>'s
* iterators are implemented, as well as available documentation for
* std::iterator, as necessary.
*
* We derive from std::iterator basically just to provide the ability
* to specialized algorithms based on iterator category tags. All
* iterator functionality is implemented by this class due to the
* peculiarities of iterating over packed representations.
*
* Note, if you're not aware, that for STL containers const_iterator
* is not the iterator class with a const qualifier applied. The class
* MerkleNodeIteratorBase implements the common functionality and the
* two iterators derive from it.
*/
struct MerkleNodeIteratorBase
{
    /*
     * Standard iterator member typedefs, declared directly instead
     * of inheriting from std::iterator, which is deprecated in
     * C++17 and removed in C++20 (the explicit typedefs are equally
     * valid C++11). This also corrects the advertised traits: the
     * old std::iterator<tag, base_type> base reported the packed
     * byte type as value_type, whereas the value extracted through
     * an iterator is really a MerkleNode -- accessed via a
     * MerkleNodeReference proxy -- decoded from the packed array of
     * 3-bit code values on the fly.
     */
    typedef std::random_access_iterator_tag iterator_category;
    typedef MerkleNode value_type;
    typedef std::ptrdiff_t difference_type;
    typedef MerkleNodeReference reference;
    typedef MerkleNodeReference* pointer;

protected:
    /* The reference doubles as the iterator's position state. */
    MerkleNodeReference m_ref;

    /*
     * A pass-through initializer used by the derived iterator types,
     * since otherwise m_ref would not be accessible to their
     * constructor member initialization lists.
     */
    MerkleNodeIteratorBase(MerkleNodeReference::base_type* base, MerkleNodeReference::offset_type offset) : m_ref(base, offset) { }

    /*
     * Constructing an iterator from another iterator clones the
     * underlying reference.
     */
    MerkleNodeIteratorBase(const MerkleNodeIteratorBase&) = default;
    MerkleNodeIteratorBase(MerkleNodeIteratorBase&&) = default;

    /*
     * Copy assignment also clones the underlying reference, but a
     * non-default copy assignment is required: the memberwise
     * default would invoke MerkleNodeReference's own assignment
     * operator, which emulates reference behavior by writing
     * through to the packed value, whereas here we must copy the
     * reference (base and offset) itself.
     */
    inline MerkleNodeIteratorBase& operator=(const MerkleNodeIteratorBase& other)
    {
        m_ref.m_base = other.m_ref.m_base;
        m_ref.m_offset = other.m_ref.m_offset;
        return *this;
    }
    inline MerkleNodeIteratorBase& operator=(MerkleNodeIteratorBase&& other)
    {
        m_ref.m_base = other.m_ref.m_base;
        m_ref.m_offset = other.m_ref.m_offset;
        return *this;
    }

public:
    /* Distance between two iterators, in nodes. */
    difference_type operator-(const MerkleNodeIteratorBase& other) const;

    /*
     * The standard increment, decrement, advancement, etc. operators
     * for both iterator and const_iterator do the same thing and
     * could be defined here, but then they would be returning
     * instances of the base class not the iterator. So look to those
     * definitions in the derived classes below.
     */

    /*
     * Equality
     *
     * Note: Comparing the underlying reference directly with
     *       MerkleNodeReference::operator== and friends would result
     *       in the underlying values being compared, not the memory
     *       addresses.
     */
    inline bool operator==(const MerkleNodeIteratorBase& other) const
    {
        return m_ref.m_base == other.m_ref.m_base
            && m_ref.m_offset == other.m_ref.m_offset;
    }
    inline bool operator!=(const MerkleNodeIteratorBase& other) const
    { return !(*this == other); }

    /* Relational */
    inline bool operator<(const MerkleNodeIteratorBase& other) const
    {
        return m_ref.m_base < other.m_ref.m_base
            || (m_ref.m_base == other.m_ref.m_base && m_ref.m_offset < other.m_ref.m_offset);
    }
    inline bool operator<=(const MerkleNodeIteratorBase& other) const
    { return !(other < *this); }
    inline bool operator>=(const MerkleNodeIteratorBase& other) const
    { return !(*this < other); }
    inline bool operator>(const MerkleNodeIteratorBase& other) const
    { return other < *this; }

protected:
    /*
     * Move to the next 3-bit code value, incrementing m_base (by 3!)
     * only if we've gone past the end of a 3-byte block of 8 code
     * values.
     */
    void _incr();
    /*
     * Move to the prior 3-bit code value, moving m_base back (by 3)
     * if we've gone past the beginning of the 3-byte block of 8 code
     * values it points to.
     */
    void _decr();
    /*
     * Move an arbitrary number of elements forward or backward, for
     * random access (see the related operator-() definition below).
     */
    void _seek(difference_type distance);
};
/*
* Forward random access iterator, using the _incr(), _decr(), _seek()
* and operator-() methods of MerkleNodeIteratorBase, which is the
* important business logic. This class is mostly just API wrappers to
* provide an API interface close enough to API compatible with STL
* iterators to be usable with other standard library generics.
*/
struct MerkleNodeIterator : public MerkleNodeIteratorBase
{
    typedef MerkleNodeIterator iterator;
    typedef MerkleNodeReference reference;
    typedef MerkleNodeReference* pointer;

    /*
     * The default constructor yields a singular, unusable iterator;
     * it exists only so that an iterator variable can be declared
     * first and initialized separately.
     */
    MerkleNodeIterator() : MerkleNodeIteratorBase(nullptr, 0) { }

    /* Compiler-generated copy/move semantics are correct here. */
    MerkleNodeIterator(const MerkleNodeIterator& other) = default;
    MerkleNodeIterator(MerkleNodeIterator&& other) = default;
    MerkleNodeIterator& operator=(const MerkleNodeIterator& other) = default;
    MerkleNodeIterator& operator=(MerkleNodeIterator&& other) = default;

protected:
    MerkleNodeIterator(MerkleNodeReference::base_type* base, MerkleNodeReference::offset_type offset) : MerkleNodeIteratorBase(base, offset) { }

public:
    /* Dereference */
    inline reference operator*() const
    { return m_ref; }
    inline pointer operator->() const
    { return const_cast<pointer>(&m_ref); }

    /* Advancement */
    inline iterator& operator++()
    {
        _incr();
        return *this;
    }
    inline iterator operator++(int)
    {
        iterator tmp = *this;
        _incr();
        return tmp;
    }
    inline iterator& operator--()
    {
        _decr();
        return *this;
    }
    inline iterator operator--(int)
    {
        iterator tmp = *this;
        _decr();
        return tmp;
    }

    /* Random access */
    inline difference_type operator-(const MerkleNodeIterator& other) const
    {
        /*
         * Delegate to the base class; this overload must exist
         * explicitly because the iterator-minus-count overload
         * defined below would otherwise hide it.
         */
        return MerkleNodeIteratorBase::operator-(other);
    }
    inline iterator& operator+=(difference_type n)
    {
        _seek(n);
        return *this;
    }
    inline iterator& operator-=(difference_type n)
    {
        _seek(-n);
        return *this;
    }
    inline iterator operator+(difference_type n) const
    {
        iterator tmp = *this;
        return tmp += n;
    }
    inline iterator operator-(difference_type n) const
    {
        iterator tmp = *this;
        return tmp -= n;
    }
    inline reference operator[](difference_type n) const
    { return *(*this + n); }

    /* std::vector<Node> specialization uses C(base,offset) */
    template<class T, class Alloc> friend class std::vector;
};
/* Symmetric n + iterator form required of random access iterators. */
inline MerkleNodeIterator operator+(MerkleNodeIterator::difference_type n, const MerkleNodeIterator& x)
{
    MerkleNodeIterator ret = x;
    ret += n;
    return ret;
}
struct MerkleNodeConstIterator : public MerkleNodeIteratorBase
{
    typedef MerkleNodeConstIterator iterator;
    typedef const MerkleNodeReference reference;
    typedef const MerkleNodeReference* pointer;

    /* Produces a singular (sentinel-valued) iterator, unusable until assigned. */
    MerkleNodeConstIterator() : MerkleNodeIteratorBase(nullptr, 0) { }

    /* Conversion from the mutable iterator; passes m_ref through unchanged. */
    MerkleNodeConstIterator(const MerkleNodeIterator& other) : MerkleNodeIteratorBase(static_cast<const MerkleNodeIteratorBase&>(other)) { }

    /* Compiler-generated copy/move semantics are correct here. */
    MerkleNodeConstIterator(const MerkleNodeConstIterator& other) = default;
    MerkleNodeConstIterator(MerkleNodeConstIterator&& other) = default;
    MerkleNodeConstIterator& operator=(const MerkleNodeConstIterator& other) = default;
    MerkleNodeConstIterator& operator=(MerkleNodeConstIterator&& other) = default;

protected:
    /*
     * The const_cast is required (and allowed) because the const
     * qualifier is only dropped to store the pointer into m_ref; no
     * API of this class ever manipulates the referenced value.
     */
    MerkleNodeConstIterator(const MerkleNodeReference::base_type* base, MerkleNodeReference::offset_type offset) : MerkleNodeIteratorBase(const_cast<MerkleNodeReference::base_type*>(base), offset) { }

public:
    /* Dereference */
    inline reference operator*() const
    { return m_ref; }
    inline pointer operator->() const
    { return &m_ref; }

    /* Advancement */
    inline iterator& operator++()
    {
        _incr();
        return *this;
    }
    inline iterator operator++(int)
    {
        iterator ret(*this);
        _incr();
        return ret;
    }
    inline iterator& operator--()
    {
        _decr();
        return *this;
    }
    inline iterator operator--(int)
    {
        iterator ret(*this);
        _decr();
        return ret;
    }

    /* Random access */
    inline difference_type operator-(const MerkleNodeConstIterator& other) const
    {
        /*
         * Delegate to the base class; this overload must exist
         * explicitly because the iterator-minus-count overload
         * defined below would otherwise hide it.
         */
        return MerkleNodeIteratorBase::operator-(other);
    }
    inline iterator& operator+=(difference_type n)
    {
        _seek(n);
        return *this;
    }
    inline iterator& operator-=(difference_type n)
    {
        _seek(-n);
        return *this;
    }
    inline iterator operator+(difference_type n) const
    {
        iterator ret(*this);
        ret._seek(n);
        return ret;
    }
    inline iterator operator-(difference_type n) const
    {
        iterator ret(*this);
        ret._seek(-n);
        return ret;
    }
    inline reference operator[](difference_type n) const
    { return *(*this + n); }

    /* std::vector<MerkleNode> uses C(base,offset) */
    template<class T, class Alloc> friend class std::vector;
};
/* Symmetric n + iterator form required of random access iterators. */
inline MerkleNodeConstIterator operator+(MerkleNodeConstIterator::difference_type n, const MerkleNodeConstIterator& x)
{
    MerkleNodeConstIterator ret = x;
    ret += n;
    return ret;
}
/*
* Now we are ready to define the specialization of std::vector for
* packed 3-bit MerkleNode values. We use a std::vector<unsigned char>
* as the underlying container to hold the encoded bytes, with 3
* packed MerkleNodes per byte. We then provide a std::vector
* compatible API to return iterators over MerkleNodeReference objects
* inside this packed array.
*/
namespace std {
template<class Allocator>
class vector<MerkleNode, Allocator>
{
public:
typedef MerkleNode value_type;
typedef MerkleNodeReference::base_type base_type;
typedef typename Allocator::template rebind<value_type>::other allocator_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef MerkleNodeReference reference;
typedef const MerkleNodeReference const_reference;
typedef MerkleNodeReference* pointer;
typedef const MerkleNodeReference* const_pointer;
typedef MerkleNodeIterator iterator;
typedef MerkleNodeConstIterator const_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
protected:
/*
* A std::vector<unsignd char> is what is actually used to store
* the packed Node representation.
*/
typedef typename Allocator::template rebind<base_type>::other base_allocator_type;
typedef std::vector<base_type, base_allocator_type> vch_type;
/*
* m_vch.size() is (3 * m_count) / 8, but because of the truncation
* we can't know from m_vch.size() alone how many nodes are in the
* tree structure, so the count needs to be stored explicitly.
*/
vch_type m_vch;
size_type m_count;
/* Returns the required size of m_vch to contain count packed Nodes. */
static inline const typename vch_type::size_type _vch_size(size_type count)
{ return (3 * count + 7) / 8; }
public:
explicit vector(const Allocator& alloc = Allocator()) : m_vch(static_cast<base_allocator_type>(alloc)), m_count(0) { }
/*
* Yes, this doesn't allow specifying the allocator. That is a bug
* in C++11, fixed in C++14. However we aim for exact
* compatibility with the version of C++ used by Bitcoin Core,
* which is still pegged to C++11.
*
* Note: Because m_vch is a vector of a primitive type, its values
* are value initialized to zero according to the C++11
* standard. We don't need to do anything. Note, however,
* that in prior versions of the standard the behavior was
* different and this implementation will not work with
* pre-C++11 compilers.
*/
explicit vector(size_type count) : m_vch(_vch_size(count)), m_count(count) { }
vector(size_type count, value_type value, const Allocator& alloc = Allocator()) : m_vch(_vch_size(count), 0, static_cast<base_allocator_type>(alloc)), m_count(count)
{
MerkleNode::code_type code = value.GetCode();
if (code) // Already filled with zeros
_fill(0, count, code);
}
/* Assign constructors */
template<class InputIt>
vector(InputIt first, InputIt last, const Allocator& alloc = Allocator()) : m_vch(static_cast<base_allocator_type>(alloc)), m_count(0)
{ insert(begin(), first, last); }
vector(std::initializer_list<value_type> ilist, const Allocator& alloc = Allocator()) : m_vch(static_cast<base_allocator_type>(alloc)), m_count(0)
{ assign(ilist); }
/* Copy constructors */
vector(const vector& other) = default;
vector(const vector& other, const Allocator& alloc) : m_vch(other.m_vch, static_cast<base_allocator_type>(alloc)), m_count(other.m_count) { }
vector(vector&& other) = default;
vector(vector&& other, const Allocator& alloc) : m_vch(other.m_vch, static_cast<base_allocator_type>(alloc)), m_count(other.m_count) { }
/* Assignment operators */
vector& operator=(const vector& other) = default;
vector& operator=(vector&& other) = default;
inline vector& operator=(std::initializer_list<value_type> ilist)
{
assign(ilist);
return *this;
}
/* Clear & assign methods */
void assign(size_type count, value_type value)
{
clear();
insert(begin(), count, value);
}
template<class InputIt>
void assign(InputIt first, InputIt last)
{
clear();
insert(begin(), first, last);
}
void assign(std::initializer_list<value_type> ilist)
{
clear();
reserve(ilist.size());
for (auto node : ilist)
push_back(node);
}
allocator_type get_allocator() const
{ return allocator_type(m_vch.get_allocator()); }
/* Item access */
reference at(size_type pos)
{
if (!(pos < size()))
throw std::out_of_range("vector<Node>::at out of range");
return (*this)[pos];
}
const_reference at(size_type pos) const
{
if (!(pos < size()))
throw std::out_of_range("vector<Node>::at out of range");
return (*this)[pos];
}
inline reference operator[](size_type pos)
{ return reference(&m_vch[0] + (3 * (pos / 8)), pos % 8); }
inline const_reference operator[](size_type pos) const
{ return const_reference(const_cast<const_reference::base_type*>(&m_vch[0] + (3 * (pos / 8))), pos % 8); }
inline reference front()
{ return (*this)[0]; }
inline const_reference front() const
{ return (*this)[0]; }
inline reference back()
{ return (*this)[m_count-1]; }
inline const_reference back() const
{ return (*this)[m_count-1]; }
inline base_type* data()
{ return &m_vch[0]; }
inline const base_type* data() const
{ return &m_vch[0]; }
/* Iterators */
inline iterator begin() noexcept
{ return iterator(&m_vch[0], 0); }
inline const_iterator begin() const noexcept
{ return const_iterator(&m_vch[0], 0); }
inline const_iterator cbegin() const noexcept
{ return begin(); }
inline iterator end() noexcept
{ return iterator(&m_vch[0] + (3 * (m_count / 8)), m_count % 8); }
inline const_iterator end() const noexcept
{ return const_iterator(&m_vch[0] + (3 * (m_count / 8)), m_count % 8); }
inline const_iterator cend() const noexcept
{ return end(); }
inline reverse_iterator rbegin() noexcept
{ return reverse_iterator(end()); }
inline const_reverse_iterator rbegin() const noexcept
{ return const_reverse_iterator(end()); }
inline const_reverse_iterator crbegin() const noexcept
{ return rbegin(); }
inline reverse_iterator rend() noexcept
{ return reverse_iterator(begin()); }
inline const_reverse_iterator rend() const noexcept
{ return const_reverse_iterator(begin()); }
inline const_reverse_iterator crend() const noexcept
{ return rend(); }
/* Size and capacity */
inline bool empty() const noexcept
{ return !m_count; }
inline size_type size() const noexcept
{ return m_count; }
inline size_type max_size() const noexcept
{
/* We must be careful in what we return due to overflow. */
return std::max(m_vch.max_size(), 8 * m_vch.max_size() / 3);
}
inline void reserve(size_type new_cap)
{ m_vch.reserve(_vch_size(new_cap)); }
inline size_type capacity() const noexcept
{
/* Again, careful of overflow, although it is more of a
* theoretical concern here since such limitations would only
* be encountered if the vector consumed more than 1/8th of
* the addressable memory range. */
return std::max(m_count, 8 * m_vch.capacity() / 3);
}
inline void resize(size_type count)
{ resize(count, value_type()); }
void resize(size_type count, value_type value)
{
auto old_count = m_count;
_resize(count);
if (old_count < count)
_fill(old_count, count, value.GetCode());
}
inline void shrink_to_fit()
{ m_vch.shrink_to_fit(); }
protected:
/*
* Resizes the underlying vector to support the number of packed
* Nodes specified. Does NOT initialize any newly allocated bytes,
* except for the unused bits in the last byte when shrinking or
* the last new byte added, to prevent acquisition of dirty
* status. It is the responsibility of the caller to initialize
* any added new MerkleNodes.
*/
void _resize(size_type count)
{
if (count < m_count) {
/* Clear bits of elements being removed in the new last
* byte, for the purpose of not introducing dirty
* status. */
_fill(count, std::min((count + 7) & ~7, m_count), 0);
}
size_type new_vch_size = _vch_size(count);
m_vch.resize(new_vch_size);
if (m_count < count) {
/* Clear the last byte, if a byte is being added, so that
* none of the extra bits introduce dirty status. */
if (new_vch_size > _vch_size(m_count)) {
m_vch.back() = 0;
}
}
m_count = count;
}
/*
* A memmove()-like behavior over the packed elements of this
* container. The source and the destination are allowed to
* overlap. Any non-overlap in the source is left with its prior
* value intact.
*/
void _move(size_type first, size_type last, size_type dest)
{
/* TODO: This could be made much faster by copying chunks at a
* time. This far less efficient approach is taken
* because it is more obviously correct and _move is not
* in the pipeline critical to validation performance. */
if (dest < first) {
std::copy(begin()+first, begin()+last, begin()+dest);
}
if (first < dest) {
dest += last - first;
std::copy_backward(begin()+first, begin()+last, begin()+dest);
}
}
/*
* A std::fill()-like behavior over a range of the packed elements
* of this container.
*/
void _fill(size_type first, size_type last, MerkleNode::code_type value)
{
/* TODO: This could be made much faster for long ranges by
* precomputing the 3-byte repeating sequence and using
* that for long runs. However this method mostly exists
* for API compatability with std::vector, and is not
* used by Merkle tree manipulation code, which at best