|
| 1 | +// Copyright 2021 The go-ethereum Authors |
| 2 | +// This file is part of the go-ethereum library. |
| 3 | +// |
| 4 | +// The go-ethereum library is free software: you can redistribute it and/or modify |
| 5 | +// it under the terms of the GNU Lesser General Public License as published by |
| 6 | +// the Free Software Foundation, either version 3 of the License, or |
| 7 | +// (at your option) any later version. |
| 8 | +// |
| 9 | +// The go-ethereum library is distributed in the hope that it will be useful, |
| 10 | +// but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | +// GNU Lesser General Public License for more details. |
| 13 | +// |
| 14 | +// You should have received a copy of the GNU Lesser General Public License |
| 15 | +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. |
| 16 | + |
| 17 | +package rlpx |
| 18 | + |
| 19 | +import ( |
| 20 | + "io" |
| 21 | +) |
| 22 | + |
// readBuffer implements buffering for network reads. This type is similar to bufio.Reader,
// with two crucial differences: the buffer slice is exposed, and the buffer keeps all
// read data available until reset.
//
// How to use this type:
//
// Keep a readBuffer b alongside the underlying network connection. When reading a packet
// from the connection, first call b.reset(). This empties b.data. Now perform reads
// through b.read() until the end of the packet is reached. The complete packet data is
// now available in b.data.
type readBuffer struct {
	// data holds the bytes handed out by read since the last reset. Its
	// length grows as reads are served from the backing array.
	data []byte
	// end is the index (into data's backing array) one past the last byte
	// filled from the network. Bytes between len(data) and end have been
	// received but not yet returned by read.
	end int
}
| 37 | + |
| 38 | +// reset removes all processed data which was read since the last call to reset. |
| 39 | +// After reset, len(b.data) is zero. |
| 40 | +func (b *readBuffer) reset() { |
| 41 | + unprocessed := b.end - len(b.data) |
| 42 | + copy(b.data[:unprocessed], b.data[len(b.data):b.end]) |
| 43 | + b.end = unprocessed |
| 44 | + b.data = b.data[:0] |
| 45 | +} |
| 46 | + |
| 47 | +// read reads at least n bytes from r, returning the bytes. |
| 48 | +// The returned slice is valid until the next call to reset. |
| 49 | +func (b *readBuffer) read(r io.Reader, n int) ([]byte, error) { |
| 50 | + offset := len(b.data) |
| 51 | + have := b.end - len(b.data) |
| 52 | + |
| 53 | + // If n bytes are available in the buffer, there is no need to read from r at all. |
| 54 | + if have >= n { |
| 55 | + b.data = b.data[:offset+n] |
| 56 | + return b.data[offset : offset+n], nil |
| 57 | + } |
| 58 | + |
| 59 | + // Make buffer space available. |
| 60 | + need := n - have |
| 61 | + b.grow(need) |
| 62 | + |
| 63 | + // Read. |
| 64 | + rn, err := io.ReadAtLeast(r, b.data[b.end:cap(b.data)], need) |
| 65 | + if err != nil { |
| 66 | + return nil, err |
| 67 | + } |
| 68 | + b.end += rn |
| 69 | + b.data = b.data[:offset+n] |
| 70 | + return b.data[offset : offset+n], nil |
| 71 | +} |
| 72 | + |
| 73 | +// grow ensures the buffer has at least n bytes of unused space. |
| 74 | +func (b *readBuffer) grow(n int) { |
| 75 | + if cap(b.data)-b.end >= n { |
| 76 | + return |
| 77 | + } |
| 78 | + need := n - (cap(b.data) - b.end) |
| 79 | + offset := len(b.data) |
| 80 | + b.data = append(b.data[:cap(b.data)], make([]byte, need)...) |
| 81 | + b.data = b.data[:offset] |
| 82 | +} |
| 83 | + |
// writeBuffer implements buffering for network writes. This is essentially
// a convenience wrapper around a byte slice.
type writeBuffer struct {
	data []byte // accumulated output; reset truncates it to length zero
}
| 89 | + |
| 90 | +func (b *writeBuffer) reset() { |
| 91 | + b.data = b.data[:0] |
| 92 | +} |
| 93 | + |
| 94 | +func (b *writeBuffer) appendZero(n int) []byte { |
| 95 | + offset := len(b.data) |
| 96 | + b.data = append(b.data, make([]byte, n)...) |
| 97 | + return b.data[offset : offset+n] |
| 98 | +} |
| 99 | + |
| 100 | +func (b *writeBuffer) Write(data []byte) (int, error) { |
| 101 | + b.data = append(b.data, data...) |
| 102 | + return len(data), nil |
| 103 | +} |
| 104 | + |
// maxUint24 is the largest value representable in 24 bits (2^24 - 1).
const maxUint24 = int(^uint32(0) >> 8)
| 106 | + |
// readUint24 decodes b[0:3] as a big-endian 24-bit unsigned integer.
func readUint24(b []byte) uint32 {
	return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])
}
| 110 | + |
// putUint24 encodes v into b[0:3] as a big-endian 24-bit unsigned integer.
// Bits of v above the low 24 are discarded.
func putUint24(v uint32, b []byte) {
	b[2] = byte(v)
	b[1] = byte(v >> 8)
	b[0] = byte(v >> 16)
}
| 116 | + |
// growslice returns a slice with length at least wantLength. It reuses b
// when possible: b is returned unchanged if it is already long enough, and
// extended to its full capacity if the capacity suffices. Otherwise a new
// zeroed slice is allocated; note the contents of b are NOT copied over in
// that case.
func growslice(b []byte, wantLength int) []byte {
	switch {
	case len(b) >= wantLength:
		return b
	case cap(b) >= wantLength:
		return b[:cap(b)]
	default:
		return make([]byte, wantLength)
	}
}