// SPDX-License-Identifier: (Apache-2.0 OR MIT)
// Derived from uBPF <https://github.com/iovisor/ubpf>
// Copyright 2015 Big Switch Networks, Inc
// (uBPF: VM architecture, parts of the interpreter, originally in C)
// Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>
// (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for helpers)
use std::collections::HashMap;
use std::io::{Error, ErrorKind};
use ebpf;
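// Check that a memory access of `len` bytes at `addr` falls entirely within one of the
// regions the program may touch: the metadata buffer, the memory area, or the VM stack.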
fn check_mem(addr: u64, len: usize, access_type: &str, insn_ptr: usize,
mbuff: &[u8], mem: &[u8], stack: &[u8]) -> Result<(), Error> {
if let Some(addr_end) = addr.checked_add(len as u64) {
if mbuff.as_ptr() as u64 <= addr && addr_end <= mbuff.as_ptr() as u64 + mbuff.len() as u64 {
return Ok(())
}
if mem.as_ptr() as u64 <= addr && addr_end <= mem.as_ptr() as u64 + mem.len() as u64 {
return Ok(())
}
if stack.as_ptr() as u64 <= addr && addr_end <= stack.as_ptr() as u64 + stack.len() as u64 {
return Ok(())
}
}
Err(Error::new(ErrorKind::Other, format!(
"Error: out of bounds memory {} (insn #{:?}), addr {:#x}, size {:?}\nmbuff: {:#x}/{:#x}, mem: {:#x}/{:#x}, stack: {:#x}/{:#x}",
access_type, insn_ptr, addr, len,
mbuff.as_ptr() as u64, mbuff.len(),
mem.as_ptr() as u64, mem.len(),
stack.as_ptr() as u64, stack.len()
)))
}
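// Interpreter entry point: decodes the program and executes it one instruction at a time,
// dispatching on the opcode until an EXIT instruction returns the value held in r0.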
#[allow(unknown_lints)]
#[allow(cyclomatic_complexity)]
pub fn execute_program(prog_: Option<&[u8]>, mem: &[u8], mbuff: &[u8], helpers: &HashMap<u32, ebpf::Helper>) -> Result<u64, Error> {
const U32MAX: u64 = u32::MAX as u64;
const SHIFT_MASK_64: u64 = 0x3f;
let prog = match prog_ {
Some(prog) => prog,
None => Err(Error::new(ErrorKind::Other,
"Error: No program set, call prog_set() to load one"))?,
};
    let stack = vec![0u8; ebpf::STACK_SIZE];
    // R1 points to the beginning of the memory area, R10 to the top of the stack
let mut reg: [u64;11] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, stack.as_ptr() as u64 + stack.len() as u64
];
if !mbuff.is_empty() {
reg[1] = mbuff.as_ptr() as u64;
}
else if !mem.is_empty() {
reg[1] = mem.as_ptr() as u64;
}
let check_mem_load = | addr: u64, len: usize, insn_ptr: usize | {
check_mem(addr, len, "load", insn_ptr, mbuff, mem, &stack)
};
let check_mem_store = | addr: u64, len: usize, insn_ptr: usize | {
check_mem(addr, len, "store", insn_ptr, mbuff, mem, &stack)
};
// Loop on instructions
    let mut insn_ptr: usize = 0;
while insn_ptr * ebpf::INSN_SIZE < prog.len() {
let insn = ebpf::get_insn(prog, insn_ptr);
insn_ptr += 1;
let _dst = insn.dst as usize;
let _src = insn.src as usize;
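        // eBPF jump offsets are relative to the instruction *following* the jump;
        // `insn_ptr` was already incremented above, so the offset can be applied directly.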
let mut do_jump = || {
insn_ptr = (insn_ptr as i16 + insn.off) as usize;
};
match insn.opc {
// BPF_LD class
            // LD_ABS_* and LD_IND_* are supposed to load the pointer to the data from the
            // metadata buffer. Since this pointer is constant, and since we already know it
            // (mem), do not bother re-fetching it; use mem directly.
ebpf::LD_ABS_B => reg[0] = unsafe {
let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u8;
                check_mem_load(x as u64, 1, insn_ptr)?;
x.read_unaligned() as u64
},
ebpf::LD_ABS_H => reg[0] = unsafe {
let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u16;
                check_mem_load(x as u64, 2, insn_ptr)?;
x.read_unaligned() as u64
},
ebpf::LD_ABS_W => reg[0] = unsafe {
let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u32;
                check_mem_load(x as u64, 4, insn_ptr)?;
x.read_unaligned() as u64
},
ebpf::LD_ABS_DW => reg[0] = unsafe {
let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u64;
check_mem_load(x as u64, 8, insn_ptr)?;
x.read_unaligned()
},
ebpf::LD_IND_B => reg[0] = unsafe {
let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u8;
                check_mem_load(x as u64, 1, insn_ptr)?;
x.read_unaligned() as u64
},
ebpf::LD_IND_H => reg[0] = unsafe {
let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u16;
                check_mem_load(x as u64, 2, insn_ptr)?;
x.read_unaligned() as u64
},
ebpf::LD_IND_W => reg[0] = unsafe {
let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u32;
                check_mem_load(x as u64, 4, insn_ptr)?;
x.read_unaligned() as u64
},
ebpf::LD_IND_DW => reg[0] = unsafe {
let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u64;
check_mem_load(x as u64, 8, insn_ptr)?;
x.read_unaligned()
},
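            // LD_DW_IMM occupies two instruction slots: this one carries the lower 32 bits
            // of the immediate, the next one carries the upper 32 bits.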
ebpf::LD_DW_IMM => {
let next_insn = ebpf::get_insn(prog, insn_ptr);
insn_ptr += 1;
reg[_dst] = ((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32);
},
// BPF_LDX class
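            // Loads from memory: the address is reg[src] + offset, the width is given by
            // the opcode.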
ebpf::LD_B_REG => reg[_dst] = unsafe {
#[allow(clippy::cast_ptr_alignment)]
let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u8;
check_mem_load(x as u64, 1, insn_ptr)?;
x.read_unaligned() as u64
},
ebpf::LD_H_REG => reg[_dst] = unsafe {
#[allow(clippy::cast_ptr_alignment)]
let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u16;
check_mem_load(x as u64, 2, insn_ptr)?;
x.read_unaligned() as u64
},
ebpf::LD_W_REG => reg[_dst] = unsafe {
#[allow(clippy::cast_ptr_alignment)]
let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u32;
check_mem_load(x as u64, 4, insn_ptr)?;
x.read_unaligned() as u64
},
ebpf::LD_DW_REG => reg[_dst] = unsafe {
#[allow(clippy::cast_ptr_alignment)]
let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u64;
check_mem_load(x as u64, 8, insn_ptr)?;
x.read_unaligned()
},
// BPF_ST class
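            // Stores of an immediate value to memory at reg[dst] + offset.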
ebpf::ST_B_IMM => unsafe {
let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
check_mem_store(x as u64, 1, insn_ptr)?;
x.write_unaligned(insn.imm as u8);
},
ebpf::ST_H_IMM => unsafe {
#[allow(clippy::cast_ptr_alignment)]
let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
check_mem_store(x as u64, 2, insn_ptr)?;
x.write_unaligned(insn.imm as u16);
},
ebpf::ST_W_IMM => unsafe {
#[allow(clippy::cast_ptr_alignment)]
let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
check_mem_store(x as u64, 4, insn_ptr)?;
x.write_unaligned(insn.imm as u32);
},
ebpf::ST_DW_IMM => unsafe {
#[allow(clippy::cast_ptr_alignment)]
let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
check_mem_store(x as u64, 8, insn_ptr)?;
x.write_unaligned(insn.imm as u64);
},
// BPF_STX class
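            // Stores of a source register to memory at reg[dst] + offset.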
ebpf::ST_B_REG => unsafe {
let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
check_mem_store(x as u64, 1, insn_ptr)?;
x.write_unaligned(reg[_src] as u8);
},
ebpf::ST_H_REG => unsafe {
#[allow(clippy::cast_ptr_alignment)]
let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
check_mem_store(x as u64, 2, insn_ptr)?;
x.write_unaligned(reg[_src] as u16);
},
ebpf::ST_W_REG => unsafe {
#[allow(clippy::cast_ptr_alignment)]
let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
check_mem_store(x as u64, 4, insn_ptr)?;
x.write_unaligned(reg[_src] as u32);
},
ebpf::ST_DW_REG => unsafe {
#[allow(clippy::cast_ptr_alignment)]
let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
check_mem_store(x as u64, 8, insn_ptr)?;
x.write_unaligned(reg[_src]);
},
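            // Exclusive-add (atomic) instructions are not supported by this interpreter.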
ebpf::ST_W_XADD => unimplemented!(),
ebpf::ST_DW_XADD => unimplemented!(),
// BPF_ALU class
            // TODO Check how overflow works in the kernel. Should we mask all src register
            // values with U32MAX before we do the operation?
            // Cf. ((0x11 << 32) - (0x1 << 32)) as u32 vs ((0x11 << 32) as u32 - (0x1 << 32) as u32)
ebpf::ADD32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_add(insn.imm) as u64, //((reg[_dst] & U32MAX) + insn.imm as u64) & U32MAX,
ebpf::ADD32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_add(reg[_src] as i32) as u64, //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
ebpf::SUB32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm) as u64,
ebpf::SUB32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
ebpf::MUL32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm) as u64,
ebpf::MUL32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
ebpf::DIV32_IMM if insn.imm as u32 == 0 => reg[_dst] = 0,
ebpf::DIV32_IMM => reg[_dst] = (reg[_dst] as u32 / insn.imm as u32) as u64,
ebpf::DIV32_REG if reg[_src] as u32 == 0 => reg[_dst] = 0,
ebpf::DIV32_REG => reg[_dst] = (reg[_dst] as u32 / reg[_src] as u32) as u64,
ebpf::OR32_IMM => reg[_dst] = (reg[_dst] as u32 | insn.imm as u32) as u64,
ebpf::OR32_REG => reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
ebpf::AND32_IMM => reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
ebpf::AND32_REG => reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
            // As with the 64-bit version, we should mask the number of bits to shift with
            // 0x1f, but .wrapping_shl()/.wrapping_shr() already take care of it for us.
ebpf::LSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32) as u64,
ebpf::LSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
ebpf::RSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32) as u64,
ebpf::RSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
ebpf::NEG32 => { reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64; reg[_dst] &= U32MAX; },
ebpf::MOD32_IMM if insn.imm as u32 == 0 => (),
ebpf::MOD32_IMM => reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
ebpf::MOD32_REG if reg[_src] as u32 == 0 => (),
ebpf::MOD32_REG => reg[_dst] = (reg[_dst] as u32 % reg[_src] as u32) as u64,
ebpf::XOR32_IMM => reg[_dst] = (reg[_dst] as u32 ^ insn.imm as u32) as u64,
ebpf::XOR32_REG => reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
ebpf::MOV32_IMM => reg[_dst] = insn.imm as u32 as u64,
ebpf::MOV32_REG => reg[_dst] = (reg[_src] as u32) as u64,
            // As with the 64-bit version, we should mask the number of bits to shift with
            // 0x1f, but .wrapping_shr() already takes care of it for us.
ebpf::ARSH32_IMM => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64; reg[_dst] &= U32MAX; },
ebpf::ARSH32_REG => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
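            // Byte-swap instructions: convert dst to little- or big-endian representation,
            // with the operand width (16, 32 or 64 bits) given by the immediate.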
ebpf::LE => {
reg[_dst] = match insn.imm {
16 => (reg[_dst] as u16).to_le() as u64,
32 => (reg[_dst] as u32).to_le() as u64,
64 => reg[_dst].to_le(),
_ => unreachable!(),
};
},
ebpf::BE => {
reg[_dst] = match insn.imm {
16 => (reg[_dst] as u16).to_be() as u64,
32 => (reg[_dst] as u32).to_be() as u64,
64 => reg[_dst].to_be(),
_ => unreachable!(),
};
},
// BPF_ALU64 class
ebpf::ADD64_IMM => reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
ebpf::ADD64_REG => reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
ebpf::SUB64_IMM => reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
ebpf::SUB64_REG => reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
ebpf::MUL64_IMM => reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
ebpf::MUL64_REG => reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
ebpf::DIV64_IMM if insn.imm == 0 => reg[_dst] = 0,
ebpf::DIV64_IMM => reg[_dst] /= insn.imm as u64,
ebpf::DIV64_REG if reg[_src] == 0 => reg[_dst] = 0,
ebpf::DIV64_REG => reg[_dst] /= reg[_src],
ebpf::OR64_IMM => reg[_dst] |= insn.imm as u64,
ebpf::OR64_REG => reg[_dst] |= reg[_src],
ebpf::AND64_IMM => reg[_dst] &= insn.imm as u64,
ebpf::AND64_REG => reg[_dst] &= reg[_src],
ebpf::LSH64_IMM => reg[_dst] <<= insn.imm as u64 & SHIFT_MASK_64,
ebpf::LSH64_REG => reg[_dst] <<= reg[_src] & SHIFT_MASK_64,
ebpf::RSH64_IMM => reg[_dst] >>= insn.imm as u64 & SHIFT_MASK_64,
ebpf::RSH64_REG => reg[_dst] >>= reg[_src] & SHIFT_MASK_64,
            ebpf::NEG64 => reg[_dst] = (reg[_dst] as i64).wrapping_neg() as u64,
ebpf::MOD64_IMM if insn.imm == 0 => (),
ebpf::MOD64_IMM => reg[_dst] %= insn.imm as u64,
ebpf::MOD64_REG if reg[_src] == 0 => (),
ebpf::MOD64_REG => reg[_dst] %= reg[_src],
ebpf::XOR64_IMM => reg[_dst] ^= insn.imm as u64,
ebpf::XOR64_REG => reg[_dst] ^= reg[_src],
ebpf::MOV64_IMM => reg[_dst] = insn.imm as u64,
ebpf::MOV64_REG => reg[_dst] = reg[_src],
ebpf::ARSH64_IMM => reg[_dst] = (reg[_dst] as i64 >> (insn.imm as u64 & SHIFT_MASK_64)) as u64,
ebpf::ARSH64_REG => reg[_dst] = (reg[_dst] as i64 >> (reg[_src] as u64 & SHIFT_MASK_64)) as u64,
// BPF_JMP class
// TODO: check this actually works as expected for signed / unsigned ops
ebpf::JA => do_jump(),
ebpf::JEQ_IMM => if reg[_dst] == insn.imm as u64 { do_jump(); },
ebpf::JEQ_REG => if reg[_dst] == reg[_src] { do_jump(); },
ebpf::JGT_IMM => if reg[_dst] > insn.imm as u64 { do_jump(); },
ebpf::JGT_REG => if reg[_dst] > reg[_src] { do_jump(); },
ebpf::JGE_IMM => if reg[_dst] >= insn.imm as u64 { do_jump(); },
ebpf::JGE_REG => if reg[_dst] >= reg[_src] { do_jump(); },
ebpf::JLT_IMM => if reg[_dst] < insn.imm as u64 { do_jump(); },
ebpf::JLT_REG => if reg[_dst] < reg[_src] { do_jump(); },
ebpf::JLE_IMM => if reg[_dst] <= insn.imm as u64 { do_jump(); },
ebpf::JLE_REG => if reg[_dst] <= reg[_src] { do_jump(); },
ebpf::JSET_IMM => if reg[_dst] & insn.imm as u64 != 0 { do_jump(); },
ebpf::JSET_REG => if reg[_dst] & reg[_src] != 0 { do_jump(); },
ebpf::JNE_IMM => if reg[_dst] != insn.imm as u64 { do_jump(); },
ebpf::JNE_REG => if reg[_dst] != reg[_src] { do_jump(); },
ebpf::JSGT_IMM => if reg[_dst] as i64 > insn.imm as i64 { do_jump(); },
ebpf::JSGT_REG => if reg[_dst] as i64 > reg[_src] as i64 { do_jump(); },
ebpf::JSGE_IMM => if reg[_dst] as i64 >= insn.imm as i64 { do_jump(); },
ebpf::JSGE_REG => if reg[_dst] as i64 >= reg[_src] as i64 { do_jump(); },
ebpf::JSLT_IMM => if (reg[_dst] as i64) < insn.imm as i64 { do_jump(); },
ebpf::JSLT_REG => if (reg[_dst] as i64) < reg[_src] as i64 { do_jump(); },
ebpf::JSLE_IMM => if reg[_dst] as i64 <= insn.imm as i64 { do_jump(); },
ebpf::JSLE_REG => if reg[_dst] as i64 <= reg[_src] as i64 { do_jump(); },
// BPF_JMP32 class
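            // Same as the BPF_JMP class, but the comparisons operate on the lower 32 bits
            // of the operands.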
ebpf::JEQ_IMM32 => if reg[_dst] as u32 == insn.imm as u32 { do_jump(); },
ebpf::JEQ_REG32 => if reg[_dst] as u32 == reg[_src] as u32 { do_jump(); },
ebpf::JGT_IMM32 => if reg[_dst] as u32 > insn.imm as u32 { do_jump(); },
ebpf::JGT_REG32 => if reg[_dst] as u32 > reg[_src] as u32 { do_jump(); },
ebpf::JGE_IMM32 => if reg[_dst] as u32 >= insn.imm as u32 { do_jump(); },
ebpf::JGE_REG32 => if reg[_dst] as u32 >= reg[_src] as u32 { do_jump(); },
ebpf::JLT_IMM32 => if (reg[_dst] as u32) < insn.imm as u32 { do_jump(); },
ebpf::JLT_REG32 => if (reg[_dst] as u32) < reg[_src] as u32 { do_jump(); },
ebpf::JLE_IMM32 => if reg[_dst] as u32 <= insn.imm as u32 { do_jump(); },
ebpf::JLE_REG32 => if reg[_dst] as u32 <= reg[_src] as u32 { do_jump(); },
ebpf::JSET_IMM32 => if reg[_dst] as u32 & insn.imm as u32 != 0 { do_jump(); },
ebpf::JSET_REG32 => if reg[_dst] as u32 & reg[_src] as u32 != 0 { do_jump(); },
ebpf::JNE_IMM32 => if reg[_dst] as u32 != insn.imm as u32 { do_jump(); },
ebpf::JNE_REG32 => if reg[_dst] as u32 != reg[_src] as u32 { do_jump(); },
ebpf::JSGT_IMM32 => if reg[_dst] as i32 > insn.imm { do_jump(); },
ebpf::JSGT_REG32 => if reg[_dst] as i32 > reg[_src] as i32 { do_jump(); },
ebpf::JSGE_IMM32 => if reg[_dst] as i32 >= insn.imm { do_jump(); },
ebpf::JSGE_REG32 => if reg[_dst] as i32 >= reg[_src] as i32 { do_jump(); },
ebpf::JSLT_IMM32 => if (reg[_dst] as i32) < insn.imm { do_jump(); },
ebpf::JSLT_REG32 => if (reg[_dst] as i32) < reg[_src] as i32 { do_jump(); },
ebpf::JSLE_IMM32 => if reg[_dst] as i32 <= insn.imm { do_jump(); },
ebpf::JSLE_REG32 => if reg[_dst] as i32 <= reg[_src] as i32 { do_jump(); },
// Do not delegate the check to the verifier, since registered functions can be
// changed after the program has been verified.
ebpf::CALL => if let Some(function) = helpers.get(&(insn.imm as u32)) {
reg[0] = function(reg[1], reg[2], reg[3], reg[4], reg[5]);
} else {
Err(Error::new(ErrorKind::Other, format!("Error: unknown helper function (id: {:#x})", insn.imm as u32)))?;
},
ebpf::TAIL_CALL => unimplemented!(),
ebpf::EXIT => return Ok(reg[0]),
_ => unreachable!()
}
}
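    // Falling out of the loop means the program ran past its last instruction without
    // reaching EXIT; the verifier is expected to reject such programs before execution.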
unreachable!()
}