Commit bf7d7ea

committed
Only enable read ahead if the prior chunk can be fully covered by the next read-ahead.
1 parent 3c1f00d commit bf7d7ea
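
The guard change below ties read-ahead enablement to a byte budget: the prior chunk's data, plus the chunk header, the filter, and the sub-batch record header, must fit inside a single read-ahead buffer. A minimal sketch of that condition, assuming illustrative macro values (4096 matches the RAL constant used in the test suite; the two header sizes are placeholders, not the real osiris constants):

-module(ra_sketch).
-export([can_enable/2]).

%% Assumed values for illustration only; osiris defines its own.
-define(READ_AHEAD_LIMIT, 4096).     %% matches RAL in the test suite
-define(HEADER_SIZE_B, 56).          %% placeholder chunk header size
-define(REC_HDR_SZ_SUBBATCH_B, 11).  %% placeholder sub-batch record header size

%% Enable read ahead only if the prior chunk's data, together with the
%% chunk header, the filter and the record length header, fits within
%% one read-ahead buffer.
can_enable(LastDataSize, FilterSize) ->
    LastDataSize =< (?READ_AHEAD_LIMIT - ?HEADER_SIZE_B -
                     FilterSize - ?REC_HDR_SZ_SUBBATCH_B).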

File tree

2 files changed: +28 -19 lines changed

  src/osiris_log.erl
  test/osiris_log_SUITE.erl

src/osiris_log.erl

Lines changed: 16 additions & 4 deletions

@@ -3354,15 +3354,17 @@ ra_read(ReadPos, Len, #ra{buf = {Pos, Data}})
 ra_read(_Pos, _Len, _Ra) ->
     undefined.
 
-ra_update_size(undefined, _FilterSize, LastDataSize,
+ra_update_size(undefined, FilterSize, LastDataSize,
                #ra{on = true, size = Sz} = Ra)
   when Sz < ?READ_AHEAD_LIMIT andalso
-       LastDataSize < ?READ_AHEAD_LIMIT ->
+       LastDataSize =< (?READ_AHEAD_LIMIT - ?HEADER_SIZE_B -
+                        FilterSize - ?REC_HDR_SZ_SUBBATCH_B) ->
     %% no filter and last data size was small so enable data read ahead
     Ra#ra{size = ?READ_AHEAD_LIMIT};
-ra_update_size(undefined, _FilterSize, LastDataSize,
+ra_update_size(undefined, FilterSize, LastDataSize,
               #ra{on = true, size = ?READ_AHEAD_LIMIT} = Ra)
-  when LastDataSize < ?READ_AHEAD_LIMIT ->
+  when LastDataSize =< (?READ_AHEAD_LIMIT - ?HEADER_SIZE_B -
+                        FilterSize - ?REC_HDR_SZ_SUBBATCH_B) ->
     Ra;
 ra_update_size(_Filter, FilterSize, _LastDataSize, #ra{size = Sz} = Ra) ->
     case FilterSize + ?HEADER_SIZE_B of
@@ -3444,6 +3446,16 @@ ra_update_size_test() ->
     ?assertMatch(#ra{size = DefSize},
                  ra_update_size("a filter", ?DEFAULT_FILTER_SIZE, 100, Ra0)),
+
+    %% we need to ensure that if we enable read ahead we can at least fulfil
+    %% the prior chunk, including header, filter and record length header
+    MaxEnablingDataSize = ?READ_AHEAD_LIMIT - ?HEADER_SIZE_B - ?DEFAULT_FILTER_SIZE - ?REC_HDR_SZ_SUBBATCH_B,
+    ?assertMatch(#ra{size = DefSize},
+                 ra_update_size(undefined, ?DEFAULT_FILTER_SIZE, MaxEnablingDataSize + 1,
+                                Ra0)),
+    ?assertMatch(#ra{size = ?READ_AHEAD_LIMIT},
+                 ra_update_size(undefined, ?DEFAULT_FILTER_SIZE, MaxEnablingDataSize,
+                                Ra0)),
     ok.
 
 part_test() ->
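
The added test pins the exact boundary: MaxEnablingDataSize enables read ahead, while one byte more leaves the default size in place. Worked through with the assumed sketch values above, plus an assumed ?DEFAULT_FILTER_SIZE of 16 bytes (which may differ from the real constant), the threshold would be 4096 - 56 - 16 - 11 = 4013 bytes:

-module(ra_sketch_tests).
-include_lib("eunit/include/eunit.hrl").

%% Boundary check mirroring the new unit test, built on the ra_sketch
%% module above; all sizes are the assumed illustration values.
boundary_test() ->
    Max = 4096 - 56 - 16 - 11,                      %% = 4013 bytes
    ?assert(ra_sketch:can_enable(Max, 16)),         %% fits exactly: enable
    ?assertNot(ra_sketch:can_enable(Max + 1, 16)).  %% one byte over: keep default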

test/osiris_log_SUITE.erl

Lines changed: 12 additions & 15 deletions

@@ -2199,7 +2199,7 @@ read_ahead_send_file_filter(Config) ->
     RAL = 4096, %% read ahead limit
     HS = ?HEADER_SIZE_B,
     %% we store the entry size on 4 bytes, so we must subtract them from the data size
-    MaxEntrySize = RAL - 4 - HS,
+    MaxEntrySize = RAL - 11 - HS,
     DFS = ?DEFAULT_FILTER_SIZE,
     % FilterSizes = [DFS * 2],
     FilterSizes = [DFS, DFS * 2],
@@ -2307,50 +2307,47 @@ read_ahead_send_file_filter(Config) ->
              {_, W1} = write_committed(Entries, W0),
              W1;
         (read, #{r := R0, cs := CS, ss := SS, tracer := T}) ->
-             %% we still read ahead, so we read a small chunk at once
+             %% read ahead is disabled, but will be re-enabled
              Entries = [{<<"banana">>, <<"aaa">>}],
              [_, _, D, _] = ra_fake_chunk(Entries),
              {ok, R1} = osiris_log:send_file(CS, R0),
              {ok, Read} = recv(SS, byte_size(D) + HS),
              ?assertEqual(D, binary:part(Read, HS, byte_size(Read) - HS)),
              ?assertEqual(1, osiris_tracer:call_count(T, file, pread)),
-             ?assertEqual(0, osiris_tracer:call_count(T, file, sendfile)),
+             ?assertEqual(1, osiris_tracer:call_count(T, file, sendfile)),
              R1
         end,
         fun(write, #{w := W0}) ->
-             EData = binary:copy(<<"b">>, MES1 * 2),
+             EData = binary:copy(<<"b">>, MES1 div 2),
              Entries = [{<<"banana">>, EData}],
              {_, W1} = write_committed(Entries, W0),
              W1;
         (read, #{r := R0, cs := CS, ss := SS, tracer := T}) ->
-             %% large chunk, we had the header already,
-             %% but we must use sendfile for the data
-             EData = binary:copy(<<"b">>, MES1 * 2),
+             %% previous chunk enabled read ahead
+             EData = binary:copy(<<"b">>, MES1 div 2),
              Entries = [{<<"banana">>, EData}],
              [_, _, D, _] = ra_fake_chunk(Entries),
              {ok, R1} = osiris_log:send_file(CS, R0),
              {ok, Read} = recv(SS, byte_size(D) + HS),
              ?assertEqual(D, binary:part(Read, HS, byte_size(Read) - HS)),
-             ?assertEqual(0, osiris_tracer:call_count(T, file, pread)),
-             ?assertEqual(1, osiris_tracer:call_count(T, file, sendfile)),
+             ?assertEqual(1, osiris_tracer:call_count(T, file, pread)),
+             ?assertEqual(0, osiris_tracer:call_count(T, file, sendfile)),
              R1
         end,
         fun(write, #{w := W0}) ->
-             EData = binary:copy(<<"b">>, MES1 div 2),
+             EData = binary:copy(<<"b">>, MES1 * 2),
              Entries = [{<<"banana">>, EData}],
              {_, W1} = write_committed(Entries, W0),
              W1;
         (read, #{r := R0, cs := CS, ss := SS, tracer := T}) ->
-             %% we don't read ahead anymore because the previous chunk was large
-             %% so we read the header and use sendfile,
-             %% even for a small chunk
-             EData = binary:copy(<<"b">>, MES1 div 2),
+             %% large chunk, we had the header already
+             EData = binary:copy(<<"b">>, MES1 * 2),
              Entries = [{<<"banana">>, EData}],
              [_, _, D, _] = ra_fake_chunk(Entries),
              {ok, R1} = osiris_log:send_file(CS, R0),
              {ok, Read} = recv(SS, byte_size(D) + HS),
              ?assertEqual(D, binary:part(Read, HS, byte_size(Read) - HS)),
-             ?assertEqual(1, osiris_tracer:call_count(T, file, pread)),
+             ?assertEqual(0, osiris_tracer:call_count(T, file, pread)),
              ?assertEqual(1, osiris_tracer:call_count(T, file, sendfile)),
              R1
         end
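
The pread/sendfile call counts above track which path served each chunk: a chunk covered by the read-ahead buffer is answered from memory after a single pread, while anything larger is streamed from disk with sendfile. A simplified sketch of that dispatch, assuming a hypothetical send_chunk/5 helper; BufRead stands for the result of the ra_read/3 buffer lookup shown in the first hunk. This is only the shape of the decision, not the actual osiris_log send path:

%% Hypothetical helper illustrating the dispatch the assertions observe;
%% BufRead is binary data when the chunk is buffered, undefined when not.
send_chunk(Sock, Fd, Pos, Len, BufRead) ->
    case BufRead of
        undefined ->
            %% not covered by the read-ahead buffer: stream from disk,
            %% which the tracer counts as a file:sendfile call
            {ok, _} = file:sendfile(Fd, Sock, Pos, Len, []),
            ok;
        Data when is_binary(Data) ->
            %% already buffered by an earlier file:pread: send from memory
            ok = gen_tcp:send(Sock, Data)
    end.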
