Skip to content

Commit d38461e

Browse files
authored
fix: invalid post commit volumes format (#829)
1 parent c48d6fa commit d38461e

File tree

10 files changed

+71
-1
lines changed

10 files changed

+71
-1
lines changed

internal/storage/bucket/migrations/19-transactions-fill-pcv/up.sql

+1-1
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ do $$
88
drop table if exists moves_view;
99

1010
create temp table moves_view as
11-
select transactions_id::numeric, public.aggregate_objects(json_build_object(accounts_address, json_build_object(asset, post_commit_volumes))::jsonb) as volumes
11+
select transactions_id::numeric, public.aggregate_objects(json_build_object(accounts_address, json_build_object(asset, json_build_object('input', (post_commit_volumes).inputs, 'output', (post_commit_volumes).outputs)))::jsonb) as volumes
1212
from (
1313
SELECT DISTINCT ON (moves.transactions_id, accounts_address, asset) moves.transactions_id, accounts_address, asset,
1414
first_value(post_commit_volumes) OVER (
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
-- Post-migration check: after migration 19 ran, transaction 0 must expose its
-- post-commit volumes in the {"input": ..., "output": ...} json shape.
do $$
declare
	-- jsonb renders with canonical key order in Postgres, so a string
	-- comparison against this literal is deterministic
	expected_volumes varchar = '{"fees": {"USD": {"input": 1, "output": 0}}, "world": {"USD": {"input": 0, "output": 100}}, "orders:0": {"USD": {"input": 100, "output": 100}}, "sellers:0": {"USD": {"input": 99, "output": 0}}}';
	actual_volumes varchar;
begin
	set search_path = '{{.Schema}}';

	select post_commit_volumes::varchar
	into actual_volumes
	from transactions
	where id = 0;

	assert actual_volumes = expected_volumes,
		'post_commit_volumes should be equals to ' || expected_volumes || ' but was ' || (select to_jsonb(post_commit_volumes) from transactions where id = 0);
end;
$$
10+
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
name: Fill invalid post_commit_volumes after upgrading existing transactions to v2.2
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
-- Backfill transactions.post_commit_volumes for rows written before schema
-- version 12, converting the legacy (inputs, outputs) composite into the
-- {"account": {"asset": {"input": n, "output": n}}} json shape used by v2.2.
do $$
declare
	_offset integer := 0;
	_batch_size integer := 1000;
	-- highest transaction id present in moves_view; drives loop termination
	_max_transactions_id numeric;
begin
	set search_path = '{{ .Schema }}';

	drop table if exists moves_view;

	-- Latest post-commit volumes per (transaction, account, asset) — the most
	-- recent move (highest seq) wins — rendered as json and merged per
	-- transaction via aggregate_objects.
	create temp table moves_view as
	select transactions_id::numeric,
	       public.aggregate_objects(
	           json_build_object(
	               accounts_address,
	               json_build_object(
	                   asset,
	                   json_build_object(
	                       'input', (post_commit_volumes).inputs,
	                       'output', (post_commit_volumes).outputs
	                   )
	               )
	           )::jsonb
	       ) as volumes
	from (
		select distinct on (moves.transactions_id, accounts_address, asset)
			moves.transactions_id,
			accounts_address,
			asset,
			first_value(post_commit_volumes) over (
				partition by moves.transactions_id, accounts_address, asset
				order by seq desc
			) as post_commit_volumes
		from moves
		-- only moves created before migration 12 carry the legacy format
		where insertion_date < (
			select tstamp from goose_db_version where version_id = 12
		)
	) moves
	group by transactions_id;

	create index moves_view_idx on moves_view(transactions_id);

	if (select count(*) from moves_view) = 0 then
		return;
	end if;

	select max(transactions_id)
	into _max_transactions_id
	from moves_view;

	perform pg_notify('migrations-{{ .Schema }}', 'init: ' || (select count(*) from moves_view));

	loop
		-- id-range batching: plays better than offset/limit on a large table
		with data as (
			select transactions_id, volumes
			from moves_view
			where transactions_id >= _offset and transactions_id < _offset + _batch_size
		)
		update transactions
		set post_commit_volumes = data.volumes
		from data
		where transactions.id = data.transactions_id;

		_offset = _offset + _batch_size;

		perform pg_notify('migrations-{{ .Schema }}', 'continue: ' || _batch_size);

		commit;

		-- Exit on the max id, not on an empty batch (`exit when not found`):
		-- a gap of _batch_size or more consecutive missing transaction ids
		-- would otherwise end the loop early and leave later rows unconverted.
		exit when _offset > _max_transactions_id;
	end loop;

	drop table if exists moves_view;
end
$$;

0 commit comments

Comments
 (0)