This repository has been archived by the owner on Oct 11, 2024. It is now read-only.

Re-implement dynamic max expiration time #832

Merged
merged 11 commits on Jun 15, 2020
Implement removing orders with longest expiration time in Dexie
albrow committed Jun 15, 2020
commit bc5556a2115bc95660115b42c22df6bb22a25eb8
94 changes: 39 additions & 55 deletions db/sql_implementation.go
@@ -7,6 +7,7 @@ import (
"database/sql"
"errors"
"fmt"
"math"
"math/big"
"os"
"path/filepath"
@@ -21,6 +22,9 @@ import (
_ "github.com/mattn/go-sqlite3"
)

// largeLimit is used as a workaround for the fact that SQL does not allow OFFSET without LIMIT.
const largeLimit = math.MaxInt64

var _ Database = (*DB)(nil)

// DB instantiates the DB connection and creates all the collections used by the application
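For context on the new constant: SQLite accepts OFFSET only when a LIMIT is also present, so a query that just wants to skip the first MaxOrders rows must still supply some limit, and math.MaxInt64 effectively means "unlimited". Below is a minimal sketch of the idiom using database/sql directly; the table and column names follow this diff, but the helper itself is illustrative.

package db

import (
	"database/sql"
	"math"
)

// ordersBeyondCap returns the hashes of all orders ranked after the first
// maxOrders rows by ascending expiration time. SQLite rejects OFFSET
// without LIMIT, so math.MaxInt64 stands in for "no limit".
func ordersBeyondCap(sqldb *sql.DB, maxOrders int) ([]string, error) {
	rows, err := sqldb.Query(
		`SELECT hash FROM orders ORDER BY expirationTimeSeconds ASC LIMIT ? OFFSET ?`,
		int64(math.MaxInt64), maxOrders,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var hashes []string
	for rows.Next() {
		var hash string
		if err := rows.Scan(&hash); err != nil {
			return nil, err
		}
		hashes = append(hashes, hash)
	}
	return hashes, rows.Err()
}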
@@ -259,7 +263,7 @@ func (db *DB) AddOrders(orders []*types.OrderWithMetadata) (added []*types.Order
err = convertErr(err)
}()

var ordersToRemove []*sqltypes.Order
addedMap := map[common.Hash]*types.OrderWithMetadata{}
err = db.sqldb.TransactionalContext(db.ctx, nil, func(txn *sqlz.Tx) error {
for _, order := range orders {
result, err := txn.NamedExecContext(db.ctx, insertOrderQuery, sqltypes.OrderFromCommonType(order))
@@ -271,14 +275,18 @@
return err
return err
}
if affected > 0 {
added = append(added, order)
addedMap[order.Hash] = order
}
}

// Remove orders with an expiration time too far in the future.
// HACK(albrow): sqlz doesn't support ORDER BY, LIMIT, and OFFSET
// for DELETE statements. It also doesn't support RETURNING. As a
// workaround, we do a SELECT and DELETE inside a transaction.
removeQuery := txn.Select("*").From("orders").OrderBy(sqlz.Asc(string(OFExpirationTimeSeconds))).Offset(int64(db.opts.MaxOrders)).Limit(999999999999999999)
// HACK(albrow): SQL doesn't allow OFFSET without LIMIT. As a
// workaround, we set the limit to an extremely large number.
removeQuery := txn.Select("*").From("orders").OrderBy(sqlz.Asc(string(OFExpirationTimeSeconds))).Limit(largeLimit).Offset(int64(db.opts.MaxOrders))
var ordersToRemove []*sqltypes.Order
err = removeQuery.GetAllContext(db.ctx, &ordersToRemove)
if err != nil {
return err
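The old and new comments above together explain the shape of the trim: the query builder has no DELETE with ORDER BY/LIMIT and no RETURNING (SQLite itself only gained RETURNING in version 3.35, released in 2021, after this PR), so the overflowing rows are selected first and then deleted one by one inside the same transaction. A sketch of that second half against database/sql; the helper and names are illustrative.

// deleteByHash removes each previously selected order row inside the
// surrounding transaction, so the SELECT and the DELETEs are atomic.
func deleteByHash(tx *sql.Tx, hashes []string) error {
	for _, hash := range hashes {
		if _, err := tx.Exec(`DELETE FROM orders WHERE hash = ?`, hash); err != nil {
			return err
		}
	}
	return nil
}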
@@ -288,38 +296,24 @@ func (db *DB) AddOrders(orders []*types.OrderWithMetadata) (added []*types.Order
if err != nil {
return err
}
if _, found := addedMap[order.Hash]; found {
// If the order was previously added, remove it from
// the added set and don't add it to the removed set.
delete(addedMap, order.Hash)
} else {
removed = append(removed, sqltypes.OrderToCommonType(order))
}
}
return nil
})
if err != nil {
return nil, nil, err
}

// Because of how the above code is written, a single order could exist
// in both added and removed sets. We should remove such orders from both
// sets in this case.
addedMap := map[common.Hash]*types.OrderWithMetadata{}
removedMap := map[common.Hash]*sqltypes.Order{}
for _, a := range added {
addedMap[a.Hash] = a
}
for _, r := range ordersToRemove {
removedMap[r.Hash] = r
}
dedupedAdded := []*types.OrderWithMetadata{}
dedupedRemoved := []*sqltypes.Order{}
for _, a := range added {
if _, wasRemoved := removedMap[a.Hash]; !wasRemoved {
dedupedAdded = append(dedupedAdded, a)
}
}
for _, r := range ordersToRemove {
if _, wasAdded := addedMap[r.Hash]; !wasAdded {
dedupedRemoved = append(dedupedRemoved, r)
}
for _, order := range addedMap {
added = append(added, order)
}

return dedupedAdded, sqltypes.OrdersToCommonType(dedupedRemoved), nil
return added, removed, nil
}

func (db *DB) GetOrder(hash common.Hash) (order *types.OrderWithMetadata, err error) {
@@ -520,7 +514,8 @@ func (db *DB) AddMiniHeaders(miniHeaders []*types.MiniHeader) (added []*types.Mi
defer func() {
err = convertErr(err)
}()
var miniHeadersToRemove []*sqltypes.MiniHeader

addedMap := map[common.Hash]*types.MiniHeader{}
albrow (Contributor, Author) commented:

I found a more efficient way to remove duplicates from the added and removed sets. Applied the same approach to both AddMiniHeaders and AddOrders for both the SQL and Dexie implementations.
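Condensed, the approach is: stage insertions in a hash-keyed map, and when a row is trimmed back out in the same call, cancel it from the map instead of reporting it in both result sets. A sketch using the order types from this repo; the standalone function is illustrative.

// dedupResults reconciles rows inserted in this call with rows trimmed in
// this call: anything both added and removed nets out to a no-op and
// appears in neither result set.
func dedupResults(inserted, trimmed []*types.OrderWithMetadata) (added, removed []*types.OrderWithMetadata) {
	addedMap := map[common.Hash]*types.OrderWithMetadata{}
	for _, order := range inserted {
		addedMap[order.Hash] = order
	}
	for _, order := range trimmed {
		if _, found := addedMap[order.Hash]; found {
			delete(addedMap, order.Hash) // added and trimmed together: net no-op
		} else {
			removed = append(removed, order)
		}
	}
	for _, order := range addedMap {
		added = append(added, order)
	}
	return added, removed
}

Compared to the removed post-transaction dedup, this needs only one map and a single pass over each set.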

err = db.sqldb.TransactionalContext(db.ctx, nil, func(txn *sqlz.Tx) error {
for _, miniHeader := range miniHeaders {
result, err := txn.NamedExecContext(db.ctx, insertMiniHeaderQuery, sqltypes.MiniHeaderFromCommonType(miniHeader))
@@ -532,54 +527,43 @@
return err
}
if affected > 0 {
added = append(added, miniHeader)
addedMap[miniHeader.Hash] = miniHeader
}
}

// HACK(albrow): sqlz doesn't support ORDER BY, LIMIT, and OFFSET
// for DELETE statements. It also doesn't support RETURNING. As a
// workaround, we do a SELECT and DELETE inside a transaction.
trimQuery := txn.Select("*").From("miniHeaders").OrderBy(sqlz.Desc(string(MFNumber))).Limit(99999999999).Offset(int64(db.opts.MaxMiniHeaders))
if err := trimQuery.GetAllContext(db.ctx, &miniHeadersToRemove); err != nil {
// HACK(albrow): SQL doesn't allow OFFSET without LIMIT. As a
// workaround, we set the limit to an extremely large number.
removeQuery := txn.Select("*").From("miniHeaders").OrderBy(sqlz.Desc(string(MFNumber))).Limit(largeLimit).Offset(int64(db.opts.MaxMiniHeaders))
var miniHeadersToRemove []*sqltypes.MiniHeader
if err := removeQuery.GetAllContext(db.ctx, &miniHeadersToRemove); err != nil {
return err
}
for _, miniHeader := range miniHeadersToRemove {
_, err := txn.DeleteFrom("miniHeaders").Where(sqlz.Eq(string(MFHash), miniHeader.Hash)).ExecContext(db.ctx)
if err != nil {
return err
}
if _, found := addedMap[miniHeader.Hash]; found {
// If the miniHeader was previously added, remove it from
// the added set and don't add it to the removed set.
delete(addedMap, miniHeader.Hash)
} else {
removed = append(removed, sqltypes.MiniHeaderToCommonType(miniHeader))
}
}
return nil
})
if err != nil {
return nil, nil, err
}

// Because of how the above code is written, a single miniHeader could exist
// in both added and removed sets. We should remove such miniHeaders from both
// sets in this case.
addedMap := map[common.Hash]*types.MiniHeader{}
removedMap := map[common.Hash]*sqltypes.MiniHeader{}
for _, a := range added {
addedMap[a.Hash] = a
}
for _, r := range miniHeadersToRemove {
removedMap[r.Hash] = r
}
dedupedAdded := []*types.MiniHeader{}
dedupedRemoved := []*sqltypes.MiniHeader{}
for _, a := range added {
if _, wasRemoved := removedMap[a.Hash]; !wasRemoved {
dedupedAdded = append(dedupedAdded, a)
}
}
for _, r := range miniHeadersToRemove {
if _, wasAdded := addedMap[r.Hash]; !wasAdded {
dedupedRemoved = append(dedupedRemoved, r)
}
for _, miniHeader := range addedMap {
added = append(added, miniHeader)
}

return dedupedAdded, sqltypes.MiniHeadersToCommonType(dedupedRemoved), nil
return added, removed, nil
}

func (db *DB) GetMiniHeader(hash common.Hash) (miniHeader *types.MiniHeader, err error) {
62 changes: 44 additions & 18 deletions packages/browser-lite/src/database.ts
@@ -139,15 +139,15 @@ export function createDatabase(opts: Options): Database {

export class Database {
private readonly _db: Dexie;
// private readonly _maxOrders: number;
private readonly _maxOrders: number;
private readonly _maxMiniHeaders: number;
private readonly _orders: Dexie.Table<Order, string>;
private readonly _miniHeaders: Dexie.Table<MiniHeader, string>;
private readonly _metadata: Dexie.Table<Metadata, number>;

constructor(opts: Options) {
this._db = new Dexie(opts.dataSourceName);
// this._maxOrders = opts.maxOrders;
this._maxOrders = opts.maxOrders;
this._maxMiniHeaders = opts.maxMiniHeaders;

this._db.version(1).stores({
@@ -168,9 +168,9 @@

// AddOrders(orders []*types.OrderWithMetadata) (added []*types.OrderWithMetadata, removed []*types.OrderWithMetadata, err error)
public async addOrdersAsync(orders: Order[]): Promise<AddOrdersResult> {
// TODO(albrow): Remove orders with max expiration time.
const added: Order[] = [];
await this._db.transaction('rw!', this._orders, async () => {
const addedMap = new Map<string, Order>();
const removed: Order[] = [];
await this._db.transaction('rw', this._orders, async () => {
for (const order of orders) {
try {
await this._orders.add(order);
@@ -182,12 +182,29 @@
}
throw e;
}
added.push(order);
addedMap.set(order.hash, order);
}

// Remove orders with an expiration time too far in the future.
const ordersToRemove = await this._orders
.orderBy('expirationTimeSeconds')
.offset(this._maxOrders)
.toArray();
for (const order of ordersToRemove) {
await this._orders.delete(order.hash);
if (addedMap.has(order.hash)) {
// If the order was previously added, remove it from
// the added set and don't add it to the removed set.
addedMap.delete(order.hash);
} else {
removed.push(order);
}
}
});

return {
added,
removed: [],
added: Array.from(addedMap.values()),
removed,
};
}

@@ -257,7 +274,7 @@

// AddMiniHeaders(miniHeaders []*types.MiniHeader) (added []*types.MiniHeader, removed []*types.MiniHeader, err error)
public async addMiniHeadersAsync(miniHeaders: MiniHeader[]): Promise<AddMiniHeadersResult> {
const added: MiniHeader[] = [];
const addedMap = new Map<string, MiniHeader>();
const removed: MiniHeader[] = [];
await this._db.transaction('rw!', this._miniHeaders, async () => {
for (const miniHeader of miniHeaders) {
@@ -271,20 +288,29 @@
}
throw e;
}
added.push(miniHeader);
const outdatedMiniHeaders = await this._miniHeaders
.orderBy('number')
.offset(this._maxMiniHeaders)
.reverse()
.toArray();
for (const outdated of outdatedMiniHeaders) {
await this._miniHeaders.delete(outdated.hash);
addedMap.set(miniHeader.hash, miniHeader);
}

// Remove any outdated miniHeaders.
const outdatedMiniHeaders = await this._miniHeaders
.orderBy('number')
.offset(this._maxMiniHeaders)
.reverse()
.toArray();
for (const outdated of outdatedMiniHeaders) {
await this._miniHeaders.delete(outdated.hash);
if (addedMap.has(outdated.hash)) {
// If the miniHeader was previously added, remove it from
// the added set and don't add it to the removed set.
addedMap.delete(outdated.hash);
} else {
removed.push(outdated);
}
}
});

return {
added,
added: Array.from(addedMap.values()),
removed,
};
}
Expand Down