diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1f8011ad..82698b58 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -85,6 +85,10 @@ jobs: working-directory: ./riverdriver/riverpgxv5 run: go test -race ./... + - name: Test rivertype + working-directory: ./rivertype + run: go test -race ./... + cli: runs-on: ubuntu-latest timeout-minutes: 3 @@ -186,6 +190,12 @@ jobs: version: ${{ env.GOLANGCI_LINT_VERSION }} working-directory: ./riverdriver/riverpgxv5 + - name: Lint rivertype + uses: golangci/golangci-lint-action@v3 + with: + version: ${{ env.GOLANGCI_LINT_VERSION }} + working-directory: ./rivertype + producer_sample: runs-on: ubuntu-latest timeout-minutes: 2 @@ -250,7 +260,7 @@ jobs: - name: Setup sqlc uses: sqlc-dev/setup-sqlc@v4 with: - sqlc-version: "1.24.0" + sqlc-version: "1.25.0" - name: Run sqlc diff run: | diff --git a/Makefile b/Makefile index 4ce6e137..99b05391 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,6 @@ generate: generate/sqlc .PHONY: generate/sqlc generate/sqlc: - cd internal/dbsqlc && sqlc generate cd riverdriver/riverdatabasesql/internal/dbsqlc && sqlc generate cd riverdriver/riverpgxv5/internal/dbsqlc && sqlc generate @@ -15,6 +14,7 @@ lint: cd riverdriver && golangci-lint run --fix cd riverdriver/riverdatabasesql && golangci-lint run --fix cd riverdriver/riverpgxv5 && golangci-lint run --fix + cd rivertype && golangci-lint run --fix .PHONY: test test: @@ -23,6 +23,7 @@ test: cd riverdriver && go test ./... cd riverdriver/riverdatabasesql && go test ./... cd riverdriver/riverpgxv5 && go test ./... + cd rivertype && go test ./... .PHONY: verify verify: @@ -30,6 +31,5 @@ verify: verify/sqlc .PHONY: verify/sqlc verify/sqlc: - cd internal/dbsqlc && sqlc diff cd riverdriver/riverdatabasesql/internal/dbsqlc && sqlc diff cd riverdriver/riverpgxv5/internal/dbsqlc && sqlc diff \ No newline at end of file diff --git a/client.go b/client.go index a3135d04..9211c1c9 100644 --- a/client.go +++ b/client.go @@ -16,8 +16,8 @@ import ( "github.com/riverqueue/river/internal/baseservice" "github.com/riverqueue/river/internal/componentstatus" - "github.com/riverqueue/river/internal/dbadapter" - "github.com/riverqueue/river/internal/dbsqlc" + "github.com/riverqueue/river/internal/dblist" + "github.com/riverqueue/river/internal/dbunique" "github.com/riverqueue/river/internal/jobcompleter" "github.com/riverqueue/river/internal/jobstats" "github.com/riverqueue/river/internal/leadership" @@ -262,8 +262,6 @@ type QueueConfig struct { // multiple instances operating on different databases or Postgres schemas // within a single database. type Client[TTx any] struct { - adapter dbadapter.Adapter - // BaseService can't be embedded like on other services because its // properties would leak to the external API. baseService baseservice.BaseService @@ -290,6 +288,7 @@ type Client[TTx any] struct { statsMu sync.Mutex statsNumJobs int testSignals clientTestSignals + uniqueInserter *dbunique.UniqueInserter wg sync.WaitGroup // workCancel cancels the context used for all work goroutines. Normal Stop @@ -332,7 +331,7 @@ var ( // ErrNotFound is returned when a query by ID does not match any existing // rows. For example, attempting to cancel a job that doesn't exist will // return this error. 
- ErrNotFound = errors.New("not found") + ErrNotFound = rivertype.ErrNotFound errMissingConfig = errors.New("missing config") errMissingDatabasePoolWithQueues = errors.New("must have a non-nil database pool to execute jobs (either use a driver with database pool or don't configure Queues)") @@ -437,17 +436,9 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client TimeNowUTC: func() time.Time { return time.Now().UTC() }, } - adapter := dbadapter.NewStandardAdapter(archetype, &dbadapter.StandardAdapterConfig{ - AdvisoryLockPrefix: config.AdvisoryLockPrefix, - DeadlineTimeout: 5 * time.Second, // not exposed in client configuration for now, but we may want to do so - Executor: driver.GetDBPool(), - WorkerName: config.ID, - }) - - completer := jobcompleter.NewAsyncCompleter(archetype, adapter, 100) + completer := jobcompleter.NewAsyncCompleter(archetype, driver.GetExecutor(), 100) client := &Client[TTx]{ - adapter: adapter, completer: completer, config: config, driver: driver, @@ -456,6 +447,9 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client stopComplete: make(chan struct{}), subscriptions: make(map[int]*eventSubscription), testSignals: clientTestSignals{}, + uniqueInserter: baseservice.Init(archetype, &dbunique.UniqueInserter{ + AdvisoryLockPrefix: config.AdvisoryLockPrefix, + }), } baseservice.Init(archetype, &client.baseService) @@ -465,7 +459,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client // we're actually going to be working jobs (as opposed to just enqueueing // them): if config.willExecuteJobs() { - if driver.GetDBPool() == nil { + if !driver.HasPool() { return nil, errMissingDatabasePoolWithQueues } @@ -474,9 +468,10 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client // we'll need to add a config for this. 
instanceName := "default" - client.notifier = notifier.New(archetype, driver.GetDBPool().Config().ConnConfig, client.monitor.SetNotifierStatus, logger) + client.notifier = notifier.New(archetype, driver.GetListener(), client.monitor.SetNotifierStatus, logger) + var err error - client.elector, err = leadership.NewElector(client.adapter, client.notifier, instanceName, client.ID(), 5*time.Second, logger) + client.elector, err = leadership.NewElector(driver.GetExecutor(), client.notifier, instanceName, client.ID(), 5*time.Second, logger) if err != nil { return nil, err } @@ -496,7 +491,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client CancelledJobRetentionPeriod: config.CancelledJobRetentionPeriod, CompletedJobRetentionPeriod: config.CompletedJobRetentionPeriod, DiscardedJobRetentionPeriod: config.DiscardedJobRetentionPeriod, - }, driver.GetDBPool()) + }, driver.GetExecutor()) maintenanceServices = append(maintenanceServices, jobCleaner) client.testSignals.jobCleaner = &jobCleaner.TestSignals } @@ -513,7 +508,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client } periodicJobs = append(periodicJobs, &maintenance.PeriodicJob{ - ConstructorFunc: func() (*dbadapter.JobInsertParams, error) { + ConstructorFunc: func() (*riverdriver.JobInsertParams, *dbunique.UniqueOpts, error) { return insertParamsFromArgsAndOptions(periodicJob.constructorFunc()) }, RunOnStart: opts.RunOnStart, @@ -522,8 +517,9 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client } periodicJobEnqueuer := maintenance.NewPeriodicJobEnqueuer(archetype, &maintenance.PeriodicJobEnqueuerConfig{ - PeriodicJobs: periodicJobs, - }, adapter) + AdvisoryLockPrefix: config.AdvisoryLockPrefix, + PeriodicJobs: periodicJobs, + }, driver.GetExecutor()) maintenanceServices = append(maintenanceServices, periodicJobEnqueuer) client.testSignals.periodicJobEnqueuer = &periodicJobEnqueuer.TestSignals } @@ -534,7 +530,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client scheduleFunc = config.ReindexerSchedule.Next } - reindexer := maintenance.NewReindexer(archetype, &maintenance.ReindexerConfig{ScheduleFunc: scheduleFunc}, driver.GetDBPool()) + reindexer := maintenance.NewReindexer(archetype, &maintenance.ReindexerConfig{ScheduleFunc: scheduleFunc}, driver.GetExecutor()) maintenanceServices = append(maintenanceServices, reindexer) client.testSignals.reindexer = &reindexer.TestSignals } @@ -549,7 +545,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client } return nil }, - }, driver.GetDBPool()) + }, driver.GetExecutor()) maintenanceServices = append(maintenanceServices, rescuer) client.testSignals.rescuer = &rescuer.TestSignals } @@ -557,7 +553,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client { scheduler := maintenance.NewScheduler(archetype, &maintenance.SchedulerConfig{ Interval: config.schedulerInterval, - }, driver.GetDBPool()) + }, driver.GetExecutor()) maintenanceServices = append(maintenanceServices, scheduler) client.testSignals.scheduler = &scheduler.TestSignals } @@ -603,7 +599,7 @@ func (c *Client[TTx]) Start(ctx context.Context) error { // available, the client appears to have started even though it's completely // non-functional. Here we try to make an initial assessment of health and // return quickly in case of an apparent problem. 
- _, err := c.driver.GetDBPool().Exec(ctx, "SELECT 1") + _, err := c.driver.GetExecutor().Exec(ctx, "SELECT 1") if err != nil { return fmt.Errorf("error making initial connection to database: %w", err) } @@ -858,7 +854,7 @@ func (c *Client[TTx]) distributeJobCompleterCallback(update jobcompleter.Complet c.statsNumJobs++ }() - c.distributeJob(dbsqlc.JobRowFromInternal(update.Job), jobStatisticsFromInternal(update.JobStats)) + c.distributeJob(update.Job, jobStatisticsFromInternal(update.JobStats)) } // Dump aggregate stats from job completions to logs periodically. These @@ -927,19 +923,19 @@ func (c *Client[TTx]) handleLeadershipChange(ctx context.Context, notification * func (c *Client[TTx]) provisionProducers() error { for queue, queueConfig := range c.config.Queues { config := &producerConfig{ + ClientID: c.config.ID, ErrorHandler: c.config.ErrorHandler, FetchCooldown: c.config.FetchCooldown, FetchPollInterval: c.config.FetchPollInterval, JobTimeout: c.config.JobTimeout, MaxWorkerCount: uint16(queueConfig.MaxWorkers), Notifier: c.notifier, - QueueName: queue, + Queue: queue, RetryPolicy: c.config.RetryPolicy, SchedulerInterval: c.config.schedulerInterval, - WorkerName: c.config.ID, Workers: c.config.Workers, } - producer, err := newProducer(&c.baseService.Archetype, c.adapter, c.completer, config) + producer, err := newProducer(&c.baseService.Archetype, c.driver.GetExecutor(), c.completer, config) if err != nil { return err } @@ -998,15 +994,7 @@ func (c *Client[TTx]) runProducers(fetchNewWorkCtx, workCtx context.Context) { // Returns the up-to-date JobRow for the specified jobID if it exists. Returns // ErrNotFound if the job doesn't exist. func (c *Client[TTx]) JobCancel(ctx context.Context, jobID int64) (*rivertype.JobRow, error) { - job, err := c.adapter.JobCancel(ctx, jobID) - if err != nil { - if errors.Is(err, riverdriver.ErrNoRows) { - return nil, ErrNotFound - } - return nil, err - } - - return dbsqlc.JobRowFromInternal(job), nil + return c.jobCancel(ctx, c.driver.GetExecutor(), jobID) } // JobCancelTx cancels the job with the given ID within the specified @@ -1051,44 +1039,28 @@ func (c *Client[TTx]) JobCancel(ctx context.Context, jobID int64) (*rivertype.Jo // Returns the up-to-date JobRow for the specified jobID if it exists. Returns // ErrNotFound if the job doesn't exist. func (c *Client[TTx]) JobCancelTx(ctx context.Context, tx TTx, jobID int64) (*rivertype.JobRow, error) { - job, err := c.adapter.JobCancelTx(ctx, c.driver.UnwrapTx(tx), jobID) - if errors.Is(err, riverdriver.ErrNoRows) { - return nil, ErrNotFound - } - if err != nil { - return nil, err - } + return c.jobCancel(ctx, c.driver.UnwrapExecutor(tx), jobID) +} - return dbsqlc.JobRowFromInternal(job), nil +func (c *Client[TTx]) jobCancel(ctx context.Context, exec riverdriver.Executor, jobID int64) (*rivertype.JobRow, error) { + return exec.JobCancel(ctx, &riverdriver.JobCancelParams{ + ID: jobID, + CancelAttemptedAt: c.baseService.TimeNowUTC(), + JobControlTopic: string(notifier.NotificationTopicJobControl), + }) } // JobGet fetches a single job by its ID. Returns the up-to-date JobRow for the // specified jobID if it exists. Returns ErrNotFound if the job doesn't exist. 
-func (c *Client[TTx]) JobGet(ctx context.Context, jobID int64) (*rivertype.JobRow, error) { - job, err := c.adapter.JobGet(ctx, jobID) - if err != nil { - if errors.Is(err, riverdriver.ErrNoRows) { - return nil, ErrNotFound - } - return nil, err - } - - return dbsqlc.JobRowFromInternal(job), nil +func (c *Client[TTx]) JobGet(ctx context.Context, id int64) (*rivertype.JobRow, error) { + return c.driver.GetExecutor().JobGetByID(ctx, id) } // JobGetTx fetches a single job by its ID, within a transaction. Returns the // up-to-date JobRow for the specified jobID if it exists. Returns ErrNotFound // if the job doesn't exist. -func (c *Client[TTx]) JobGetTx(ctx context.Context, tx TTx, jobID int64) (*rivertype.JobRow, error) { - job, err := c.adapter.JobGetTx(ctx, c.driver.UnwrapTx(tx), jobID) - if errors.Is(err, riverdriver.ErrNoRows) { - return nil, ErrNotFound - } - if err != nil { - return nil, err - } - - return dbsqlc.JobRowFromInternal(job), nil +func (c *Client[TTx]) JobGetTx(ctx context.Context, tx TTx, id int64) (*rivertype.JobRow, error) { + return c.driver.UnwrapExecutor(tx).JobGetByID(ctx, id) } // JobRetry updates the job with the given ID to make it immediately available @@ -1099,16 +1071,8 @@ func (c *Client[TTx]) JobGetTx(ctx context.Context, tx TTx, jobID int64) (*river // // MaxAttempts is also incremented by one if the job has already exhausted its // max attempts. -func (c *Client[TTx]) JobRetry(ctx context.Context, jobID int64) (*rivertype.JobRow, error) { - job, err := c.adapter.JobRetryImmediately(ctx, jobID) - if err != nil { - if errors.Is(err, riverdriver.ErrNoRows) { - return nil, ErrNotFound - } - return nil, err - } - - return dbsqlc.JobRowFromInternal(job), nil +func (c *Client[TTx]) JobRetry(ctx context.Context, id int64) (*rivertype.JobRow, error) { + return c.driver.GetExecutor().JobRetry(ctx, id) } // JobRetryTx updates the job with the given ID to make it immediately available @@ -1124,16 +1088,8 @@ func (c *Client[TTx]) JobRetry(ctx context.Context, jobID int64) (*rivertype.Job // // MaxAttempts is also incremented by one if the job has already exhausted its // max attempts. 
-func (c *Client[TTx]) JobRetryTx(ctx context.Context, tx TTx, jobID int64) (*rivertype.JobRow, error) { - job, err := c.adapter.JobRetryImmediatelyTx(ctx, c.driver.UnwrapTx(tx), jobID) - if errors.Is(err, riverdriver.ErrNoRows) { - return nil, ErrNotFound - } - if err != nil { - return nil, err - } - - return dbsqlc.JobRowFromInternal(job), nil +func (c *Client[TTx]) JobRetryTx(ctx context.Context, tx TTx, id int64) (*rivertype.JobRow, error) { + return c.driver.UnwrapExecutor(tx).JobRetry(ctx, id) } // ID returns the unique ID of this client as set in its config or @@ -1142,10 +1098,10 @@ func (c *Client[TTx]) ID() string { return c.config.ID } -func insertParamsFromArgsAndOptions(args JobArgs, insertOpts *InsertOpts) (*dbadapter.JobInsertParams, error) { +func insertParamsFromArgsAndOptions(args JobArgs, insertOpts *InsertOpts) (*riverdriver.JobInsertParams, *dbunique.UniqueOpts, error) { encodedArgs, err := json.Marshal(args) if err != nil { - return nil, fmt.Errorf("error marshaling args to JSON: %w", err) + return nil, nil, fmt.Errorf("error marshaling args to JSON: %w", err) } if insertOpts == nil { @@ -1162,16 +1118,19 @@ func insertParamsFromArgsAndOptions(args JobArgs, insertOpts *InsertOpts) (*dbad queue := valutil.FirstNonZero(insertOpts.Queue, jobInsertOpts.Queue, rivercommon.QueueDefault) if err := validateQueueName(queue); err != nil { - return nil, err + return nil, nil, err } tags := insertOpts.Tags if insertOpts.Tags == nil { tags = jobInsertOpts.Tags } + if tags == nil { + tags = []string{} + } if priority > 4 { - return nil, errors.New("priority must be between 1 and 4") + return nil, nil, errors.New("priority must be between 1 and 4") } uniqueOpts := insertOpts.UniqueOpts @@ -1179,7 +1138,7 @@ func insertParamsFromArgsAndOptions(args JobArgs, insertOpts *InsertOpts) (*dbad uniqueOpts = jobInsertOpts.UniqueOpts } if err := uniqueOpts.validate(); err != nil { - return nil, err + return nil, nil, err } metadata := insertOpts.Metadata @@ -1187,31 +1146,23 @@ func insertParamsFromArgsAndOptions(args JobArgs, insertOpts *InsertOpts) (*dbad metadata = []byte("{}") } - insertParams := &dbadapter.JobInsertParams{ + insertParams := &riverdriver.JobInsertParams{ EncodedArgs: encodedArgs, Kind: args.Kind(), MaxAttempts: maxAttempts, Metadata: metadata, Priority: priority, Queue: queue, - State: dbsqlc.JobState(JobStateAvailable), + State: rivertype.JobStateAvailable, Tags: tags, } - if !uniqueOpts.isEmpty() { - insertParams.Unique = true - insertParams.UniqueByArgs = uniqueOpts.ByArgs - insertParams.UniqueByQueue = uniqueOpts.ByQueue - insertParams.UniqueByPeriod = uniqueOpts.ByPeriod - insertParams.UniqueByState = sliceutil.Map(uniqueOpts.ByState, func(s rivertype.JobState) dbsqlc.JobState { return dbsqlc.JobState(s) }) - } - if !insertOpts.ScheduledAt.IsZero() { - insertParams.ScheduledAt = insertOpts.ScheduledAt - insertParams.State = dbsqlc.JobState(JobStateScheduled) + insertParams.ScheduledAt = &insertOpts.ScheduledAt + insertParams.State = rivertype.JobStateScheduled } - return insertParams, nil + return insertParams, (*dbunique.UniqueOpts)(&uniqueOpts), nil } var errNoDriverDBPool = errors.New("driver must have non-nil database pool to use non-transactional methods like Insert and InsertMany (try InsertTx or InsertManyTx instead)") @@ -1227,25 +1178,11 @@ var errNoDriverDBPool = errors.New("driver must have non-nil database pool to us // // handle error // } func (c *Client[TTx]) Insert(ctx context.Context, args JobArgs, opts *InsertOpts) (*rivertype.JobRow, error) { - if
c.driver.GetDBPool() == nil { + if !c.driver.HasPool() { return nil, errNoDriverDBPool } - if err := c.validateJobArgs(args); err != nil { - return nil, err - } - - insertParams, err := insertParamsFromArgsAndOptions(args, opts) - if err != nil { - return nil, err - } - - res, err := c.adapter.JobInsert(ctx, insertParams) - if err != nil { - return nil, err - } - - return dbsqlc.JobRowFromInternal(res.Job), nil + return c.insert(ctx, c.driver.GetExecutor(), args, opts) } // InsertTx inserts a new job with the provided args on the given transaction. @@ -1263,21 +1200,25 @@ func (c *Client[TTx]) Insert(ctx context.Context, args JobArgs, opts *InsertOpts // changes. An inserted job isn't visible to be worked until the transaction // commits, and if the transaction rolls back, so too is the inserted job. func (c *Client[TTx]) InsertTx(ctx context.Context, tx TTx, args JobArgs, opts *InsertOpts) (*rivertype.JobRow, error) { + return c.insert(ctx, c.driver.UnwrapExecutor(tx), args, opts) +} + +func (c *Client[TTx]) insert(ctx context.Context, exec riverdriver.Executor, args JobArgs, opts *InsertOpts) (*rivertype.JobRow, error) { if err := c.validateJobArgs(args); err != nil { return nil, err } - insertParams, err := insertParamsFromArgsAndOptions(args, opts) + params, uniqueOpts, err := insertParamsFromArgsAndOptions(args, opts) if err != nil { return nil, err } - res, err := c.adapter.JobInsertTx(ctx, c.driver.UnwrapTx(tx), insertParams) + jobInsertRes, err := c.uniqueInserter.JobInsert(ctx, exec, params, uniqueOpts) if err != nil { return nil, err } - return dbsqlc.JobRowFromInternal(res.Job), nil + return jobInsertRes.Job, nil } // InsertManyParams encapsulates a single job combined with insert options for @@ -1306,7 +1247,7 @@ type InsertManyParams struct { // // handle error // } func (c *Client[TTx]) InsertMany(ctx context.Context, params []InsertManyParams) (int64, error) { - if c.driver.GetDBPool() == nil { + if !c.driver.HasPool() { return 0, errNoDriverDBPool } @@ -1315,7 +1256,7 @@ func (c *Client[TTx]) InsertMany(ctx context.Context, params []InsertManyParams) return 0, err } - return c.adapter.JobInsertMany(ctx, insertParams) + return c.driver.GetExecutor().JobInsertMany(ctx, insertParams) } // InsertManyTx inserts many jobs at once using Postgres' `COPY FROM` mechanism, @@ -1343,17 +1284,17 @@ func (c *Client[TTx]) InsertManyTx(ctx context.Context, tx TTx, params []InsertM return 0, err } - return c.adapter.JobInsertManyTx(ctx, c.driver.UnwrapTx(tx), insertParams) + return c.driver.UnwrapExecutor(tx).JobInsertMany(ctx, insertParams) } // Validates input parameters for a batch insert operation and generates a // set of batch insert parameters. -func (c *Client[TTx]) insertManyParams(params []InsertManyParams) ([]*dbadapter.JobInsertParams, error) { +func (c *Client[TTx]) insertManyParams(params []InsertManyParams) ([]*riverdriver.JobInsertParams, error) { if len(params) < 1 { return nil, errors.New("no jobs to insert") } - insertParams := make([]*dbadapter.JobInsertParams, len(params)) + insertParams := make([]*riverdriver.JobInsertParams, len(params)) for i, param := range params { if err := c.validateJobArgs(param.Args); err != nil { return nil, err @@ -1369,7 +1310,7 @@ func (c *Client[TTx]) insertManyParams(params []InsertManyParams) ([]*dbadapter.
} var err error - insertParams[i], err = insertParamsFromArgsAndOptions(param.Args, param.InsertOpts) + insertParams[i], _, err = insertParamsFromArgsAndOptions(param.Args, param.InsertOpts) if err != nil { return nil, err } @@ -1419,7 +1360,7 @@ func validateQueueName(queueName string) error { // // handle error // } func (c *Client[TTx]) JobList(ctx context.Context, params *JobListParams) ([]*rivertype.JobRow, error) { - if c.driver.GetDBPool() == nil { + if !c.driver.HasPool() { return nil, errNoDriverDBPool } @@ -1431,11 +1372,7 @@ func (c *Client[TTx]) JobList(ctx context.Context, params *JobListParams) ([]*ri return nil, err } - internalJobs, err := c.adapter.JobList(ctx, *dbParams) - if err != nil { - return nil, err - } - return dbsqlc.JobRowsFromInternal(internalJobs), nil + return dblist.JobList(ctx, c.driver.GetExecutor(), dbParams) } // JobListTx returns a paginated list of jobs matching the provided filters. The @@ -1456,9 +1393,5 @@ func (c *Client[TTx]) JobListTx(ctx context.Context, tx TTx, params *JobListPara return nil, err } - internalJobs, err := c.adapter.JobListTx(ctx, c.driver.UnwrapTx(tx), *dbParams) - if err != nil { - return nil, err - } - return dbsqlc.JobRowsFromInternal(internalJobs), nil + return dblist.JobList(ctx, c.driver.UnwrapExecutor(tx), dbParams) } diff --git a/client_test.go b/client_test.go index 4bf2258a..f8f2e935 100644 --- a/client_test.go +++ b/client_test.go @@ -20,14 +20,14 @@ import ( "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/componentstatus" - "github.com/riverqueue/river/internal/dbadapter" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/maintenance" "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/util/dbutil" "github.com/riverqueue/river/internal/util/ptrutil" "github.com/riverqueue/river/internal/util/sliceutil" "github.com/riverqueue/river/internal/util/valutil" + "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivertype" ) @@ -130,11 +130,9 @@ func newTestConfig(t *testing.T, callback callbackFunc) *Config { } } -func newTestClient(ctx context.Context, t *testing.T, config *Config) *Client[pgx.Tx] { +func newTestClient(t *testing.T, dbPool *pgxpool.Pool, config *Config) *Client[pgx.Tx] { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - client, err := NewClient(riverpgxv5.New(dbPool), config) require.NoError(t, err) @@ -159,7 +157,9 @@ func startClient(ctx context.Context, t *testing.T, client *Client[pgx.Tx]) { func runNewTestClient(ctx context.Context, t *testing.T, config *Config) *Client[pgx.Tx] { t.Helper() - client := newTestClient(ctx, t, config) + + dbPool := riverinternaltest.TestDB(ctx, t) + client := newTestClient(t, dbPool, config) startClient(ctx, t, client) return client } @@ -170,14 +170,16 @@ func Test_Client(t *testing.T) { ctx := context.Background() type testBundle struct { + dbPool *pgxpool.Pool subscribeChan <-chan *Event } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() + dbPool := riverinternaltest.TestDB(ctx, t) config := newTestConfig(t, nil) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) subscribeChan, _ := client.Subscribe( EventKindJobCancelled, @@ -187,6 +189,7 @@ func Test_Client(t *testing.T) { ) return client, &testBundle{ + dbPool: dbPool, subscribeChan: subscribeChan, } } @@ -277,7 +280,7 
@@ func Test_Client(t *testing.T) { // _outside of_ a transaction. The exact same test logic applies to each case; // the only difference is a different cancelFunc provided by the specific // subtest. - cancelRunningJobTestHelper := func(t *testing.T, cancelFunc func(ctx context.Context, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error)) { //nolint:thelper + cancelRunningJobTestHelper := func(t *testing.T, cancelFunc func(ctx context.Context, dbPool *pgxpool.Pool, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error)) { //nolint:thelper client, bundle := setup(t) jobStartedChan := make(chan int64) @@ -303,7 +306,7 @@ func Test_Client(t *testing.T) { require.Equal(t, insertedJob.ID, startedJobID) // Cancel the job: - updatedJob, err := cancelFunc(ctx, client, insertedJob.ID) + updatedJob, err := cancelFunc(ctx, bundle.dbPool, client, insertedJob.ID) require.NoError(t, err) require.NotNil(t, updatedJob) // Job is still actively running at this point because the query wouldn't @@ -324,7 +327,7 @@ func Test_Client(t *testing.T) { t.Run("CancelRunningJob", func(t *testing.T) { t.Parallel() - cancelRunningJobTestHelper(t, func(ctx context.Context, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error) { + cancelRunningJobTestHelper(t, func(ctx context.Context, dbPool *pgxpool.Pool, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error) { return client.JobCancel(ctx, jobID) }) }) @@ -332,12 +335,12 @@ func Test_Client(t *testing.T) { t.Run("CancelRunningJobInTx", func(t *testing.T) { t.Parallel() - cancelRunningJobTestHelper(t, func(ctx context.Context, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error) { + cancelRunningJobTestHelper(t, func(ctx context.Context, dbPool *pgxpool.Pool, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error) { var ( job *rivertype.JobRow err error ) - txErr := pgx.BeginFunc(ctx, client.driver.GetDBPool(), func(tx pgx.Tx) error { + txErr := pgx.BeginFunc(ctx, dbPool, func(tx pgx.Tx) error { job, err = client.JobCancelTx(ctx, tx, jobID) return err }) @@ -388,8 +391,8 @@ func Test_Client(t *testing.T) { require.Nil(t, jobAfter) // Cancel an unknown job ID, within a transaction: - err = pgx.BeginFunc(ctx, client.driver.GetDBPool(), func(tx pgx.Tx) error { - jobAfter, err := client.JobCancelTx(ctx, tx, 0) + err = dbutil.WithTx(ctx, client.driver.GetExecutor(), func(ctx context.Context, exec riverdriver.ExecutorTx) error { + jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{ID: 0}) require.ErrorIs(t, err, ErrNotFound) require.Nil(t, jobAfter) return nil @@ -400,17 +403,17 @@ func Test_Client(t *testing.T) { t.Run("AlternateSchema", func(t *testing.T) { t.Parallel() - client, _ := setup(t) + _, bundle := setup(t) // Reconfigure the pool with an alternate schema, initialize a new pool - dbPoolConfig := client.driver.GetDBPool().Config() // a copy of the original config + dbPoolConfig := bundle.dbPool.Config() // a copy of the original config dbPoolConfig.ConnConfig.RuntimeParams["search_path"] = "alternate_schema" dbPool, err := pgxpool.NewWithConfig(ctx, dbPoolConfig) require.NoError(t, err) t.Cleanup(dbPool.Close) - client, err = NewClient(riverpgxv5.New(dbPool), newTestConfig(t, nil)) + client, err := NewClient(riverpgxv5.New(dbPool), newTestConfig(t, nil)) require.NoError(t, err) // We don't actually verify that River's functional on another schema so @@ -510,7 +513,7 @@ func Test_Client_Stop(t *testing.T) { t.Run("not started", func(t *testing.T) { t.Parallel() - client := newTestClient(ctx, 
t, newTestConfig(t, nil)) + + dbPool := riverinternaltest.TestDB(ctx, t) + client := newTestClient(t, dbPool, newTestConfig(t, nil)) err := client.Stop(ctx) require.Error(t, err) @@ -635,9 +640,9 @@ func Test_Client_Stop(t *testing.T) { require.NoError(t, client.Stop(ctx)) - count, err := (&dbsqlc.Queries{}).JobCountRunning(ctx, client.driver.GetDBPool()) + runningJobs, err := client.JobList(ctx, NewJobListParams().State(rivertype.JobStateRunning)) require.NoError(t, err) - require.Equal(t, int64(0), count, "expected no jobs to be left running") + require.Empty(t, runningJobs, "expected no jobs to be left running") }) t.Run("WithSubscriber", func(t *testing.T) { @@ -833,8 +838,9 @@ func Test_Client_Insert(t *testing.T) { setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() + dbPool := riverinternaltest.TestDB(ctx, t) config := newTestConfig(t, nil) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) return client, &testBundle{} } @@ -948,10 +954,11 @@ func Test_Client_InsertTx(t *testing.T) { setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() + dbPool := riverinternaltest.TestDB(ctx, t) config := newTestConfig(t, nil) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) - tx, err := client.driver.GetDBPool().Begin(ctx) + tx, err := dbPool.Begin(ctx) require.NoError(t, err) t.Cleanup(func() { tx.Rollback(ctx) }) @@ -1043,25 +1050,22 @@ func Test_Client_InsertMany(t *testing.T) { ctx := context.Background() - type testBundle struct { - queries *dbsqlc.Queries - } + type testBundle struct{} setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() + dbPool := riverinternaltest.TestDB(ctx, t) config := newTestConfig(t, nil) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) - return client, &testBundle{ - queries: dbsqlc.New(), - } + return client, &testBundle{} } t.Run("SucceedsWithMultipleJobs", func(t *testing.T) { t.Parallel() - client, bundle := setup(t) + client, _ := setup(t) count, err := client.InsertMany(ctx, []InsertManyParams{ {Args: noOpArgs{}, InsertOpts: &InsertOpts{Queue: "foo", Priority: 2}}, @@ -1070,7 +1074,7 @@ func Test_Client_InsertMany(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), count) - jobs, err := bundle.queries.JobGetByKind(ctx, client.driver.GetDBPool(), (noOpArgs{}).Kind()) + jobs, err := client.driver.GetExecutor().JobGetByKindMany(ctx, []string{(noOpArgs{}).Kind()}) require.NoError(t, err) require.Len(t, jobs, 2, "Expected to find exactly two jobs of kind: "+(noOpArgs{}).Kind()) //nolint:goconst }) @@ -1078,7 +1082,7 @@ func Test_Client_InsertMany(t *testing.T) { t.Run("WithInsertOptsScheduledAtZeroTime", func(t *testing.T) { t.Parallel() - client, bundle := setup(t) + client, _ := setup(t) count, err := client.InsertMany(ctx, []InsertManyParams{ {Args: &noOpArgs{}, InsertOpts: &InsertOpts{ScheduledAt: time.Time{}}}, @@ -1086,7 +1090,7 @@ func Test_Client_InsertMany(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(1), count) - jobs, err := bundle.queries.JobGetByKind(ctx, client.driver.GetDBPool(), (noOpArgs{}).Kind()) + jobs, err := client.driver.GetExecutor().JobGetByKindMany(ctx, []string{(noOpArgs{}).Kind()}) require.NoError(t, err) require.Len(t, jobs, 1, "Expected to find exactly one job of kind: "+(noOpArgs{}).Kind()) jobRow := jobs[0] @@ -1178,17 +1182,17 @@ func Test_Client_InsertManyTx(t *testing.T) { ctx := context.Background() type testBundle struct { - 
queries *dbsqlc.Queries - tx pgx.Tx + tx pgx.Tx } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() + dbPool := riverinternaltest.TestDB(ctx, t) config := newTestConfig(t, nil) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) - tx, err := client.driver.GetDBPool().Begin(ctx) + tx, err := dbPool.Begin(ctx) require.NoError(t, err) t.Cleanup(func() { tx.Rollback(ctx) }) @@ -1209,14 +1213,14 @@ func Test_Client_InsertManyTx(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), count) - jobs, err := bundle.queries.JobGetByKind(ctx, bundle.tx, (noOpArgs{}).Kind()) + jobs, err := client.driver.UnwrapExecutor(bundle.tx).JobGetByKindMany(ctx, []string{(noOpArgs{}).Kind()}) require.NoError(t, err) require.Len(t, jobs, 2, "Expected to find exactly two jobs of kind: "+(noOpArgs{}).Kind()) require.NoError(t, bundle.tx.Commit(ctx)) // Ensure the jobs are visible outside the transaction: - jobs, err = bundle.queries.JobGetByKind(ctx, client.driver.GetDBPool(), (noOpArgs{}).Kind()) + jobs, err = client.driver.GetExecutor().JobGetByKindMany(ctx, []string{(noOpArgs{}).Kind()}) require.NoError(t, err) require.Len(t, jobs, 2, "Expected to find exactly two jobs of kind: "+(noOpArgs{}).Kind()) }) @@ -1232,10 +1236,10 @@ func Test_Client_InsertManyTx(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(1), count) - insertedJobs, err := bundle.queries.JobGetByKind(ctx, bundle.tx, noOpArgs{}.Kind()) + insertedJobs, err := client.driver.UnwrapExecutor(bundle.tx).JobGetByKindMany(ctx, []string{(noOpArgs{}).Kind()}) require.NoError(t, err) require.Len(t, insertedJobs, 1) - require.Equal(t, dbsqlc.JobStateScheduled, insertedJobs[0].State) + require.Equal(t, rivertype.JobStateScheduled, insertedJobs[0].State) require.WithinDuration(t, time.Now().Add(time.Minute), insertedJobs[0].ScheduledAt, 2*time.Second) }) @@ -1318,8 +1322,9 @@ func Test_Client_JobGet(t *testing.T) { setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() + dbPool := riverinternaltest.TestDB(ctx, t) config := newTestConfig(t, nil) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) return client, &testBundle{} } @@ -1355,10 +1360,7 @@ func Test_Client_JobGet(t *testing.T) { func Test_Client_JobList(t *testing.T) { t.Parallel() - var ( - ctx = context.Background() - queries = dbsqlc.New() - ) + ctx := context.Background() type insertJobParams struct { AttemptedAt *time.Time @@ -1367,11 +1369,11 @@ func Test_Client_JobList(t *testing.T) { Metadata []byte Queue string ScheduledAt *time.Time - State dbsqlc.JobState + State rivertype.JobState } - insertJob := func(ctx context.Context, dbtx dbsqlc.DBTX, params insertJobParams) *dbsqlc.RiverJob { - job, err := queries.JobInsert(ctx, dbtx, dbsqlc.JobInsertParams{ + insertJob := func(ctx context.Context, exec riverdriver.Executor, params insertJobParams) *rivertype.JobRow { + jobRow, err := exec.JobInsert(ctx, &riverdriver.JobInsertParams{ Attempt: 1, AttemptedAt: params.AttemptedAt, FinalizedAt: params.FinalizedAt, @@ -1381,10 +1383,10 @@ func Test_Client_JobList(t *testing.T) { Priority: rivercommon.PriorityDefault, Queue: QueueDefault, ScheduledAt: params.ScheduledAt, - State: valutil.FirstNonZero(params.State, dbsqlc.JobStateAvailable), + State: valutil.FirstNonZero(params.State, rivertype.JobStateAvailable), }) require.NoError(t, err) - return job + return jobRow } type testBundle struct{} @@ -1392,8 +1394,9 @@ func Test_Client_JobList(t *testing.T) { setup := func(t 
*testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() + dbPool := riverinternaltest.TestDB(ctx, t) config := newTestConfig(t, nil) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) return client, &testBundle{} } @@ -1403,9 +1406,9 @@ func Test_Client_JobList(t *testing.T) { client, _ := setup(t) - job1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateAvailable}) - job2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateAvailable}) - job3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateRunning}) + job1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateAvailable}) + job2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateAvailable}) + job3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateRunning}) jobs, err := client.JobList(ctx, NewJobListParams().State(JobStateAvailable)) require.NoError(t, err) @@ -1424,14 +1427,14 @@ func Test_Client_JobList(t *testing.T) { now := time.Now().UTC() - states := map[rivertype.JobState]dbsqlc.JobState{ - JobStateAvailable: dbsqlc.JobStateAvailable, - JobStateRetryable: dbsqlc.JobStateRetryable, - JobStateScheduled: dbsqlc.JobStateScheduled, + states := map[rivertype.JobState]rivertype.JobState{ + JobStateAvailable: rivertype.JobStateAvailable, + JobStateRetryable: rivertype.JobStateRetryable, + JobStateScheduled: rivertype.JobStateScheduled, } for state, dbState := range states { - job1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbState, ScheduledAt: ptrutil.Ptr(now)}) - job2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbState, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + job1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: dbState, ScheduledAt: ptrutil.Ptr(now)}) + job2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: dbState, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) jobs, err := client.JobList(ctx, NewJobListParams().State(state)) require.NoError(t, err) @@ -1450,14 +1453,14 @@ func Test_Client_JobList(t *testing.T) { now := time.Now().UTC() - states := map[rivertype.JobState]dbsqlc.JobState{ - JobStateCancelled: dbsqlc.JobStateCancelled, - JobStateCompleted: dbsqlc.JobStateCompleted, - JobStateDiscarded: dbsqlc.JobStateDiscarded, + states := map[rivertype.JobState]rivertype.JobState{ + JobStateCancelled: rivertype.JobStateCancelled, + JobStateCompleted: rivertype.JobStateCompleted, + JobStateDiscarded: rivertype.JobStateDiscarded, } for state, dbState := range states { - job1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbState, FinalizedAt: ptrutil.Ptr(now.Add(-10 * time.Second))}) - job2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbState, FinalizedAt: ptrutil.Ptr(now.Add(-15 * time.Second))}) + job1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: dbState, FinalizedAt: ptrutil.Ptr(now.Add(-10 * time.Second))}) + job2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: dbState, FinalizedAt: ptrutil.Ptr(now.Add(-15 * time.Second))}) jobs, err := client.JobList(ctx, NewJobListParams().State(state)) require.NoError(t, err) @@ -1475,8 +1478,8 @@ func Test_Client_JobList(t *testing.T) { client, _ := setup(t) now := time.Now().UTC() - job1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: 
dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(now)}) - job2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + job1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(now)}) + job2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) jobs, err := client.JobList(ctx, NewJobListParams().State(JobStateRunning)) require.NoError(t, err) @@ -1494,9 +1497,9 @@ func Test_Client_JobList(t *testing.T) { client, _ := setup(t) now := time.Now().UTC() - job1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateAvailable, ScheduledAt: ptrutil.Ptr(now)}) - job2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateAvailable, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) - _ = insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateRunning}) + job1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateAvailable, ScheduledAt: ptrutil.Ptr(now)}) + job2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateAvailable, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + _ = insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateRunning}) jobs, err := client.JobList(ctx, nil) require.NoError(t, err) @@ -1510,25 +1513,22 @@ func Test_Client_JobList(t *testing.T) { client, _ := setup(t) now := time.Now().UTC() - job1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateAvailable, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) - job2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateAvailable, ScheduledAt: ptrutil.Ptr(now)}) - job3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) - job4 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(now)}) - job5 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) - job6 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(now)}) - jobRow1 := dbsqlc.JobRowFromInternal(job1) - jobRow3 := dbsqlc.JobRowFromInternal(job3) - jobRow5 := dbsqlc.JobRowFromInternal(job5) - - jobs, err := client.JobList(ctx, NewJobListParams().After(JobListCursorFromJob(jobRow1))) + job1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateAvailable, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + job2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateAvailable, ScheduledAt: ptrutil.Ptr(now)}) + job3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + job4 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(now)}) + job5 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + job6 := insertJob(ctx, 
client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(now)}) + + jobs, err := client.JobList(ctx, NewJobListParams().After(JobListCursorFromJob(job1))) require.NoError(t, err) require.Equal(t, []int64{job2.ID}, sliceutil.Map(jobs, func(job *rivertype.JobRow) int64 { return job.ID })) - jobs, err = client.JobList(ctx, NewJobListParams().State(rivertype.JobStateRunning).After(JobListCursorFromJob(jobRow3))) + jobs, err = client.JobList(ctx, NewJobListParams().State(rivertype.JobStateRunning).After(JobListCursorFromJob(job3))) require.NoError(t, err) require.Equal(t, []int64{job4.ID}, sliceutil.Map(jobs, func(job *rivertype.JobRow) int64 { return job.ID })) - jobs, err = client.JobList(ctx, NewJobListParams().State(rivertype.JobStateCompleted).After(JobListCursorFromJob(jobRow5))) + jobs, err = client.JobList(ctx, NewJobListParams().State(rivertype.JobStateCompleted).After(JobListCursorFromJob(job5))) require.NoError(t, err) require.Equal(t, []int64{job6.ID}, sliceutil.Map(jobs, func(job *rivertype.JobRow) int64 { return job.ID })) }) @@ -1538,9 +1538,9 @@ func Test_Client_JobList(t *testing.T) { client, _ := setup(t) - job1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Metadata: []byte(`{"foo": "bar"}`)}) - job2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Metadata: []byte(`{"baz": "value"}`)}) - job3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Metadata: []byte(`{"baz": "value"}`)}) + job1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Metadata: []byte(`{"foo": "bar"}`)}) + job2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Metadata: []byte(`{"baz": "value"}`)}) + job3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Metadata: []byte(`{"baz": "value"}`)}) jobs, err := client.JobList(ctx, NewJobListParams().State("").Metadata(`{"foo": "bar"}`)) require.NoError(t, err) @@ -1571,15 +1571,18 @@ func Test_Client_JobRetry(t *testing.T) { ctx := context.Background() - type testBundle struct{} + type testBundle struct { + dbPool *pgxpool.Pool + } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() + dbPool := riverinternaltest.TestDB(ctx, t) config := newTestConfig(t, nil) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) - return client, &testBundle{} + return client, &testBundle{dbPool: dbPool} } t.Run("UpdatesAJobScheduledInTheFutureToBeImmediatelyAvailable", func(t *testing.T) { @@ -1602,7 +1605,7 @@ func Test_Client_JobRetry(t *testing.T) { t.Run("TxVariantAlsoUpdatesJobToAvailable", func(t *testing.T) { t.Parallel() - client, _ := setup(t) + client, bundle := setup(t) newJob, err := client.Insert(ctx, noOpArgs{}, &InsertOpts{ScheduledAt: time.Now().Add(time.Hour)}) require.NoError(t, err) @@ -1610,7 +1613,7 @@ func Test_Client_JobRetry(t *testing.T) { var jobAfter *rivertype.JobRow - err = pgx.BeginFunc(ctx, client.driver.GetDBPool(), func(tx pgx.Tx) error { + err = pgx.BeginFunc(ctx, bundle.dbPool, func(tx pgx.Tx) error { var err error jobAfter, err = client.JobRetryTx(ctx, tx, newJob.ID) return err @@ -1705,9 +1708,9 @@ func Test_Client_ErrorHandler(t *testing.T) { // Bypass the normal Insert function because that will error on an // unknown job. 
- insertParams, err := insertParamsFromArgsAndOptions(unregisteredJobArgs{}, nil) + insertParams, _, err := insertParamsFromArgsAndOptions(unregisteredJobArgs{}, nil) require.NoError(t, err) - _, err = client.adapter.JobInsert(ctx, insertParams) + _, err = client.driver.GetExecutor().JobInsert(ctx, insertParams) require.NoError(t, err) riverinternaltest.WaitOrTimeout(t, bundle.SubscribeChan) @@ -1743,29 +1746,26 @@ func Test_Client_ErrorHandler(t *testing.T) { func Test_Client_Maintenance(t *testing.T) { t.Parallel() - var ( - ctx = context.Background() - queries = dbsqlc.New() - ) + ctx := context.Background() type insertJobParams struct { - Attempt int16 + Attempt int AttemptedAt *time.Time FinalizedAt *time.Time Kind string - MaxAttempts int16 + MaxAttempts int ScheduledAt *time.Time - State dbsqlc.JobState + State rivertype.JobState } - insertJob := func(ctx context.Context, dbtx dbsqlc.DBTX, params insertJobParams) *dbsqlc.RiverJob { + insertJob := func(ctx context.Context, exec riverdriver.Executor, params insertJobParams) *rivertype.JobRow { // This is a lot of boilerplate to get a realistic job into the database // with the number of errors that corresponds to its attempt count. Without // that, the rescued/errored jobs can retry immediately with no backoff and // cause flakiness as they quickly get picked back up again. - errorCount := int(params.Attempt - 1) + errorCount := params.Attempt - 1 if params.Attempt == 0 { - errorCount = int(params.Attempt) + errorCount = params.Attempt } errorsBytes := make([][]byte, errorCount) @@ -1780,14 +1780,14 @@ func Test_Client_Maintenance(t *testing.T) { require.NoError(t, err) } - job, err := queries.JobInsert(ctx, dbtx, dbsqlc.JobInsertParams{ - Attempt: valutil.FirstNonZero(params.Attempt, int16(1)), + job, err := exec.JobInsert(ctx, &riverdriver.JobInsertParams{ + Attempt: valutil.FirstNonZero(params.Attempt, 1), AttemptedAt: params.AttemptedAt, Errors: errorsBytes, FinalizedAt: params.FinalizedAt, Kind: valutil.FirstNonZero(params.Kind, "test_kind"), - MaxAttempts: valutil.FirstNonZero(params.MaxAttempts, int16(rivercommon.MaxAttemptsDefault)), - Priority: int16(rivercommon.PriorityDefault), + MaxAttempts: valutil.FirstNonZero(params.MaxAttempts, rivercommon.MaxAttemptsDefault), + Priority: rivercommon.PriorityDefault, Queue: QueueDefault, ScheduledAt: params.ScheduledAt, State: params.State, @@ -1799,30 +1799,32 @@ func Test_Client_Maintenance(t *testing.T) { t.Run("JobCleaner", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, nil) config.CancelledJobRetentionPeriod = 1 * time.Hour config.CompletedJobRetentionPeriod = 1 * time.Hour config.DiscardedJobRetentionPeriod = 1 * time.Hour - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) deleteHorizon := time.Now().Add(-config.CompletedJobRetentionPeriod) // Take care to insert jobs before starting the client because otherwise // there's a race condition where the cleaner could run its initial // pass before our insertion is complete. 
- ineligibleJob1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateAvailable}) - ineligibleJob2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateRunning}) - ineligibleJob3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateScheduled}) + ineligibleJob1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateAvailable}) + ineligibleJob2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateRunning}) + ineligibleJob3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateScheduled}) - jobBeyondHorizon1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateCancelled, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) - jobBeyondHorizon2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) - jobBeyondHorizon3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) + jobBeyondHorizon1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateCancelled, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) + jobBeyondHorizon2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) + jobBeyondHorizon3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) // Will not be deleted. - jobWithinHorizon1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateCancelled, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) - jobWithinHorizon2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) - jobWithinHorizon3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) + jobWithinHorizon1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateCancelled, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) + jobWithinHorizon2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) + jobWithinHorizon3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) startClient(ctx, t, client) @@ -1871,7 +1873,7 @@ func Test_Client_Maintenance(t *testing.T) { svc := maintenance.GetService[*maintenance.PeriodicJobEnqueuer](client.queueMaintainer) svc.TestSignals.InsertedJobs.WaitOrTimeout() - jobs, err := queries.JobGetByKind(ctx, client.driver.GetDBPool(), (periodicJobArgs{}).Kind()) + jobs, err := client.driver.GetExecutor().JobGetByKindMany(ctx, []string{(periodicJobArgs{}).Kind()}) require.NoError(t, err) require.Len(t, jobs, 1, "Expected to find exactly one job of kind: "+(periodicJobArgs{}).Kind()) }) @@ -1895,7 +1897,7 @@ func Test_Client_Maintenance(t *testing.T) { svc.TestSignals.EnteredLoop.WaitOrTimeout() // No jobs yet because the RunOnStart option was 
not specified. - jobs, err := queries.JobGetByKind(ctx, client.driver.GetDBPool(), (periodicJobArgs{}).Kind()) + jobs, err := client.driver.GetExecutor().JobGetByKindMany(ctx, []string{(periodicJobArgs{}).Kind()}) require.NoError(t, err) require.Empty(t, jobs) }) @@ -1919,34 +1921,36 @@ func Test_Client_Maintenance(t *testing.T) { t.Run("Rescuer", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, nil) config.RescueStuckJobsAfter = 5 * time.Minute - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) now := time.Now() // Take care to insert jobs before starting the client because otherwise // there's a race condition where the rescuer could run its initial // pass before our insertion is complete. - ineligibleJob1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Kind: "noOp", State: dbsqlc.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(time.Minute))}) - ineligibleJob2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Kind: "noOp", State: dbsqlc.JobStateRetryable, ScheduledAt: ptrutil.Ptr(now.Add(time.Minute))}) - ineligibleJob3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Kind: "noOp", State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(now.Add(-time.Minute))}) + ineligibleJob1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Kind: "noOp", State: rivertype.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(time.Minute))}) + ineligibleJob2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Kind: "noOp", State: rivertype.JobStateRetryable, ScheduledAt: ptrutil.Ptr(now.Add(time.Minute))}) + ineligibleJob3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Kind: "noOp", State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(now.Add(-time.Minute))}) // large attempt number ensures these don't immediately start executing again: - jobStuckToRetry1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Kind: "noOp", State: dbsqlc.JobStateRunning, Attempt: 20, AttemptedAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) - jobStuckToRetry2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Kind: "noOp", State: dbsqlc.JobStateRunning, Attempt: 20, AttemptedAt: ptrutil.Ptr(now.Add(-30 * time.Minute))}) - jobStuckToDiscard := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{ - State: dbsqlc.JobStateRunning, + jobStuckToRetry1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Kind: "noOp", State: rivertype.JobStateRunning, Attempt: 20, AttemptedAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) + jobStuckToRetry2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Kind: "noOp", State: rivertype.JobStateRunning, Attempt: 20, AttemptedAt: ptrutil.Ptr(now.Add(-30 * time.Minute))}) + jobStuckToDiscard := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{ + State: rivertype.JobStateRunning, Attempt: 20, AttemptedAt: ptrutil.Ptr(now.Add(-5*time.Minute - time.Second)), MaxAttempts: 1, }) // Will not be rescued. 
- jobNotYetStuck1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Kind: "noOp", State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(now.Add(-4 * time.Minute))}) - jobNotYetStuck2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Kind: "noOp", State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(now.Add(-1 * time.Minute))}) - jobNotYetStuck3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{Kind: "noOp", State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(now.Add(-10 * time.Second))}) + jobNotYetStuck1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Kind: "noOp", State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(now.Add(-4 * time.Minute))}) + jobNotYetStuck2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Kind: "noOp", State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(now.Add(-1 * time.Minute))}) + jobNotYetStuck3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{Kind: "noOp", State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(now.Add(-10 * time.Second))}) startClient(ctx, t, client) @@ -1955,9 +1959,9 @@ func Test_Client_Maintenance(t *testing.T) { svc.TestSignals.FetchedBatch.WaitOrTimeout() svc.TestSignals.UpdatedBatch.WaitOrTimeout() - requireJobHasState := func(jobID int64, state dbsqlc.JobState) { + requireJobHasState := func(jobID int64, state rivertype.JobState) { t.Helper() - job, err := queries.JobGetByID(ctx, client.driver.GetDBPool(), jobID) + job, err := client.driver.GetExecutor().JobGetByID(ctx, jobID) require.NoError(t, err) require.Equal(t, state, job.State) } @@ -1968,11 +1972,11 @@ func Test_Client_Maintenance(t *testing.T) { requireJobHasState(ineligibleJob3.ID, ineligibleJob3.State) // Jobs to retry should be retryable: - requireJobHasState(jobStuckToRetry1.ID, dbsqlc.JobStateRetryable) - requireJobHasState(jobStuckToRetry2.ID, dbsqlc.JobStateRetryable) + requireJobHasState(jobStuckToRetry1.ID, rivertype.JobStateRetryable) + requireJobHasState(jobStuckToRetry2.ID, rivertype.JobStateRetryable) // This one should be discarded because it's already at MaxAttempts: - requireJobHasState(jobStuckToDiscard.ID, dbsqlc.JobStateDiscarded) + requireJobHasState(jobStuckToDiscard.ID, rivertype.JobStateDiscarded) // not eligible for rescue, not stuck long enough yet: requireJobHasState(jobNotYetStuck1.ID, jobNotYetStuck1.State) @@ -1983,28 +1987,30 @@ func Test_Client_Maintenance(t *testing.T) { t.Run("Scheduler", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, nil) config.Queues = map[string]QueueConfig{"another_queue": {MaxWorkers: 1}} // don't work jobs on the default queue we're using in this test - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) now := time.Now() // Take care to insert jobs before starting the client because otherwise // there's a race condition where the scheduler could run its initial // pass before our insertion is complete. 
- ineligibleJob1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateAvailable}) - ineligibleJob2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateRunning}) - ineligibleJob3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) + ineligibleJob1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateAvailable}) + ineligibleJob2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateRunning}) + ineligibleJob3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) - jobInPast1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) - jobInPast2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Minute))}) - jobInPast3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + jobInPast1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) + jobInPast2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Minute))}) + jobInPast3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) // Will not be scheduled. 
- jobInFuture1 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateCancelled, FinalizedAt: ptrutil.Ptr(now.Add(1 * time.Hour))}) - jobInFuture2 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(now.Add(1 * time.Minute))}) - jobInFuture3 := insertJob(ctx, client.driver.GetDBPool(), insertJobParams{State: dbsqlc.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(now.Add(10 * time.Second))}) + jobInFuture1 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateCancelled, FinalizedAt: ptrutil.Ptr(now.Add(1 * time.Hour))}) + jobInFuture2 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(now.Add(1 * time.Minute))}) + jobInFuture3 := insertJob(ctx, client.driver.GetExecutor(), insertJobParams{State: rivertype.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(now.Add(10 * time.Second))}) startClient(ctx, t, client) @@ -2012,9 +2018,9 @@ func Test_Client_Maintenance(t *testing.T) { scheduler := maintenance.GetService[*maintenance.Scheduler](client.queueMaintainer) scheduler.TestSignals.ScheduledBatch.WaitOrTimeout() - requireJobHasState := func(jobID int64, state dbsqlc.JobState) { + requireJobHasState := func(jobID int64, state rivertype.JobState) { t.Helper() - job, err := queries.JobGetByID(ctx, client.driver.GetDBPool(), jobID) + job, err := client.JobGet(ctx, jobID) require.NoError(t, err) require.Equal(t, state, job.State) } @@ -2025,9 +2031,9 @@ func Test_Client_Maintenance(t *testing.T) { requireJobHasState(ineligibleJob3.ID, ineligibleJob3.State) // Jobs with past timestamps should now be made available: - requireJobHasState(jobInPast1.ID, dbsqlc.JobStateAvailable) - requireJobHasState(jobInPast2.ID, dbsqlc.JobStateAvailable) - requireJobHasState(jobInPast3.ID, dbsqlc.JobStateAvailable) + requireJobHasState(jobInPast1.ID, rivertype.JobStateAvailable) + requireJobHasState(jobInPast2.ID, rivertype.JobStateAvailable) + requireJobHasState(jobInPast3.ID, rivertype.JobStateAvailable) // not scheduled, still in future requireJobHasState(jobInFuture1.ID, jobInFuture1.State) @@ -2050,6 +2056,8 @@ func Test_Client_RetryPolicy(t *testing.T) { t.Run("RetryUntilDiscarded", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return errors.New("job error") }) @@ -2058,30 +2066,29 @@ func Test_Client_RetryPolicy(t *testing.T) { // out of it to make comparisons easier.
config.RetryPolicy = &retryPolicyNoJitter{} - client := newTestClient(ctx, t, config) - queries := dbsqlc.New() + client := newTestClient(t, dbPool, config) subscribeChan, cancel := client.Subscribe(EventKindJobCompleted, EventKindJobFailed) t.Cleanup(cancel) - originalJobs := make([]*dbsqlc.RiverJob, rivercommon.MaxAttemptsDefault) + originalJobs := make([]*rivertype.JobRow, rivercommon.MaxAttemptsDefault) for i := 0; i < len(originalJobs); i++ { job := requireInsert(ctx, client) // regression protection to ensure we're testing the right number of jobs: require.Equal(t, rivercommon.MaxAttemptsDefault, job.MaxAttempts) - updatedJob, err := queries.JobUpdate(ctx, client.driver.GetDBPool(), dbsqlc.JobUpdateParams{ + updatedJob, err := client.driver.GetExecutor().JobUpdate(ctx, &riverdriver.JobUpdateParams{ ID: job.ID, AttemptedAtDoUpdate: true, AttemptedAt: ptrutil.Ptr(time.Now().UTC()), AttemptDoUpdate: true, - Attempt: int16(i), // starts at i, but will be i + 1 by the time it's being worked + Attempt: i, // starts at i, but will be i + 1 by the time it's being worked // Need to find a cleaner way around this, but state is required // because sqlc can't encode an empty string to the // corresponding enum. This value is not actually used because // StateDoUpdate was not supplied. - State: dbsqlc.JobStateAvailable, + State: rivertype.JobStateAvailable, }) require.NoError(t, err) @@ -2096,15 +2103,15 @@ func Test_Client_RetryPolicy(t *testing.T) { _ = riverinternaltest.WaitOrTimeout(t, subscribeChan) } - finishedJobs, err := queries.JobGetByIDMany(ctx, client.driver.GetDBPool(), - sliceutil.Map(originalJobs, func(m *dbsqlc.RiverJob) int64 { return m.ID })) + finishedJobs, err := client.driver.GetExecutor().JobGetByIDMany(ctx, + sliceutil.Map(originalJobs, func(m *rivertype.JobRow) int64 { return m.ID })) require.NoError(t, err) // Jobs aren't guaranteed to come back out of the queue in the same // order that we inserted them, so make sure to compare using a lookup // map. finishedJobsByID := sliceutil.KeyBy(finishedJobs, - func(m *dbsqlc.RiverJob) (int64, *dbsqlc.RiverJob) { return m.ID, m }) + func(m *rivertype.JobRow) (int64, *rivertype.JobRow) { return m.ID, m }) for i, originalJob := range originalJobs { // This loop will check all jobs that were to be rescheduled, but @@ -2119,7 +2126,7 @@ func Test_Client_RetryPolicy(t *testing.T) { // how it would've looked after being run through the queue. originalJob.Attempt += 1 - expectedNextScheduledAt := client.config.RetryPolicy.NextRetry(dbsqlc.JobRowFromInternal(originalJob)) + expectedNextScheduledAt := client.config.RetryPolicy.NextRetry(originalJob) t.Logf("Attempt number %d scheduled %v from original `attempted_at`", originalJob.Attempt, finishedJob.ScheduledAt.Sub(*originalJob.AttemptedAt)) @@ -2131,7 +2138,7 @@ func Test_Client_RetryPolicy(t *testing.T) { // time.Now into adapter which may happen with baseservice require.WithinDuration(t, expectedNextScheduledAt, finishedJob.ScheduledAt, 2*time.Second) - require.Equal(t, dbsqlc.JobStateRetryable, finishedJob.State) + require.Equal(t, rivertype.JobStateRetryable, finishedJob.State) } // One last discarded job. @@ -2145,7 +2152,7 @@ func Test_Client_RetryPolicy(t *testing.T) { // TODO(brandur): See note on tolerance above. 
require.WithinDuration(t, time.Now(), *finishedJob.FinalizedAt, 2*time.Second) - require.Equal(t, dbsqlc.JobStateDiscarded, finishedJob.State) + require.Equal(t, rivertype.JobStateDiscarded, finishedJob.State) } }) } @@ -2172,6 +2179,8 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("Success", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + // Fail/succeed jobs based on their name so we can get a mix of both to // verify. config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { @@ -2181,7 +2190,7 @@ func Test_Client_Subscribe(t *testing.T) { return nil }) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) subscribeChan, cancel := client.Subscribe(EventKindJobCompleted, EventKindJobFailed) t.Cleanup(cancel) @@ -2240,6 +2249,8 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("CompletedOnly", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { if strings.HasPrefix(job.Args.Name, "failed") { return errors.New("job error") @@ -2247,7 +2258,7 @@ func Test_Client_Subscribe(t *testing.T) { return nil }) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) subscribeChan, cancel := client.Subscribe(EventKindJobCompleted) t.Cleanup(cancel) @@ -2281,6 +2292,8 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("FailedOnly", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { if strings.HasPrefix(job.Args.Name, "failed") { return errors.New("job error") @@ -2288,7 +2301,7 @@ func Test_Client_Subscribe(t *testing.T) { return nil }) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) subscribeChan, cancel := client.Subscribe(EventKindJobFailed) t.Cleanup(cancel) @@ -2322,11 +2335,13 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("EventsDropWithNoListeners", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return nil }) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) // A first channel that we'll use to make sure all the expected jobs are // finished. 
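Across these test conversions a single setup pattern recurs; the sketch below is assembled from this diff's own internal test helpers (newTestConfig, newTestClient, startClient, riverinternaltest.TestDB), so treat the exact signatures as illustrative rather than a stable API:

	dbPool := riverinternaltest.TestDB(ctx, t) // dedicated database/pool per test
	config := newTestConfig(t, nil)            // default test config, nil work func
	client := newTestClient(t, dbPool, config) // constructor now takes the pool rather than a ctx
	startClient(ctx, t, client)                // started only when the test actually works jobs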
@@ -2361,11 +2376,13 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("PanicOnUnknownKind", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return nil }) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) require.PanicsWithError(t, "unknown event kind: does_not_exist", func() { _, _ = client.Subscribe(EventKind("does_not_exist")) @@ -2375,11 +2392,13 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("SubscriptionCancellation", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return nil }) - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) subscribeChan, cancel := client.Subscribe(EventKindJobCompleted) cancel() @@ -2403,12 +2422,15 @@ func Test_Client_InsertTriggersImmediateWork(t *testing.T) { doneCh := make(chan struct{}) close(doneCh) // don't need to block any jobs from completing startedCh := make(chan int64) + + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, makeAwaitCallback(startedCh, doneCh)) config.FetchCooldown = 20 * time.Millisecond config.FetchPollInterval = 20 * time.Second // essentially disable polling config.Queues = map[string]QueueConfig{QueueDefault: {MaxWorkers: 2}} - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) statusUpdateCh := client.monitor.RegisterUpdates() insertedJob, err := client.Insert(ctx, callbackArgs{}, nil) @@ -2449,22 +2471,26 @@ func Test_Client_JobCompletion(t *testing.T) { ctx := context.Background() type testBundle struct { + DBPool *pgxpool.Pool SubscribeChan <-chan *Event } setup := func(t *testing.T, config *Config) (*Client[pgx.Tx], *testBundle) { t.Helper() - client := runNewTestClient(ctx, t, config) + dbPool := riverinternaltest.TestDB(ctx, t) + client := newTestClient(t, dbPool, config) + startClient(ctx, t, client) subscribeChan, cancel := client.Subscribe(EventKindJobCancelled, EventKindJobCompleted, EventKindJobFailed) t.Cleanup(cancel) - return client, &testBundle{SubscribeChan: subscribeChan} + return client, &testBundle{ + DBPool: dbPool, + SubscribeChan: subscribeChan, + } } - queries := dbsqlc.New() - t.Run("JobThatReturnsNilIsCompleted", func(t *testing.T) { t.Parallel() @@ -2493,21 +2519,22 @@ func Test_Client_JobCompletion(t *testing.T) { t.Parallel() require := require.New(t) - var dbPool *pgxpool.Pool + var exec riverdriver.Executor now := time.Now().UTC() config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { - _, err := queries.JobSetState(ctx, dbPool, dbsqlc.JobSetStateParams{ + _, err := exec.JobUpdate(ctx, &riverdriver.JobUpdateParams{ ID: job.ID, FinalizedAtDoUpdate: true, FinalizedAt: &now, - State: dbsqlc.JobStateCompleted, + StateDoUpdate: true, + State: rivertype.JobStateCompleted, }) require.NoError(err) return nil }) client, bundle := setup(t, config) - dbPool = client.driver.GetDBPool() + exec = client.driver.GetExecutor() job, err := client.Insert(ctx, callbackArgs{}, nil) require.NoError(err) @@ -2577,25 +2604,29 @@ func Test_Client_JobCompletion(t *testing.T) { t.Parallel() require := require.New(t) - var dbPool *pgxpool.Pool now := time.Now().UTC() - config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { - _, err := queries.JobSetState(ctx, dbPool, dbsqlc.JobSetStateParams{ + + 
client, bundle := setup(t, newTestConfig(t, nil)) + + type JobArgs struct { + JobArgsReflectKind[JobArgs] + } + + AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { + _, err := client.driver.GetExecutor().JobUpdate(ctx, &riverdriver.JobUpdateParams{ ID: job.ID, - ErrorDoUpdate: true, - Error: []byte("{\"error\": \"oops\"}"), + ErrorsDoUpdate: true, + Errors: [][]byte{[]byte("{\"error\": \"oops\"}")}, FinalizedAtDoUpdate: true, FinalizedAt: &now, - State: dbsqlc.JobStateDiscarded, + StateDoUpdate: true, + State: rivertype.JobStateDiscarded, }) require.NoError(err) return errors.New("oops") - }) - - client, bundle := setup(t, config) - dbPool = client.driver.GetDBPool() + })) - job, err := client.Insert(ctx, callbackArgs{}, nil) + job, err := client.Insert(ctx, JobArgs{}, nil) require.NoError(err) event := riverinternaltest.WaitOrTimeout(t, bundle.SubscribeChan) @@ -2613,24 +2644,26 @@ func Test_Client_JobCompletion(t *testing.T) { t.Parallel() require := require.New(t) - var dbPool *pgxpool.Pool now := time.Now().UTC() - var updatedJob *Job[callbackArgs] - config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { - tx, err := dbPool.Begin(ctx) + client, bundle := setup(t, newTestConfig(t, nil)) + + type JobArgs struct { + JobArgsReflectKind[JobArgs] + } + + var updatedJob *Job[JobArgs] + AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { + tx, err := bundle.DBPool.Begin(ctx) require.NoError(err) updatedJob, err = JobCompleteTx[*riverpgxv5.Driver](ctx, tx, job) require.NoError(err) return tx.Commit(ctx) - }) - - client, bundle := setup(t, config) - dbPool = client.driver.GetDBPool() + })) - job, err := client.Insert(ctx, callbackArgs{}, nil) + job, err := client.Insert(ctx, JobArgs{}, nil) require.NoError(err) event := riverinternaltest.WaitOrTimeout(t, bundle.SubscribeChan) @@ -2678,19 +2711,19 @@ func Test_Client_UnknownJobKindErrorsTheJob(t *testing.T) { subscribeChan, cancel := client.Subscribe(EventKindJobFailed) t.Cleanup(cancel) - insertParams, err := insertParamsFromArgsAndOptions(unregisteredJobArgs{}, nil) + insertParams, _, err := insertParamsFromArgsAndOptions(unregisteredJobArgs{}, nil) require.NoError(err) - insertRes, err := client.adapter.JobInsert(ctx, insertParams) + insertedJob, err := client.driver.GetExecutor().JobInsert(ctx, insertParams) require.NoError(err) event := riverinternaltest.WaitOrTimeout(t, subscribeChan) - require.Equal(insertRes.Job.ID, event.Job.ID) - require.Equal("RandomWorkerNameThatIsNeverRegistered", insertRes.Job.Kind) + require.Equal(insertedJob.ID, event.Job.ID) + require.Equal("RandomWorkerNameThatIsNeverRegistered", insertedJob.Kind) require.Len(event.Job.Errors, 1) require.Equal((&UnknownJobKindError{Kind: "RandomWorkerNameThatIsNeverRegistered"}).Error(), event.Job.Errors[0].Error) require.Equal(JobStateRetryable, event.Job.State) // Ensure that ScheduledAt was updated with next run time: - require.True(event.Job.ScheduledAt.After(insertRes.Job.ScheduledAt)) + require.True(event.Job.ScheduledAt.After(insertedJob.ScheduledAt)) // It's the 1st attempt that failed. Attempt won't be incremented again until // the job gets fetched a 2nd time. 
require.Equal(1, event.Job.Attempt) @@ -2706,11 +2739,13 @@ func Test_Client_Start_Error(t *testing.T) { t.Run("NoQueueConfiguration", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, nil) config.Queues = nil config.Workers = nil - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) err := client.Start(ctx) require.EqualError(t, err, "client Queues and Workers must be configured for a client to start working") }) @@ -2718,10 +2753,12 @@ func Test_Client_Start_Error(t *testing.T) { t.Run("NoRegisteredWorkers", func(t *testing.T) { t.Parallel() + dbPool := riverinternaltest.TestDB(ctx, t) + config := newTestConfig(t, nil) config.Workers = NewWorkers() // initialized, but empty - client := newTestClient(ctx, t, config) + client := newTestClient(t, dbPool, config) err := client.Start(ctx) require.EqualError(t, err, "at least one Worker must be added to the Workers bundle") }) @@ -2736,8 +2773,7 @@ func Test_Client_Start_Error(t *testing.T) { config := newTestConfig(t, nil) - client := newTestClient(ctx, t, config) - client.driver = riverpgxv5.New(dbPool) + client := newTestClient(t, dbPool, config) err = client.Start(ctx) require.Error(t, err) @@ -2752,7 +2788,8 @@ func Test_NewClient_BaseServiceName(t *testing.T) { t.Parallel() ctx := context.Background() - client := newTestClient(ctx, t, newTestConfig(t, nil)) + dbPool := riverinternaltest.TestDB(ctx, t) + client := newTestClient(t, dbPool, newTestConfig(t, nil)) // Ensure we get the clean name "Client" instead of the fully qualified name // with generic type param: require.Equal(t, "Client", client.baseService.Name) @@ -2808,13 +2845,16 @@ func Test_NewClient_Defaults(t *testing.T) { }) require.NoError(t, err) - require.Zero(t, client.adapter.(*dbadapter.StandardAdapter).Config.AdvisoryLockPrefix) //nolint:forcetypeassert + require.Zero(t, client.uniqueInserter.AdvisoryLockPrefix) jobCleaner := maintenance.GetService[*maintenance.JobCleaner](client.queueMaintainer) require.Equal(t, maintenance.CancelledJobRetentionPeriodDefault, jobCleaner.Config.CancelledJobRetentionPeriod) require.Equal(t, maintenance.CompletedJobRetentionPeriodDefault, jobCleaner.Config.CompletedJobRetentionPeriod) require.Equal(t, maintenance.DiscardedJobRetentionPeriodDefault, jobCleaner.Config.DiscardedJobRetentionPeriod) + enqueuer := maintenance.GetService[*maintenance.PeriodicJobEnqueuer](client.queueMaintainer) + require.Zero(t, enqueuer.Config.AdvisoryLockPrefix) + require.Nil(t, client.config.ErrorHandler) require.Equal(t, FetchCooldownDefault, client.config.FetchCooldown) require.Equal(t, FetchPollIntervalDefault, client.config.FetchPollInterval) @@ -2856,13 +2896,16 @@ func Test_NewClient_Overrides(t *testing.T) { }) require.NoError(t, err) - require.Equal(t, int32(123_456), client.adapter.(*dbadapter.StandardAdapter).Config.AdvisoryLockPrefix) //nolint:forcetypeassert + require.Equal(t, int32(123_456), client.uniqueInserter.AdvisoryLockPrefix) jobCleaner := maintenance.GetService[*maintenance.JobCleaner](client.queueMaintainer) require.Equal(t, 1*time.Hour, jobCleaner.Config.CancelledJobRetentionPeriod) require.Equal(t, 2*time.Hour, jobCleaner.Config.CompletedJobRetentionPeriod) require.Equal(t, 3*time.Hour, jobCleaner.Config.DiscardedJobRetentionPeriod) + enqueuer := maintenance.GetService[*maintenance.PeriodicJobEnqueuer](client.queueMaintainer) + require.Equal(t, int32(123_456), enqueuer.Config.AdvisoryLockPrefix) + require.Equal(t, errorHandler, 
client.config.ErrorHandler) require.Equal(t, 123*time.Millisecond, client.config.FetchCooldown) require.Equal(t, 124*time.Millisecond, client.config.FetchPollInterval) @@ -3246,16 +3289,15 @@ func TestInsertParamsFromJobArgsAndOptions(t *testing.T) { t.Run("Defaults", func(t *testing.T) { t.Parallel() - insertParams, err := insertParamsFromArgsAndOptions(noOpArgs{}, nil) + insertParams, _, err := insertParamsFromArgsAndOptions(noOpArgs{}, nil) require.NoError(t, err) require.Equal(t, `{"name":""}`, string(insertParams.EncodedArgs)) require.Equal(t, (noOpArgs{}).Kind(), insertParams.Kind) require.Equal(t, rivercommon.MaxAttemptsDefault, insertParams.MaxAttempts) require.Equal(t, rivercommon.PriorityDefault, insertParams.Priority) require.Equal(t, QueueDefault, insertParams.Queue) - require.Equal(t, time.Time{}, insertParams.ScheduledAt) - require.Equal(t, []string(nil), insertParams.Tags) - require.False(t, insertParams.Unique) + require.Nil(t, insertParams.ScheduledAt) + require.Equal(t, []string{}, insertParams.Tags) }) t.Run("InsertOptsOverrides", func(t *testing.T) { @@ -3268,19 +3310,19 @@ func TestInsertParamsFromJobArgsAndOptions(t *testing.T) { ScheduledAt: time.Now().Add(time.Hour), Tags: []string{"tag1", "tag2"}, } - insertParams, err := insertParamsFromArgsAndOptions(noOpArgs{}, opts) + insertParams, _, err := insertParamsFromArgsAndOptions(noOpArgs{}, opts) require.NoError(t, err) require.Equal(t, 42, insertParams.MaxAttempts) require.Equal(t, 2, insertParams.Priority) require.Equal(t, "other", insertParams.Queue) - require.Equal(t, opts.ScheduledAt, insertParams.ScheduledAt) + require.Equal(t, opts.ScheduledAt, *insertParams.ScheduledAt) require.Equal(t, []string{"tag1", "tag2"}, insertParams.Tags) }) t.Run("WorkerInsertOptsOverrides", func(t *testing.T) { t.Parallel() - insertParams, err := insertParamsFromArgsAndOptions(&customInsertOptsJobArgs{}, nil) + insertParams, _, err := insertParamsFromArgsAndOptions(&customInsertOptsJobArgs{}, nil) require.NoError(t, err) // All these come from overrides in customInsertOptsJobArgs's definition: require.Equal(t, 42, insertParams.MaxAttempts) @@ -3292,7 +3334,7 @@ func TestInsertParamsFromJobArgsAndOptions(t *testing.T) { t.Run("PriorityIsLimitedTo4", func(t *testing.T) { t.Parallel() - insertParams, err := insertParamsFromArgsAndOptions(noOpArgs{}, &InsertOpts{Priority: 5}) + insertParams, _, err := insertParamsFromArgsAndOptions(noOpArgs{}, &InsertOpts{Priority: 5}) require.ErrorContains(t, err, "priority must be between 1 and 4") require.Nil(t, insertParams) }) @@ -3301,7 +3343,7 @@ func TestInsertParamsFromJobArgsAndOptions(t *testing.T) { t.Parallel() args := timeoutTestArgs{TimeoutValue: time.Hour} - insertParams, err := insertParamsFromArgsAndOptions(args, nil) + insertParams, _, err := insertParamsFromArgsAndOptions(args, nil) require.NoError(t, err) require.Equal(t, `{"timeout_value":3600000000000}`, string(insertParams.EncodedArgs)) }) @@ -3312,7 +3354,7 @@ func TestInsertParamsFromJobArgsAndOptions(t *testing.T) { // Ensure that unique opts are validated. No need to be exhaustive here // since we already have tests elsewhere for that. Just make sure validation // is running. 
- insertParams, err := insertParamsFromArgsAndOptions( + insertParams, _, err := insertParamsFromArgsAndOptions( noOpArgs{}, &InsertOpts{UniqueOpts: UniqueOpts{ByPeriod: 1 * time.Millisecond}}, ) @@ -3327,7 +3369,8 @@ func TestID(t *testing.T) { t.Run("IsGeneratedWhenNotSpecifiedInConfig", func(t *testing.T) { t.Parallel() - client := newTestClient(ctx, t, newTestConfig(t, nil)) + dbPool := riverinternaltest.TestDB(ctx, t) + client := newTestClient(t, dbPool, newTestConfig(t, nil)) require.NotEmpty(t, client.ID()) }) @@ -3335,7 +3378,8 @@ func TestID(t *testing.T) { t.Parallel() config := newTestConfig(t, nil) config.ID = "my-client-id" - client := newTestClient(ctx, t, config) + dbPool := riverinternaltest.TestDB(ctx, t) + client := newTestClient(t, dbPool, config) require.Equal(t, "my-client-id", client.ID()) }) } @@ -3485,7 +3529,9 @@ func TestUniqueOpts(t *testing.T) { workers := NewWorkers() AddWorker(workers, &noOpWorker{}) - client := newTestClient(ctx, t, newTestConfig(t, nil)) + dbPool := riverinternaltest.TestDB(ctx, t) + + client := newTestClient(t, dbPool, newTestConfig(t, nil)) return client, &testBundle{} } diff --git a/driver_test.go b/driver_test.go new file mode 100644 index 00000000..a3f5bd29 --- /dev/null +++ b/driver_test.go @@ -0,0 +1,195 @@ +package river_test + +import ( + "context" + "database/sql" + "runtime" + "testing" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/stdlib" + "github.com/stretchr/testify/require" + + "github.com/riverqueue/river/internal/rivercommon" + "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/riverinternaltest/riverdrivertest" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverdatabasesql" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivertype" +) + +func TestDriverDatabaseSQL_Executor(t *testing.T) { + t.Parallel() + + ctx := context.Background() + dbPool := riverinternaltest.TestDB(ctx, t) + + stdPool := stdlib.OpenDBFromPool(dbPool) + t.Cleanup(func() { require.NoError(t, stdPool.Close()) }) + + driver := riverdatabasesql.New(nil) + riverdrivertest.ExerciseExecutorMigrationOnly(ctx, t, driver, func(ctx context.Context, t *testing.T) *sql.Tx { + t.Helper() + + tx, err := stdPool.BeginTx(ctx, nil) + require.NoError(t, err) + t.Cleanup(func() { _ = tx.Rollback() }) + + return tx + }) +} + +func TestDriverRiverPgxV5_Executor(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + driver := riverpgxv5.New(nil) + riverdrivertest.ExerciseExecutorFull(ctx, t, driver, func(ctx context.Context, t *testing.T) pgx.Tx { + t.Helper() + + return riverinternaltest.TestTx(ctx, t) + }) +} + +func TestDriverRiverPgxV5_Listener(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + riverdrivertest.ExerciseListener(ctx, t, func(ctx context.Context, t *testing.T) riverdriver.Driver[pgx.Tx] { + t.Helper() + + dbPool := riverinternaltest.TestDB(ctx, t) + return riverpgxv5.New(dbPool) + }) +} + +func BenchmarkDriverRiverPgxV5_Executor(b *testing.B) { + const ( + clientID = "test-client-id" + timeout = 5 * time.Minute + ) + + ctx := context.Background() + + type testBundle struct{} + + setupPool := func(b *testing.B) (riverdriver.Executor, *testBundle) { + b.Helper() + + driver := riverpgxv5.New(riverinternaltest.TestDB(ctx, b)) + + b.ResetTimer() + + return driver.GetExecutor(), &testBundle{} + } + + setupTx := func(b *testing.B) (riverdriver.Executor, *testBundle) { + b.Helper() + + driver 
:= riverpgxv5.New(nil) + tx := riverinternaltest.TestTx(ctx, b) + + b.ResetTimer() + + return driver.UnwrapExecutor(tx), &testBundle{} + } + + makeInsertParams := func() *riverdriver.JobInsertParams { + return &riverdriver.JobInsertParams{ + EncodedArgs: []byte(`{}`), + Kind: "fake_job", + MaxAttempts: rivercommon.MaxAttemptsDefault, + Metadata: []byte(`{}`), + Priority: rivercommon.PriorityDefault, + Queue: rivercommon.QueueDefault, + ScheduledAt: nil, + State: rivertype.JobStateAvailable, + } + } + + b.Run("JobInsert_Sequential", func(b *testing.B) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + exec, _ := setupTx(b) + + for i := 0; i < b.N; i++ { + if _, err := exec.JobInsert(ctx, makeInsertParams()); err != nil { + b.Fatal(err) + } + } + }) + + b.Run("JobInsert_Parallel", func(b *testing.B) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + exec, _ := setupPool(b) + + b.RunParallel(func(pb *testing.PB) { + i := 0 + for pb.Next() { + if _, err := exec.JobInsert(ctx, makeInsertParams()); err != nil { + b.Fatal(err) + } + i++ + } + }) + }) + + b.Run("JobGetAvailable_100_Sequential", func(b *testing.B) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + exec, _ := setupTx(b) + + for i := 0; i < b.N*100; i++ { + if _, err := exec.JobInsert(ctx, makeInsertParams()); err != nil { + b.Fatal(err) + } + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + if _, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: clientID, + Max: 100, + Queue: rivercommon.QueueDefault, + }); err != nil { + b.Fatal(err) + } + } + }) + + b.Run("JobGetAvailable_100_Parallel", func(b *testing.B) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + exec, _ := setupPool(b) + + for i := 0; i < b.N*100*runtime.NumCPU(); i++ { + if _, err := exec.JobInsert(ctx, makeInsertParams()); err != nil { + b.Fatal(err) + } + } + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if _, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: clientID, + Max: 100, + Queue: rivercommon.QueueDefault, + }); err != nil { + b.Fatal(err) + } + } + }) + }) +} diff --git a/go.mod b/go.mod index b1dfe431..3f1b02bb 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/riverqueue/river -go 1.21 +go 1.21.4 replace github.com/riverqueue/river/riverdriver => ./riverdriver @@ -8,6 +8,8 @@ replace github.com/riverqueue/river/riverdriver/riverpgxv5 => ./riverdriver/rive replace github.com/riverqueue/river/riverdriver/riverdatabasesql => ./riverdriver/riverdatabasesql +replace github.com/riverqueue/river/rivertype => ./rivertype + require ( github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa github.com/jackc/pgx/v5 v5.5.3 @@ -16,6 +18,7 @@ require ( github.com/riverqueue/river/riverdriver v0.0.20 github.com/riverqueue/river/riverdriver/riverdatabasesql v0.0.20 github.com/riverqueue/river/riverdriver/riverpgxv5 v0.0.20 + github.com/riverqueue/river/rivertype v0.0.20 github.com/robfig/cron/v3 v3.0.1 github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 diff --git a/insert_opts.go b/insert_opts.go index 7062bbf7..a8492a75 100644 --- a/insert_opts.go +++ b/insert_opts.go @@ -133,6 +133,16 @@ func (o *UniqueOpts) isEmpty() bool { o.ByState == nil } +var jobStateAll = []rivertype.JobState{ //nolint:gochecknoglobals + rivertype.JobStateAvailable, + rivertype.JobStateCancelled, + rivertype.JobStateCompleted, + rivertype.JobStateDiscarded, + 
rivertype.JobStateRetryable, + rivertype.JobStateRunning, + rivertype.JobStateScheduled, +} + func (o *UniqueOpts) validate() error { if o.isEmpty() { return nil diff --git a/insert_opts_test.go b/insert_opts_test.go new file mode 100644 index 00000000..f75a134c --- /dev/null +++ b/insert_opts_test.go @@ -0,0 +1,25 @@ +package river + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/riverqueue/river/rivertype" +) + +func TestJobUniqueOpts_validate(t *testing.T) { + t.Parallel() + + require.NoError(t, (&UniqueOpts{}).validate()) + require.NoError(t, (&UniqueOpts{ + ByArgs: true, + ByPeriod: 1 * time.Second, + ByQueue: true, + ByState: []rivertype.JobState{rivertype.JobStateAvailable}, + }).validate()) + + require.EqualError(t, (&UniqueOpts{ByPeriod: 1 * time.Millisecond}).validate(), "JobUniqueOpts.ByPeriod should not be less than 1 second") + require.EqualError(t, (&UniqueOpts{ByState: []rivertype.JobState{rivertype.JobState("invalid")}}).validate(), `JobUniqueOpts.ByState contains invalid state "invalid"`) +} diff --git a/internal/dbadapter/db_adapter.go b/internal/dbadapter/db_adapter.go deleted file mode 100644 index ce3ad01a..00000000 --- a/internal/dbadapter/db_adapter.go +++ /dev/null @@ -1,643 +0,0 @@ -package dbadapter - -import ( - "context" - "errors" - "fmt" - "math" - "slices" - "strings" - "time" - - "github.com/jackc/pgx/v5" - - "github.com/riverqueue/river/internal/baseservice" - "github.com/riverqueue/river/internal/dblist" - "github.com/riverqueue/river/internal/dbsqlc" - "github.com/riverqueue/river/internal/notifier" - "github.com/riverqueue/river/internal/util/dbutil" - "github.com/riverqueue/river/internal/util/hashutil" - "github.com/riverqueue/river/internal/util/ptrutil" - "github.com/riverqueue/river/internal/util/sliceutil" - "github.com/riverqueue/river/internal/util/valutil" - "github.com/riverqueue/river/riverdriver" - "github.com/riverqueue/river/rivertype" -) - -// When a job has specified unique options, but has not set the ByState -// parameter explicitly, this is the set of default states that are used to -// determine uniqueness. So for example, a new unique job may be inserted even -// if another job already exists, as long as that other job is set `cancelled` -// or `discarded`. -var defaultUniqueStates = []string{ //nolint:gochecknoglobals - string(dbsqlc.JobStateAvailable), - string(dbsqlc.JobStateCompleted), - string(dbsqlc.JobStateRunning), - string(dbsqlc.JobStateRetryable), - string(dbsqlc.JobStateScheduled), -} - -type JobToComplete struct { - ID int64 - FinalizedAt time.Time -} - -// JobInsertParams are parameters for Adapter's `JobInsert*` functions. They -// roughly reflect the properties of an inserted job, but only ones that are -// allowed to be used on input. -type JobInsertParams struct { - EncodedArgs []byte - Kind string - MaxAttempts int - Metadata []byte - Priority int - Queue string - ScheduledAt time.Time - State dbsqlc.JobState - Tags []string - Unique bool - UniqueByArgs bool - UniqueByPeriod time.Duration - UniqueByQueue bool - UniqueByState []dbsqlc.JobState -} - -type JobInsertResult struct { - // Job is information about an inserted job. - // - // For an insertion that was skipped due to a duplicate, contains the job - // that already existed. - Job *dbsqlc.RiverJob - - // UniqueSkippedAsDuplicate indicates that the insert didn't occur because - // it was a unique job, and another unique job within the unique parameters - // was already in the database.
- UniqueSkippedAsDuplicate bool -} - -type SortOrder int - -const ( - SortOrderUnspecified SortOrder = iota - SortOrderAsc - SortOrderDesc -) - -type JobListOrderBy struct { - Expr string - Order SortOrder -} - -type JobListParams struct { - Conditions string - LimitCount int32 - NamedArgs map[string]any - OrderBy []JobListOrderBy - Priorities []int16 - Queues []string - State rivertype.JobState -} - -// Adapter is an interface to the various database-level operations which River -// needs to operate. It's quite non-generic for the moment, but the idea is that -// it'd give us a way to implement access to non-Postgres databases, and may be -// reimplemented for pro features or exposed to users for customization. -// -// TODO: If exposing publicly, we must first make sure to add an intermediary -// layer between Adapter types and dbsqlc types. We return `dbsqlc.RiverJob` for -// expedience, but this should be converted to a more stable API if Adapter -// would be exported. -type Adapter interface { - JobCancel(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) - JobCancelTx(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) - - JobInsert(ctx context.Context, params *JobInsertParams) (*JobInsertResult, error) - JobInsertTx(ctx context.Context, tx pgx.Tx, params *JobInsertParams) (*JobInsertResult, error) - - // TODO: JobInsertMany functions don't support unique jobs. - JobInsertMany(ctx context.Context, params []*JobInsertParams) (int64, error) - JobInsertManyTx(ctx context.Context, tx pgx.Tx, params []*JobInsertParams) (int64, error) - - JobGet(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) - JobGetTx(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) - JobGetAvailable(ctx context.Context, queueName string, limit int32) ([]*dbsqlc.RiverJob, error) - JobGetAvailableTx(ctx context.Context, tx pgx.Tx, queueName string, limit int32) ([]*dbsqlc.RiverJob, error) - - JobList(ctx context.Context, params JobListParams) ([]*dbsqlc.RiverJob, error) - JobListTx(ctx context.Context, tx pgx.Tx, params JobListParams) ([]*dbsqlc.RiverJob, error) - - // JobRetryImmediately makes a job available to run immediately, but only if - // it's not already running. - JobRetryImmediately(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) - // JobRetryImmediatelyTx makes a job available to run immediately, but only if - // it's not already running. - JobRetryImmediatelyTx(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) - - // JobSetStateIfRunning sets the state of a currently running job. Jobs which are not - // running (i.e. which have already had their state set to something - // new through an explicit snooze or cancellation) are ignored. - JobSetStateIfRunning(ctx context.Context, params *JobSetStateIfRunningParams) (*dbsqlc.RiverJob, error) - - // LeadershipAttemptElect attempts to elect a leader for the given name. The - // bool alreadyElected indicates whether this is a potential reelection of - // an already-elected leader. If the election is successful because there is - // no leader or the previous leader expired, the provided leaderID will be - // set as the new leader with a TTL of ttl. - // - // Returns whether this leader was successfully elected or an error if one - // occurred. - LeadershipAttemptElect(ctx context.Context, alreadyElected bool, name, leaderID string, ttl time.Duration) (bool, error) - - // LeadershipResign resigns any currently held leaderships for the given name - // and leader ID.
- LeadershipResign(ctx context.Context, name, leaderID string) error -} - -type StandardAdapter struct { - baseservice.BaseService - - Config *StandardAdapterConfig // exported so top-level package can test against it; unexport if dbadapter is ever made public - deadlineTimeout time.Duration - executor dbutil.Executor - queries *dbsqlc.Queries - workerName string -} - -type StandardAdapterConfig struct { - // AdvisoryLockPrefix is a configurable 32-bit prefix that River will use - // when generating any key to acquire a Postgres advisory lock. - AdvisoryLockPrefix int32 - - // Executor is a database executor to perform database operations with. In - // non-test environments it's a database pool. - Executor dbutil.Executor - - // DeadlineTimeout is a timeout used to set a context deadline for every - // adapter operation. - DeadlineTimeout time.Duration - - // WorkerName is a name to assign this worker. - WorkerName string -} - -// TODO: If `StandardAdapter` is ever exposed publicly, we should find a way to -// internalize archetype. Some options might be for `NewStandardAdapter` to -// return the `Adapter` interface instead of a concrete struct so that its -// properties aren't visible, and we could move base service initialization out -// to the client that accepts it so the user is never aware of its existence. -func NewStandardAdapter(archetype *baseservice.Archetype, config *StandardAdapterConfig) *StandardAdapter { - return baseservice.Init(archetype, &StandardAdapter{ - Config: config, - deadlineTimeout: valutil.ValOrDefault(config.DeadlineTimeout, 5*time.Second), - executor: config.Executor, - queries: dbsqlc.New(), - workerName: config.WorkerName, - }) -} - -func (a *StandardAdapter) JobCancel(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) { - return dbutil.WithTxV(ctx, a.executor, func(ctx context.Context, tx pgx.Tx) (*dbsqlc.RiverJob, error) { - return a.JobCancelTx(ctx, tx, id) - }) -} - -func (a *StandardAdapter) JobCancelTx(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - cancelledAt, err := a.TimeNowUTC().MarshalJSON() - if err != nil { - return nil, err - } - - job, err := a.queries.JobCancel(ctx, a.executor, dbsqlc.JobCancelParams{ - CancelAttemptedAt: cancelledAt, - ID: id, - JobControlTopic: string(notifier.NotificationTopicJobControl), - }) - if errors.Is(err, pgx.ErrNoRows) { - return nil, riverdriver.ErrNoRows - } - if err != nil { - return nil, err - } - - return job, nil -} - -func (a *StandardAdapter) JobInsert(ctx context.Context, params *JobInsertParams) (*JobInsertResult, error) { - return dbutil.WithTxV(ctx, a.executor, func(ctx context.Context, tx pgx.Tx) (*JobInsertResult, error) { - return a.JobInsertTx(ctx, tx, params) - }) -} - -func (a *StandardAdapter) JobInsertTx(ctx context.Context, tx pgx.Tx, params *JobInsertParams) (*JobInsertResult, error) { - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - if params.Unique { - // For uniqueness checks, buildUniqueParams returns an advisory lock hash to use for the lock, - // parameters to check for an existing unique job with the same - // properties, and a boolean indicating whether a uniqueness check - // should be performed at all (in some cases the check can be skipped if - // we can determine ahead of time that this insert will not violate - // uniqueness conditions).
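- // As a concrete illustration (example values only, inferred from the Write
- // calls below): a unique job of kind "email" with ByQueue set and a one-hour
- // ByPeriod hashes an input shaped roughly like
- //
- //   unique_key kind=email &period=2024-01-01T00:00:00Z &queue=default &state=available,completed,running,retryable,scheduled
- //
- // down to a single advisory lock key, where the state list falls back to
- // defaultUniqueStates whenever UniqueByState is unset.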
- buildUniqueParams := func() (*hashutil.AdvisoryLockHash, *dbsqlc.JobGetByKindAndUniquePropertiesParams, bool) { - advisoryLockHash := hashutil.NewAdvisoryLockHash(a.Config.AdvisoryLockPrefix) - advisoryLockHash.Write([]byte("unique_key")) - advisoryLockHash.Write([]byte("kind=" + params.Kind)) - - getParams := dbsqlc.JobGetByKindAndUniquePropertiesParams{ - Kind: params.Kind, - } - - if params.UniqueByArgs { - advisoryLockHash.Write([]byte("&args=")) - advisoryLockHash.Write(params.EncodedArgs) - - getParams.Args = params.EncodedArgs - getParams.ByArgs = true - } - - if params.UniqueByPeriod != time.Duration(0) { - lowerPeriodBound := a.TimeNowUTC().Truncate(params.UniqueByPeriod) - - advisoryLockHash.Write([]byte("&period=" + lowerPeriodBound.Format(time.RFC3339))) - - getParams.ByCreatedAt = true - getParams.CreatedAtStart = lowerPeriodBound - getParams.CreatedAtEnd = lowerPeriodBound.Add(params.UniqueByPeriod) - } - - if params.UniqueByQueue { - advisoryLockHash.Write([]byte("&queue=" + params.Queue)) - - getParams.ByQueue = true - getParams.Queue = params.Queue - } - - { - stateSet := defaultUniqueStates - if len(params.UniqueByState) > 0 { - stateSet = sliceutil.Map(params.UniqueByState, func(s dbsqlc.JobState) string { return string(s) }) - } - - advisoryLockHash.Write([]byte("&state=" + strings.Join(stateSet, ","))) - - if !slices.Contains(stateSet, string(params.State)) { - return nil, nil, false - } - - getParams.ByState = true - getParams.State = stateSet - } - - return advisoryLockHash, &getParams, true - } - - if advisoryLockHash, getParams, doUniquenessCheck := buildUniqueParams(); doUniquenessCheck { - // The wrapping transaction should maintain snapshot consistency even if - // we were to only have a SELECT + INSERT, but given that a conflict is - // possible, obtain an advisory lock based on the parameters of the - // unique job first, and have contending inserts wait for it. This is a - // synchronous lock so we rely on context timeout in case something goes - // wrong and it's blocking for too long. - if err := a.queries.PGAdvisoryXactLock(ctx, tx, advisoryLockHash.Key()); err != nil { - return nil, fmt.Errorf("error acquiring unique lock: %w", err) - } - - existing, err := a.queries.JobGetByKindAndUniqueProperties(ctx, tx, *getParams) - if err != nil { - if !errors.Is(err, pgx.ErrNoRows) { - return nil, fmt.Errorf("error getting unique job: %w", err) - } - } - - if !existing.CreatedAt.IsZero() { - return &JobInsertResult{Job: existing, UniqueSkippedAsDuplicate: true}, nil - } - } - } - - var scheduledAt *time.Time - if !params.ScheduledAt.IsZero() { - scheduledAt = ptrutil.Ptr(params.ScheduledAt.UTC()) - } - - // TODO: maybe want to handle defaults (queue name, priority, etc) at a higher level - // so that it's applied for all adapters consistently. 
- inserted, err := a.queries.JobInsert(ctx, tx, dbsqlc.JobInsertParams{ - Args: params.EncodedArgs, - CreatedAt: ptrutil.Ptr(a.TimeNowUTC()), - Kind: params.Kind, - MaxAttempts: int16(min(params.MaxAttempts, math.MaxInt16)), - Metadata: params.Metadata, - Priority: int16(min(params.Priority, math.MaxInt16)), - Queue: params.Queue, - ScheduledAt: scheduledAt, - State: params.State, - Tags: params.Tags, - }) - if err != nil { - return nil, err - } - - return &JobInsertResult{Job: inserted}, nil -} - -func (a *StandardAdapter) JobInsertMany(ctx context.Context, params []*JobInsertParams) (int64, error) { - return dbutil.WithTxV(ctx, a.executor, func(ctx context.Context, tx pgx.Tx) (int64, error) { - return a.JobInsertManyTx(ctx, tx, params) - }) -} - -func (a *StandardAdapter) JobInsertManyTx(ctx context.Context, tx pgx.Tx, params []*JobInsertParams) (int64, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - insertJobsParams := make([]dbsqlc.JobInsertManyParams, len(params)) - - now := a.TimeNowUTC() - - for i := 0; i < len(params); i++ { - params := params[i] - - metadata := params.Metadata - if metadata == nil { - metadata = []byte("{}") - } - - tags := params.Tags - if tags == nil { - tags = []string{} - } - scheduledAt := now - if !params.ScheduledAt.IsZero() { - scheduledAt = params.ScheduledAt.UTC() - } - - insertJobsParams[i] = dbsqlc.JobInsertManyParams{ - Args: params.EncodedArgs, - Kind: params.Kind, - MaxAttempts: int16(min(params.MaxAttempts, math.MaxInt16)), - Metadata: metadata, - Priority: int16(min(params.Priority, math.MaxInt16)), - Queue: params.Queue, - State: params.State, - ScheduledAt: scheduledAt, - Tags: tags, - } - } - - numInserted, err := a.queries.JobInsertMany(ctx, tx, insertJobsParams) - if err != nil { - return 0, fmt.Errorf("error inserting many jobs: %w", err) - } - - return numInserted, nil -} - -func (a *StandardAdapter) JobGet(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) { - return dbutil.WithTxV(ctx, a.executor, func(ctx context.Context, tx pgx.Tx) (*dbsqlc.RiverJob, error) { - return a.JobGetTx(ctx, tx, id) - }) -} - -func (a *StandardAdapter) JobGetTx(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - job, err := a.queries.JobGetByID(ctx, tx, id) - if errors.Is(err, pgx.ErrNoRows) { - return nil, riverdriver.ErrNoRows - } - if err != nil { - return nil, err - } - - return job, nil -} - -func (a *StandardAdapter) JobGetAvailable(ctx context.Context, queueName string, limit int32) ([]*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - tx, err := a.executor.Begin(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback(ctx) - - jobs, err := a.JobGetAvailableTx(ctx, tx, queueName, limit) - if err != nil { - return nil, err - } - if err := tx.Commit(ctx); err != nil { - return nil, err - } - return jobs, nil -} - -func (a *StandardAdapter) JobGetAvailableTx(ctx context.Context, tx pgx.Tx, queueName string, limit int32) ([]*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - jobs, err := a.queries.JobGetAvailable(ctx, tx, dbsqlc.JobGetAvailableParams{ - LimitCount: limit, - Queue: queueName, - Worker: a.workerName, - }) - if err != nil { - return nil, err - } - return jobs, nil -} - -func (a *StandardAdapter) JobList(ctx context.Context, params JobListParams) ([]*dbsqlc.RiverJob, error) { - 
ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - tx, err := a.executor.Begin(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback(ctx) - - jobs, err := a.JobListTx(ctx, tx, params) - if err != nil { - return nil, err - } - if err := tx.Commit(ctx); err != nil { - return nil, err - } - return jobs, nil -} - -func (a *StandardAdapter) JobListTx(ctx context.Context, tx pgx.Tx, params JobListParams) ([]*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - var conditionsBuilder strings.Builder - - orderBy := make([]dblist.JobListOrderBy, len(params.OrderBy)) - for i, o := range params.OrderBy { - orderBy[i] = dblist.JobListOrderBy{ - Expr: o.Expr, - Order: dblist.SortOrder(o.Order), - } - } - - namedArgs := params.NamedArgs - if namedArgs == nil { - namedArgs = make(map[string]any) - } - - if len(params.Queues) > 0 { - namedArgs["queues"] = params.Queues - conditionsBuilder.WriteString("queue = any(@queues::text[])") - if params.Conditions != "" { - conditionsBuilder.WriteString("\n AND ") - } - } - - if params.Conditions != "" { - conditionsBuilder.WriteString(params.Conditions) - } - - jobs, err := dblist.JobList(ctx, tx, dblist.JobListParams{ - Conditions: conditionsBuilder.String(), - LimitCount: params.LimitCount, - NamedArgs: namedArgs, - OrderBy: orderBy, - Priorities: params.Priorities, - State: dbsqlc.JobState(params.State), - }) - if err != nil { - return nil, err - } - return jobs, nil -} - -func (a *StandardAdapter) JobRetryImmediately(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) { - return dbutil.WithTxV(ctx, a.executor, func(ctx context.Context, tx pgx.Tx) (*dbsqlc.RiverJob, error) { - return a.JobRetryImmediatelyTx(ctx, tx, id) - }) -} - -func (a *StandardAdapter) JobRetryImmediatelyTx(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - job, err := a.queries.JobRetryImmediately(ctx, a.executor, id) - if errors.Is(err, pgx.ErrNoRows) { - return nil, riverdriver.ErrNoRows - } - if err != nil { - return nil, err - } - - return job, nil -} - -// JobSetStateIfRunningParams are parameters to update the state of a currently running -// job. Use one of the constructors below to ensure a correct combination of -// parameters. 
-type JobSetStateIfRunningParams struct { - ID int64 - errData []byte - finalizedAt *time.Time - maxAttempts *int - scheduledAt *time.Time - state dbsqlc.JobState -} - -func JobSetStateCancelled(id int64, finalizedAt time.Time, errData []byte) *JobSetStateIfRunningParams { - return &JobSetStateIfRunningParams{ID: id, errData: errData, finalizedAt: &finalizedAt, state: dbsqlc.JobStateCancelled} -} - -func JobSetStateCompleted(id int64, finalizedAt time.Time) *JobSetStateIfRunningParams { - return &JobSetStateIfRunningParams{ID: id, finalizedAt: &finalizedAt, state: dbsqlc.JobStateCompleted} -} - -func JobSetStateDiscarded(id int64, finalizedAt time.Time, errData []byte) *JobSetStateIfRunningParams { - return &JobSetStateIfRunningParams{ID: id, errData: errData, finalizedAt: &finalizedAt, state: dbsqlc.JobStateDiscarded} -} - -func JobSetStateErrorAvailable(id int64, scheduledAt time.Time, errData []byte) *JobSetStateIfRunningParams { - return &JobSetStateIfRunningParams{ID: id, errData: errData, scheduledAt: &scheduledAt, state: dbsqlc.JobStateAvailable} -} - -func JobSetStateErrorRetryable(id int64, scheduledAt time.Time, errData []byte) *JobSetStateIfRunningParams { - return &JobSetStateIfRunningParams{ID: id, errData: errData, scheduledAt: &scheduledAt, state: dbsqlc.JobStateRetryable} -} - -func JobSetStateSnoozed(id int64, scheduledAt time.Time, maxAttempts int) *JobSetStateIfRunningParams { - return &JobSetStateIfRunningParams{ID: id, maxAttempts: &maxAttempts, scheduledAt: &scheduledAt, state: dbsqlc.JobStateScheduled} -} - -func JobSetStateSnoozedAvailable(id int64, scheduledAt time.Time, maxAttempts int) *JobSetStateIfRunningParams { - return &JobSetStateIfRunningParams{ID: id, maxAttempts: &maxAttempts, scheduledAt: &scheduledAt, state: dbsqlc.JobStateAvailable} -} - -func (a *StandardAdapter) JobSetStateIfRunning(ctx context.Context, params *JobSetStateIfRunningParams) (*dbsqlc.RiverJob, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return a.queries.JobSetStateIfRunning(ctx, a.executor, dbsqlc.JobSetStateIfRunningParams{ - ID: params.ID, - ErrorDoUpdate: params.errData != nil, - Error: params.errData, - FinalizedAtDoUpdate: params.finalizedAt != nil, - FinalizedAt: params.finalizedAt, - MaxAttemptsUpdate: params.maxAttempts != nil, - MaxAttempts: int16(ptrutil.ValOrDefault(params.maxAttempts, 0)), // default never used - ScheduledAtDoUpdate: params.scheduledAt != nil, - ScheduledAt: ptrutil.ValOrDefault(params.scheduledAt, time.Time{}), // default never used - State: params.state, - }) -} - -func (a *StandardAdapter) LeadershipAttemptElect(ctx context.Context, alreadyElected bool, name, leaderID string, ttl time.Duration) (bool, error) { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return dbutil.WithTxV(ctx, a.executor, func(ctx context.Context, tx pgx.Tx) (bool, error) { - if err := a.queries.LeadershipDeleteExpired(ctx, tx, name); err != nil { - return false, err - } - - var ( - electionsWon int64 - err error - ) - if alreadyElected { - electionsWon, err = a.queries.LeadershipAttemptReelect(ctx, tx, dbsqlc.LeadershipAttemptReelectParams{ - LeaderID: leaderID, - Name: name, - TTL: ttl, - }) - } else { - electionsWon, err = a.queries.LeadershipAttemptElect(ctx, tx, dbsqlc.LeadershipAttemptElectParams{ - LeaderID: leaderID, - Name: name, - TTL: ttl, - }) - } - if err != nil { - return false, err - } - - return electionsWon > 0, nil - }) -} - -func (a *StandardAdapter) LeadershipResign(ctx 
context.Context, name, leaderID string) error { - ctx, cancel := context.WithTimeout(ctx, a.deadlineTimeout) - defer cancel() - - return a.queries.LeadershipResign(ctx, a.executor, dbsqlc.LeadershipResignParams{ - LeaderID: leaderID, - LeadershipTopic: string(notifier.NotificationTopicLeadership), - Name: name, - }) -} diff --git a/internal/dbadapter/db_adapter_test.go b/internal/dbadapter/db_adapter_test.go deleted file mode 100644 index fc649b3b..00000000 --- a/internal/dbadapter/db_adapter_test.go +++ /dev/null @@ -1,1553 +0,0 @@ -package dbadapter - -import ( - "context" - "encoding/json" - "fmt" - "runtime" - "slices" - "sort" - "sync" - "testing" - "time" - - "github.com/jackc/pgx/v5" - "github.com/stretchr/testify/require" - - "github.com/riverqueue/river/internal/dbsqlc" - "github.com/riverqueue/river/internal/rivercommon" - "github.com/riverqueue/river/internal/riverinternaltest" - "github.com/riverqueue/river/internal/util/dbutil" - "github.com/riverqueue/river/internal/util/ptrutil" - "github.com/riverqueue/river/internal/util/sliceutil" - "github.com/riverqueue/river/riverdriver" - "github.com/riverqueue/river/rivertype" -) - -func Test_StandardAdapter_JobCancel(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - type testBundle struct { - baselineTime time.Time // baseline time frozen at now when setup is called - ex dbutil.Executor - } - - setup := func(t *testing.T, ex dbutil.Executor) (*StandardAdapter, *testBundle) { - t.Helper() - - bundle := &testBundle{ - baselineTime: time.Now().UTC(), - ex: ex, - } - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(bundle.ex)) - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime } - - return adapter, bundle - } - - setupTx := func(t *testing.T) (*StandardAdapter, *testBundle) { - t.Helper() - return setup(t, riverinternaltest.TestTx(ctx, t)) - } - - for _, startingState := range []dbsqlc.JobState{ - dbsqlc.JobStateAvailable, - dbsqlc.JobStateRetryable, - dbsqlc.JobStateScheduled, - } { - startingState := startingState - - t.Run(fmt.Sprintf("CancelsJobIn%sState", startingState), func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - timeNowString := bundle.baselineTime.Format(time.RFC3339Nano) - - params := makeFakeJobInsertParams(0, nil) - params.State = startingState - insertResult, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - require.Equal(t, startingState, insertResult.Job.State) - - jobAfter, err := adapter.JobCancel(ctx, insertResult.Job.ID) - require.NoError(t, err) - require.NotNil(t, jobAfter) - - require.Equal(t, dbsqlc.JobStateCancelled, jobAfter.State) - require.WithinDuration(t, time.Now(), *jobAfter.FinalizedAt, 2*time.Second) - require.JSONEq(t, fmt.Sprintf(`{"cancel_attempted_at":%q}`, timeNowString), string(jobAfter.Metadata)) - }) - } - - t.Run("RunningJobIsNotImmediatelyCancelled", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - timeNowString := bundle.baselineTime.Format(time.RFC3339Nano) - - params := makeFakeJobInsertParams(0, nil) - params.State = dbsqlc.JobStateRunning - insertResult, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRunning, insertResult.Job.State) - - jobAfter, err := adapter.JobCancel(ctx, insertResult.Job.ID) - require.NoError(t, err) - require.NotNil(t, jobAfter) - require.Equal(t, dbsqlc.JobStateRunning, jobAfter.State) - require.Nil(t, jobAfter.FinalizedAt) - require.JSONEq(t, 
fmt.Sprintf(`{"cancel_attempted_at":%q}`, timeNowString), string(jobAfter.Metadata)) - }) - - for _, startingState := range []dbsqlc.JobState{ - dbsqlc.JobStateCancelled, - dbsqlc.JobStateCompleted, - dbsqlc.JobStateDiscarded, - } { - startingState := startingState - - t.Run(fmt.Sprintf("DoesNotAlterFinalizedJobIn%sState", startingState), func(t *testing.T) { - t.Parallel() - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, nil) - initialRes, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - - res, err := adapter.queries.JobUpdate(ctx, bundle.ex, dbsqlc.JobUpdateParams{ - ID: initialRes.Job.ID, - FinalizedAtDoUpdate: true, - FinalizedAt: ptrutil.Ptr(time.Now()), - StateDoUpdate: true, - State: startingState, - }) - require.NoError(t, err) - - jobAfter, err := adapter.JobCancel(ctx, res.ID) - require.NoError(t, err) - require.Equal(t, startingState, jobAfter.State) - require.WithinDuration(t, *res.FinalizedAt, *jobAfter.FinalizedAt, time.Microsecond) - require.JSONEq(t, `{}`, string(jobAfter.Metadata)) - }) - } - - t.Run("ReturnsErrNoRowsIfJobDoesNotExist", func(t *testing.T) { - t.Parallel() - - adapter, _ := setupTx(t) - - jobAfter, err := adapter.JobCancel(ctx, 1234567890) - require.ErrorIs(t, err, riverdriver.ErrNoRows) - require.Nil(t, jobAfter) - }) -} - -func Test_StandardAdapter_JobGet(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - type testBundle struct { - baselineTime time.Time // baseline time frozen at now when setup is called - ex dbutil.Executor - } - - setup := func(t *testing.T, ex dbutil.Executor) (*StandardAdapter, *testBundle) { - t.Helper() - - bundle := &testBundle{ - baselineTime: time.Now().UTC(), - ex: ex, - } - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(bundle.ex)) - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime } - - return adapter, bundle - } - - setupTx := func(t *testing.T) (*StandardAdapter, *testBundle) { - t.Helper() - return setup(t, riverinternaltest.TestTx(ctx, t)) - } - - t.Run("FetchesAnExistingJob", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, nil) - insertResult, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - - job, err := adapter.JobGet(ctx, insertResult.Job.ID) - require.NoError(t, err) - require.NotNil(t, job) - - require.Equal(t, insertResult.Job.ID, job.ID) - require.Equal(t, dbsqlc.JobStateAvailable, job.State) - require.WithinDuration(t, bundle.baselineTime, job.CreatedAt, time.Millisecond) - require.WithinDuration(t, bundle.baselineTime, job.ScheduledAt, time.Millisecond) - }) - - t.Run("ReturnsErrNoRowsIfJobDoesntExist", func(t *testing.T) { - t.Parallel() - - adapter, _ := setupTx(t) - - job, err := adapter.JobGet(ctx, 99999) - require.Error(t, err) - require.ErrorIs(t, err, riverdriver.ErrNoRows) - require.Nil(t, job) - }) -} - -func Test_StandardAdapter_JobGetAvailable(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - type testBundle struct { - baselineTime time.Time // baseline time frozen at now when setup is called - tx pgx.Tx - } - - setup := func(t *testing.T) (*StandardAdapter, *testBundle) { - t.Helper() - - bundle := &testBundle{ - baselineTime: time.Now(), - tx: riverinternaltest.TestTx(ctx, t), - } - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(bundle.tx)) - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime } - - return adapter, bundle - } 
- - t.Run("Success", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setup(t) - - _, err := adapter.JobInsertTx(ctx, bundle.tx, makeFakeJobInsertParams(0, nil)) - require.NoError(t, err) - - jobRows, err := adapter.JobGetAvailableTx(ctx, bundle.tx, rivercommon.QueueDefault, 100) - require.NoError(t, err) - require.Len(t, jobRows, 1) - - jobRow := jobRows[0] - require.Equal(t, []string{adapter.workerName}, jobRow.AttemptedBy) - }) - - t.Run("ConstrainedToLimit", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setup(t) - - _, err := adapter.JobInsertTx(ctx, bundle.tx, makeFakeJobInsertParams(0, nil)) - require.NoError(t, err) - _, err = adapter.JobInsertTx(ctx, bundle.tx, makeFakeJobInsertParams(1, nil)) - require.NoError(t, err) - - // Two rows inserted but only one found because of the added limit. - jobRows, err := adapter.JobGetAvailableTx(ctx, bundle.tx, rivercommon.QueueDefault, 1) - require.NoError(t, err) - require.Len(t, jobRows, 1) - }) - - t.Run("ConstrainedToQueue", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setup(t) - - _, err := adapter.JobInsertTx(ctx, bundle.tx, makeFakeJobInsertParams(0, &makeFakeJobInsertParamsOpts{ - Queue: ptrutil.Ptr("other-queue"), - })) - require.NoError(t, err) - - // Job is in a non-default queue so it's not found. - jobRows, err := adapter.JobGetAvailableTx(ctx, bundle.tx, rivercommon.QueueDefault, 1) - require.NoError(t, err) - require.Empty(t, jobRows) - }) - - t.Run("ConstrainedToScheduledAtBeforeNow", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setup(t) - - _, err := adapter.JobInsertTx(ctx, bundle.tx, makeFakeJobInsertParams(0, &makeFakeJobInsertParamsOpts{ - ScheduledAt: ptrutil.Ptr(time.Now().Add(1 * time.Minute)), - })) - require.NoError(t, err) - - // Job is scheduled a while from now so it's not found. 
- jobRows, err := adapter.JobGetAvailableTx(ctx, bundle.tx, rivercommon.QueueDefault, 1) - require.NoError(t, err) - require.Empty(t, jobRows) - }) -} - -func Test_StandardAdapter_JobInsert(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - type testBundle struct { - baselineTime time.Time // baseline time frozen at now when setup is called - ex dbutil.Executor - } - - setup := func(t *testing.T, ex dbutil.Executor) (*StandardAdapter, *testBundle) { - t.Helper() - - bundle := &testBundle{ - baselineTime: time.Now(), - ex: ex, - } - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(bundle.ex)) - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime } - - return adapter, bundle - } - - setupTx := func(t *testing.T) (*StandardAdapter, *testBundle) { - t.Helper() - return setup(t, riverinternaltest.TestTx(ctx, t)) - } - - t.Run("Success", func(t *testing.T) { - t.Parallel() - - adapter, _ := setupTx(t) - - insertParams := makeFakeJobInsertParams(0, nil) - res, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - - // Sanity check, following assertion depends on this: - require.True(t, insertParams.ScheduledAt.IsZero()) - - require.Greater(t, res.Job.ID, int64(0), "expected job ID to be set, got %d", res.Job.ID) - require.JSONEq(t, string(insertParams.EncodedArgs), string(res.Job.Args)) - require.Equal(t, int16(0), res.Job.Attempt) - require.Nil(t, res.Job.AttemptedAt) - require.Empty(t, res.Job.AttemptedBy) - require.WithinDuration(t, time.Now(), res.Job.CreatedAt, 2*time.Second) - require.Empty(t, res.Job.Errors) - require.Nil(t, res.Job.FinalizedAt) - require.Equal(t, insertParams.Kind, res.Job.Kind) - require.Equal(t, int16(insertParams.MaxAttempts), res.Job.MaxAttempts) - require.Equal(t, insertParams.Metadata, res.Job.Metadata) - require.Equal(t, int16(insertParams.Priority), res.Job.Priority) - require.Equal(t, insertParams.Queue, res.Job.Queue) - require.Equal(t, dbsqlc.JobStateAvailable, res.Job.State) - require.WithinDuration(t, time.Now(), res.Job.ScheduledAt, 2*time.Second) - require.Empty(t, res.Job.Tags) - }) - - t.Run("InsertAndFetch", func(t *testing.T) { - t.Parallel() - - adapter, _ := setupTx(t) - - const maxJobsToFetch = 8 - - res, err := adapter.JobInsert(ctx, makeFakeJobInsertParams(0, nil)) - require.NoError(t, err) - require.NotEqual(t, 0, res.Job.ID, "expected job ID to be set, got %d", res.Job.ID) - require.WithinDuration(t, time.Now(), res.Job.ScheduledAt, 1*time.Second) - - jobs, err := adapter.JobGetAvailable(ctx, rivercommon.QueueDefault, maxJobsToFetch) - require.NoError(t, err) - require.Len(t, jobs, 1, - "inserted 1 job but fetched %d jobs:\n%+v", len(jobs), jobs) - require.Equal(t, dbsqlc.JobStateRunning, jobs[0].State, - "expected selected job to be in running state, got %q", jobs[0].State) - - for i := 1; i < 10; i++ { - _, err := adapter.JobInsert(ctx, makeFakeJobInsertParams(i, nil)) - require.NoError(t, err) - } - - jobs, err = adapter.JobGetAvailable(ctx, rivercommon.QueueDefault, maxJobsToFetch) - require.NoError(t, err) - require.Len(t, jobs, maxJobsToFetch, - "inserted 9 more jobs and expected to fetch max of %d jobs but fetched %d jobs:\n%+v", maxJobsToFetch, len(jobs), jobs) - for _, j := range jobs { - require.Equal(t, dbsqlc.JobStateRunning, j.State, - "expected selected job to be in running state, got %q", j.State) - } - - jobs, err = adapter.JobGetAvailable(ctx, rivercommon.QueueDefault, maxJobsToFetch) - require.NoError(t, err) - require.Len(t, jobs, 1, - 
"expected to fetch 1 remaining job but fetched %d jobs:\n%+v", len(jobs), jobs) - }) - - t.Run("UniqueJobByArgs", func(t *testing.T) { - t.Parallel() - - adapter, _ := setupTx(t) - - insertParams := makeFakeJobInsertParams(0, nil) - insertParams.Unique = true - insertParams.UniqueByArgs = true - - res0, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.False(t, res0.UniqueSkippedAsDuplicate) - - // Insert a second job with the same args, but expect that the same job - // ID to come back because we're still within its unique parameters. - res1, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.Equal(t, res0.Job.ID, res1.Job.ID) - require.True(t, res1.UniqueSkippedAsDuplicate) - - insertParams.EncodedArgs = []byte(`{"key":"different"}`) - - // Same operation again, except that because we've modified the unique - // dimension, another job is allowed to be queued, so the new ID is - // not the same. - res2, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.NotEqual(t, res0.Job.ID, res2.Job.ID) - require.False(t, res2.UniqueSkippedAsDuplicate) - }) - - t.Run("UniqueJobByPeriod", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - insertParams := makeFakeJobInsertParams(0, nil) - insertParams.Unique = true - insertParams.UniqueByPeriod = 15 * time.Minute - - res0, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.False(t, res0.UniqueSkippedAsDuplicate) - - // Insert a second job with the same args, but expect that the same job - // ID to come back because we're still within its unique parameters. - res1, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.Equal(t, res0.Job.ID, res1.Job.ID) - require.True(t, res1.UniqueSkippedAsDuplicate) - - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime.Add(insertParams.UniqueByPeriod).Add(1 * time.Second) } - - // Same operation again, except that because we've advanced time passed - // the period within unique bounds, another job is allowed to be queued, - // so the new ID is not the same. - res2, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.NotEqual(t, res0.Job.ID, res2.Job.ID) - require.False(t, res2.UniqueSkippedAsDuplicate) - }) - - t.Run("UniqueJobByQueue", func(t *testing.T) { - t.Parallel() - - adapter, _ := setupTx(t) - - insertParams := makeFakeJobInsertParams(0, nil) - insertParams.Unique = true - insertParams.UniqueByQueue = true - - res0, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.False(t, res0.UniqueSkippedAsDuplicate) - - // Insert a second job with the same args, but expect that the same job - // ID to come back because we're still within its unique parameters. - res1, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.Equal(t, res0.Job.ID, res1.Job.ID) - require.True(t, res1.UniqueSkippedAsDuplicate) - - insertParams.Queue = "alternate_queue" - - // Same operation again, except that because we've modified the unique - // dimension, another job is allowed to be queued, so the new ID is - // not the same. 
- res2, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.NotEqual(t, res0.Job.ID, res2.Job.ID) - require.False(t, res2.UniqueSkippedAsDuplicate) - }) - - t.Run("UniqueJobByState", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - insertParams := makeFakeJobInsertParams(0, nil) - insertParams.Unique = true - insertParams.UniqueByState = []dbsqlc.JobState{dbsqlc.JobStateAvailable, dbsqlc.JobStateRunning} - - res0, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.False(t, res0.UniqueSkippedAsDuplicate) - - // Insert a second job with the same args, but expect that the same job - // ID to come back because we're still within its unique parameters. - res1, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.Equal(t, res0.Job.ID, res1.Job.ID) - require.True(t, res1.UniqueSkippedAsDuplicate) - - // A new job is allowed if we're inserting the job with a state that's - // not included in the unique state set. - { - insertParams := *insertParams // dup - insertParams.State = dbsqlc.JobStateCompleted - - res2, err := adapter.JobInsert(ctx, &insertParams) - require.NoError(t, err) - require.NotEqual(t, res0.Job.ID, res2.Job.ID) - require.False(t, res2.UniqueSkippedAsDuplicate) - } - - // A new job is also allowed if the state of the originally inserted job - // changes to one that's not included in the unique state set. - { - _, err := adapter.queries.JobUpdate(ctx, bundle.ex, dbsqlc.JobUpdateParams{ - ID: res0.Job.ID, - StateDoUpdate: true, - State: dbsqlc.JobStateCompleted, - }) - require.NoError(t, err) - - res2, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.NotEqual(t, res0.Job.ID, res2.Job.ID) - require.False(t, res2.UniqueSkippedAsDuplicate) - } - }) - - // Unlike other unique options, state gets a default set when it's not - // supplied. This test case checks that the default is working as expected. - t.Run("UniqueJobByDefaultState", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - insertParams := makeFakeJobInsertParams(0, nil) - insertParams.Unique = true - insertParams.UniqueByQueue = true - - res0, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.False(t, res0.UniqueSkippedAsDuplicate) - - // Insert a second job with the same args, but expect that the same job - // ID to come back because we're still within its unique parameters. - res1, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.Equal(t, res0.Job.ID, res1.Job.ID) - require.True(t, res1.UniqueSkippedAsDuplicate) - - // Test all the other default unique states (see `defaultUniqueStates`) - // to make sure that in each case an inserted job still counts as a - // duplicate. The only state we don't test is `available` because that's - // already been done above. - for _, defaultState := range []dbsqlc.JobState{ - dbsqlc.JobStateCompleted, - dbsqlc.JobStateRunning, - dbsqlc.JobStateRetryable, - dbsqlc.JobStateScheduled, - } { - var finalizedAt *time.Time - if defaultState == dbsqlc.JobStateCompleted { - finalizedAt = ptrutil.Ptr(bundle.baselineTime) - } - - _, err = adapter.queries.JobUpdate(ctx, bundle.ex, dbsqlc.JobUpdateParams{ - ID: res0.Job.ID, - FinalizedAt: finalizedAt, - FinalizedAtDoUpdate: true, - StateDoUpdate: true, - State: defaultState, - }) - require.NoError(t, err) - - // Still counts as a duplicate. 
- res1, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.Equal(t, res0.Job.ID, res1.Job.ID) - require.True(t, res1.UniqueSkippedAsDuplicate) - } - - _, err = adapter.queries.JobUpdate(ctx, bundle.ex, dbsqlc.JobUpdateParams{ - ID: res0.Job.ID, - FinalizedAt: ptrutil.Ptr(bundle.baselineTime), - FinalizedAtDoUpdate: true, - StateDoUpdate: true, - State: dbsqlc.JobStateDiscarded, - }) - require.NoError(t, err) - - // Uniqueness includes a default set of states, so by moving the - // original job to "discarded", we're now allowed to insert a new job - // again, despite not having explicitly set the `ByState` option. - res2, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.NotEqual(t, res0.Job.ID, res2.Job.ID) - require.False(t, res2.UniqueSkippedAsDuplicate) - }) - - t.Run("UniqueJobAllOptions", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - insertParams := makeFakeJobInsertParams(0, nil) - insertParams.Unique = true - insertParams.UniqueByArgs = true - insertParams.UniqueByPeriod = 15 * time.Minute - insertParams.UniqueByQueue = true - insertParams.UniqueByState = []dbsqlc.JobState{dbsqlc.JobStateAvailable, dbsqlc.JobStateRunning} - - // Gut check to make sure all the unique properties were correctly set. - require.True(t, insertParams.Unique) - require.True(t, insertParams.UniqueByArgs) - require.NotZero(t, insertParams.UniqueByPeriod) - require.True(t, insertParams.UniqueByQueue) - require.Equal(t, []dbsqlc.JobState{dbsqlc.JobStateAvailable, dbsqlc.JobStateRunning}, insertParams.UniqueByState) - - res0, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.False(t, res0.UniqueSkippedAsDuplicate) - - // Insert a second job with the same args, but expect that the same job - // ID to come back because we're still within its unique parameters. - res1, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - require.Equal(t, res0.Job.ID, res1.Job.ID) - require.True(t, res1.UniqueSkippedAsDuplicate) - - // With args modified - { - insertParams := *insertParams // dup - insertParams.EncodedArgs = []byte(`{"key":"different"}`) - - // New job because a unique dimension has changed. - res2, err := adapter.JobInsert(ctx, &insertParams) - require.NoError(t, err) - require.NotEqual(t, res0.Job.ID, res2.Job.ID) - require.False(t, res2.UniqueSkippedAsDuplicate) - } - - // With period modified - { - insertParams := *insertParams // dup - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime.Add(insertParams.UniqueByPeriod).Add(1 * time.Second) } - - // New job because a unique dimension has changed. - res2, err := adapter.JobInsert(ctx, &insertParams) - require.NoError(t, err) - require.NotEqual(t, res0.Job.ID, res2.Job.ID) - require.False(t, res2.UniqueSkippedAsDuplicate) - - // Make sure to change timeNow back - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime } - } - - // With queue modified - { - insertParams := *insertParams // dup - insertParams.Queue = "alternate_queue" - - // New job because a unique dimension has changed. - res2, err := adapter.JobInsert(ctx, &insertParams) - require.NoError(t, err) - require.NotEqual(t, res0.Job.ID, res2.Job.ID) - require.False(t, res2.UniqueSkippedAsDuplicate) - } - - // With state modified - { - insertParams := *insertParams // dup - insertParams.State = dbsqlc.JobStateCompleted - - // New job because a unique dimension has changed. 
- res2, err := adapter.JobInsert(ctx, &insertParams) - require.NoError(t, err) - require.NotEqual(t, res0.Job.ID, res2.Job.ID) - require.False(t, res2.UniqueSkippedAsDuplicate) - } - }) - - t.Run("UniqueJobContention", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setup(t, riverinternaltest.TestDB(ctx, t)) - - insertParams := makeFakeJobInsertParams(0, nil) - insertParams.Unique = true - insertParams.UniqueByPeriod = 15 * time.Minute - - var ( - numContendingJobs = runtime.NumCPU() // max allowed test manager connections - insertedJobs = make([]*dbsqlc.RiverJob, numContendingJobs) - insertedJobsMu sync.Mutex - wg sync.WaitGroup - ) - - for i := 0; i < numContendingJobs; i++ { - jobNum := i - wg.Add(1) - - go func() { - _, err := dbutil.WithTxV(ctx, bundle.ex, func(ctx context.Context, tx pgx.Tx) (struct{}, error) { - res, err := adapter.JobInsertTx(ctx, tx, insertParams) - require.NoError(t, err) - - insertedJobsMu.Lock() - insertedJobs[jobNum] = res.Job - insertedJobsMu.Unlock() - - return struct{}{}, nil - }) - require.NoError(t, err) - - wg.Done() - }() - } - - wg.Wait() - - firstJobID := insertedJobs[0].ID - for i := 1; i < numContendingJobs; i++ { - require.Equal(t, firstJobID, insertedJobs[i].ID) - } - }) -} - -func Test_Adapter_JobInsertMany(t *testing.T) { - t.Parallel() - - // This test needs to use a time from before the transaction begins, otherwise - // the newly-scheduled jobs won't yet show as available because their - // scheduled_at (which gets a default value from time.Now() in code) will be - // after the start of the transaction. - now := time.Now().UTC().Add(-1 * time.Minute) - - ctx := context.Background() - tx := riverinternaltest.TestTx(ctx, t) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(tx)) - adapter.TimeNowUTC = func() time.Time { return now } - - insertParams := make([]*JobInsertParams, 10) - for i := 0; i < len(insertParams); i++ { - insertParams[i] = makeFakeJobInsertParams(i, nil) - } - - count, err := adapter.JobInsertMany(ctx, insertParams) - require.NoError(t, err) - require.Len(t, insertParams, int(count)) - - jobsAfter, err := adapter.JobGetAvailable(ctx, rivercommon.QueueDefault, int32(len(insertParams))) - require.NoError(t, err) - require.Len(t, jobsAfter, len(insertParams)) -} - -func Test_StandardAdapter_FetchIsPrioritized(t *testing.T) { - t.Parallel() - - ctx := context.Background() - tx := riverinternaltest.TestTx(ctx, t) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(tx)) - - for i := 3; i > 0; i-- { - // Insert jobs with decreasing priority numbers (3, 2, 1) which means increasing priority. - insertParams := makeFakeJobInsertParams(i, nil) - insertParams.Priority = i - _, err := adapter.JobInsert(ctx, insertParams) - require.NoError(t, err) - } - - // We should fetch the 2 highest priority jobs first in order (priority 1, then 2): - jobs, err := adapter.JobGetAvailable(ctx, rivercommon.QueueDefault, 2) - require.NoError(t, err) - require.Len(t, jobs, 2, "expected to fetch exactly 2 jobs") - - // Because the jobs are ordered within the fetch query's CTE but *not* within - // the final query, the final result list may not actually be sorted. 
This is - // fine, because we've already ensured that we've fetched the jobs we wanted - // to fetch via that ORDER BY. For testing we'll need to sort the list after - // fetch to easily assert that the expected jobs are in it. - sort.Slice(jobs, func(i, j int) bool { return jobs[i].Priority < jobs[j].Priority }) - - require.Equal(t, int16(1), jobs[0].Priority, "expected first job to have priority 1") - require.Equal(t, int16(2), jobs[1].Priority, "expected second job to have priority 2") - - // Should fetch the one remaining job on the next attempt: - jobs, err = adapter.JobGetAvailable(ctx, rivercommon.QueueDefault, 1) - require.NoError(t, err) - require.Len(t, jobs, 1, "expected to fetch exactly 1 job") - require.Equal(t, int16(3), jobs[0].Priority, "expected final job to have priority 2") -} - -func Test_StandardAdapter_JobList_and_JobListTx(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - type testBundle struct { - baselineTime time.Time // baseline time frozen at now when setup is called - ex dbutil.Executor - tx pgx.Tx - jobs []*dbsqlc.RiverJob - } - - setup := func(t *testing.T, tx pgx.Tx) (*StandardAdapter, *testBundle) { - t.Helper() - - bundle := &testBundle{ - baselineTime: time.Now(), - ex: tx, - tx: tx, - } - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(bundle.ex)) - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime } - - params := makeFakeJobInsertParams(1, &makeFakeJobInsertParamsOpts{Queue: ptrutil.Ptr("priority")}) - job1, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - - params = makeFakeJobInsertParams(2, nil) - job2, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - - params = makeFakeJobInsertParams(3, &makeFakeJobInsertParamsOpts{Metadata: []byte(`{"some_key": "some_value"}`)}) - job3, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - - params = makeFakeJobInsertParams(4, &makeFakeJobInsertParamsOpts{State: ptrutil.Ptr(dbsqlc.JobStateRunning)}) - job4, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - - bundle.jobs = []*dbsqlc.RiverJob{job1.Job, job2.Job, job3.Job, job4.Job} - - return adapter, bundle - } - - setupTx := func(t *testing.T) (*StandardAdapter, *testBundle) { - t.Helper() - return setup(t, riverinternaltest.TestTx(ctx, t)) - } - - type testListFunc func(jobs []*dbsqlc.RiverJob, err error) - - execTest := func(ctx context.Context, t *testing.T, adapter *StandardAdapter, params JobListParams, tx pgx.Tx, testFunc testListFunc) { - t.Helper() - t.Logf("testing JobList") - jobs, err := adapter.JobList(ctx, params) - testFunc(jobs, err) - - t.Logf("testing JobListTx") - // use a sub-transaction in case it's rolled back or errors: - subTx, err := tx.Begin(ctx) - require.NoError(t, err) - defer subTx.Rollback(ctx) - jobs, err = adapter.JobListTx(ctx, subTx, params) - testFunc(jobs, err) - } - - t.Run("Minimal", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := JobListParams{ - LimitCount: 2, - OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderDesc}}, - State: rivertype.JobStateAvailable, - } - - execTest(ctx, t, adapter, params, bundle.tx, func(jobs []*dbsqlc.RiverJob, err error) { - require.NoError(t, err) - - // job 1 is excluded due to pagination limit of 2, while job 4 is excluded - // due to its state: - job2 := bundle.jobs[1] - job3 := bundle.jobs[2] - - returnedIDs := sliceutil.Map(jobs, func(j *dbsqlc.RiverJob) int64 { return j.ID }) - require.Equal(t, []int64{job3.ID, 
job2.ID}, returnedIDs) - }) - }) - - t.Run("ComplexConditionsWithNamedArgs", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := JobListParams{ - Conditions: "jsonb_extract_path(args, VARIADIC @paths1::text[]) = @value1::jsonb", - LimitCount: 2, - NamedArgs: map[string]any{"paths1": []string{"job_num"}, "value1": 2}, - OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderDesc}}, - State: rivertype.JobStateAvailable, - } - - execTest(ctx, t, adapter, params, bundle.tx, func(jobs []*dbsqlc.RiverJob, err error) { - require.NoError(t, err) - - job2 := bundle.jobs[1] - returnedIDs := sliceutil.Map(jobs, func(j *dbsqlc.RiverJob) int64 { return j.ID }) - require.Equal(t, []int64{job2.ID}, returnedIDs) - }) - }) - - t.Run("ConditionsWithQueues", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := JobListParams{ - Conditions: "finalized_at IS NULL", - LimitCount: 2, - OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderDesc}}, - Queues: []string{"priority"}, - State: rivertype.JobStateAvailable, - } - - execTest(ctx, t, adapter, params, bundle.tx, func(jobs []*dbsqlc.RiverJob, err error) { - require.NoError(t, err) - - job1 := bundle.jobs[0] - returnedIDs := sliceutil.Map(jobs, func(j *dbsqlc.RiverJob) int64 { return j.ID }) - require.Equal(t, []int64{job1.ID}, returnedIDs) - }) - }) - - t.Run("WithMetadataAndNoStateFilter", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := JobListParams{ - Conditions: "metadata @> @metadata_filter::jsonb", - LimitCount: 2, - NamedArgs: map[string]any{"metadata_filter": `{"some_key": "some_value"}`}, - OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderDesc}}, - } - - execTest(ctx, t, adapter, params, bundle.tx, func(jobs []*dbsqlc.RiverJob, err error) { - require.NoError(t, err) - - job3 := bundle.jobs[2] - returnedIDs := sliceutil.Map(jobs, func(j *dbsqlc.RiverJob) int64 { return j.ID }) - require.Equal(t, []int64{job3.ID}, returnedIDs) - }) - }) -} - -func Test_StandardAdapter_JobRetryImmediately(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - type testBundle struct { - baselineTime time.Time // baseline time frozen at now when setup is called - ex dbutil.Executor - } - - setup := func(t *testing.T, ex dbutil.Executor) (*StandardAdapter, *testBundle) { - t.Helper() - - bundle := &testBundle{ - baselineTime: time.Now(), - ex: ex, - } - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(bundle.ex)) - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime } - - return adapter, bundle - } - - setupTx := func(t *testing.T) (*StandardAdapter, *testBundle) { - t.Helper() - return setup(t, riverinternaltest.TestTx(ctx, t)) - } - - t.Run("DoesNotUpdateARunningJob", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, nil) - params.State = dbsqlc.JobStateRunning - res, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRunning, res.Job.State) - - jAfter, err := adapter.JobRetryImmediately(ctx, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRunning, jAfter.State) - require.WithinDuration(t, res.Job.ScheduledAt, jAfter.ScheduledAt, time.Microsecond) - - j, err := adapter.queries.JobGetByID(ctx, bundle.ex, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRunning, j.State) - }) - - for _, state := range []dbsqlc.JobState{ - dbsqlc.JobStateAvailable, 
- dbsqlc.JobStateCancelled, - dbsqlc.JobStateCompleted, - dbsqlc.JobStateDiscarded, - // TODO(bgentry): add Pending to this list when it's added: - dbsqlc.JobStateRetryable, - dbsqlc.JobStateScheduled, - } { - state := state - - t.Run(fmt.Sprintf("UpdatesA_%s_JobToBeScheduledImmediately", state), func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, nil) - // As long as the job is scheduled for any time in the future, it - // scheduled_at should be updated to now: - params.ScheduledAt = bundle.baselineTime.Add(time.Hour) - res, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - - // Finalized states require a FinalizedAt. JobInsert doesn't allow setting - // FinalizedAt so do it with a subsequent update after insert: - setFinalized := slices.Contains([]dbsqlc.JobState{ - dbsqlc.JobStateCancelled, - dbsqlc.JobStateCompleted, - dbsqlc.JobStateDiscarded, - }, state) - _, err = adapter.queries.JobUpdate(ctx, bundle.ex, dbsqlc.JobUpdateParams{ - FinalizedAtDoUpdate: setFinalized, - FinalizedAt: &bundle.baselineTime, - ID: res.Job.ID, - StateDoUpdate: true, - State: state, - }) - require.NoError(t, err) - - jAfter, err := adapter.JobRetryImmediately(ctx, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateAvailable, jAfter.State) - require.WithinDuration(t, time.Now().UTC(), jAfter.ScheduledAt, 100*time.Millisecond) - - j, err := adapter.queries.JobGetByID(ctx, bundle.ex, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateAvailable, j.State) - require.Nil(t, j.FinalizedAt) - }) - } - - t.Run("AltersScheduledAtForAlreadyCompletedJob", func(t *testing.T) { - // A job which has already completed will have a ScheduledAt that could be - // long in the past. Now that we're re-scheduling it, we should update that - // to the current time to slot it in alongside other recently-scheduled jobs - // and not skip the line; also, its wait duration can't be calculated - // accurately if we don't reset the scheduled_at. - t.Parallel() - - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, nil) - params.ScheduledAt = bundle.baselineTime.Add(-1 * time.Hour) - res, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - _, err = adapter.queries.JobUpdate(ctx, bundle.ex, dbsqlc.JobUpdateParams{ - FinalizedAtDoUpdate: true, - FinalizedAt: &bundle.baselineTime, - ID: res.Job.ID, - StateDoUpdate: true, - State: dbsqlc.JobStateCompleted, - }) - require.NoError(t, err) - - jAfter, err := adapter.JobRetryImmediately(ctx, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateAvailable, jAfter.State) - require.WithinDuration(t, time.Now().UTC(), jAfter.ScheduledAt, 5*time.Second) - }) - - t.Run("DoesNotAlterScheduledAtIfInThePastAndJobAlreadyAvailable", func(t *testing.T) { - // We don't want to update ScheduledAt if the job was already available - // because doing so can make it lose its place in line. 
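(Condensing the scheduled_at rule exercised by this and the preceding subtests into Go, as an illustrative sketch only: the real logic is SQL in the adapter's retry query, and the package and function names below are invented for the example. Running jobs, which the retry leaves untouched entirely, are omitted from the sketch.)

package sketch

import "time"

// retryScheduledAt condenses the retry rule described above: an
// already-available job keeps an in-the-past scheduled_at so it doesn't lose
// its place in line, while jobs in other states are re-slotted at now so
// their wait duration is measured from the retry.
func retryScheduledAt(state string, scheduledAt, now time.Time) time.Time {
	if state == "available" && scheduledAt.Before(now) {
		return scheduledAt
	}
	return now
}
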
- t.Parallel() - - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, nil) - params.ScheduledAt = bundle.baselineTime.Add(-1 * time.Hour) - res, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - - jAfter, err := adapter.JobRetryImmediately(ctx, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateAvailable, jAfter.State) - require.WithinDuration(t, params.ScheduledAt, jAfter.ScheduledAt, time.Microsecond) - - j, err := adapter.queries.JobGetByID(ctx, bundle.ex, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateAvailable, j.State) - }) - - t.Run("ReturnsErrNoRowsIfJobNotFound", func(t *testing.T) { - t.Parallel() - - adapter, _ := setupTx(t) - - _, err := adapter.JobRetryImmediately(ctx, 999999) - require.Error(t, err) - require.ErrorIs(t, err, riverdriver.ErrNoRows) - }) -} - -func Test_StandardAdapter_JobSetStateCompleted(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - type testBundle struct { - baselineTime time.Time // baseline time frozen at now when setup is called - ex dbutil.Executor - } - - setup := func(t *testing.T, ex dbutil.Executor) (*StandardAdapter, *testBundle) { - t.Helper() - - bundle := &testBundle{ - baselineTime: time.Now(), - ex: ex, - } - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(bundle.ex)) - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime } - - return adapter, bundle - } - - setupTx := func(t *testing.T) (*StandardAdapter, *testBundle) { - t.Helper() - return setup(t, riverinternaltest.TestTx(ctx, t)) - } - - t.Run("CompletesARunningJob", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, nil) - params.State = dbsqlc.JobStateRunning - res, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRunning, res.Job.State) - - jAfter, err := adapter.JobSetStateIfRunning(ctx, JobSetStateCompleted(res.Job.ID, bundle.baselineTime)) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateCompleted, jAfter.State) - require.WithinDuration(t, bundle.baselineTime, *jAfter.FinalizedAt, time.Microsecond) - - j, err := adapter.queries.JobGetByID(ctx, bundle.ex, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateCompleted, j.State) - }) - - t.Run("DoesNotCompleteARetryableJob", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, nil) - params.State = dbsqlc.JobStateRetryable - res, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, res.Job.State) - - jAfter, err := adapter.JobSetStateIfRunning(ctx, JobSetStateCompleted(res.Job.ID, bundle.baselineTime)) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, jAfter.State) - require.Nil(t, jAfter.FinalizedAt) - - j, err := adapter.queries.JobGetByID(ctx, bundle.ex, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, j.State) - }) -} - -func Test_StandardAdapter_JobSetStateErrored(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - type testBundle struct { - baselineTime time.Time // baseline time frozen at now when setup is called - errPayload []byte - ex dbutil.Executor - } - - setup := func(t *testing.T, executor dbutil.Executor) (*StandardAdapter, *testBundle) { - t.Helper() - - tNow := time.Now() - - errPayload, err := json.Marshal(dbsqlc.AttemptError{ - Attempt: 1, 
At: tNow.UTC(), Error: "fake error", Trace: "foo.go:123\nbar.go:456", - }) - - require.NoError(t, err) - bundle := &testBundle{ - baselineTime: tNow, - errPayload: errPayload, - ex: executor, - } - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(bundle.ex)) - adapter.TimeNowUTC = func() time.Time { return bundle.baselineTime } - - return adapter, bundle - } - - setupTx := func(t *testing.T) (*StandardAdapter, *testBundle) { - t.Helper() - return setup(t, riverinternaltest.TestTx(ctx, t)) - } - - t.Run("SetsARunningJobToRetryable", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, nil) - params.State = dbsqlc.JobStateRunning - res, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRunning, res.Job.State) - - jAfter, err := adapter.JobSetStateIfRunning(ctx, JobSetStateErrorRetryable(res.Job.ID, bundle.baselineTime, bundle.errPayload)) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, jAfter.State) - require.WithinDuration(t, bundle.baselineTime, jAfter.ScheduledAt, time.Microsecond) - - j, err := adapter.queries.JobGetByID(ctx, bundle.ex, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, j.State) - - // validate error payload: - require.Len(t, jAfter.Errors, 1) - require.Equal(t, bundle.baselineTime.UTC(), jAfter.Errors[0].At) - require.Equal(t, uint16(1), jAfter.Errors[0].Attempt) - require.Equal(t, "fake error", jAfter.Errors[0].Error) - require.Equal(t, "foo.go:123\nbar.go:456", jAfter.Errors[0].Trace) - }) - - t.Run("DoesNotTouchAlreadyRetryableJob", func(t *testing.T) { - t.Parallel() - - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, nil) - params.State = dbsqlc.JobStateRetryable - params.ScheduledAt = bundle.baselineTime.Add(10 * time.Second) - res, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, res.Job.State) - - jAfter, err := adapter.JobSetStateIfRunning(ctx, JobSetStateErrorRetryable(res.Job.ID, bundle.baselineTime, bundle.errPayload)) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, jAfter.State) - require.WithinDuration(t, params.ScheduledAt, jAfter.ScheduledAt, time.Microsecond) - - j, err := adapter.queries.JobGetByID(ctx, bundle.ex, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, j.State) - require.WithinDuration(t, params.ScheduledAt, jAfter.ScheduledAt, time.Microsecond) - }) - - t.Run("SetsAJobWithCancelAttemptedAtToCancelled", func(t *testing.T) { - // If a job has cancel_attempted_at in its metadata, it means that the user - // tried to cancel the job with the Cancel API but that the job - // finished/errored before the producer received the cancel notification. - // - // In this case, we want to move the job to cancelled instead of retryable - // so that the job is not retried. 
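(The cancellation rule this subtest covers can be summarized in Go as follows. This is an illustrative sketch only: the actual check happens inside the JobSetStateIfRunning SQL query, and the package and helper names below are invented for the example.)

package sketch

import (
	"encoding/json"
	"time"
)

// erroredJobNextState condenses the rule described above: a
// cancel_attempted_at key in the job's metadata means cancellation was
// requested while the job ran, so an error now finalizes the job as
// cancelled rather than scheduling a retry.
func erroredJobNextState(metadata []byte) string {
	var meta struct {
		CancelAttemptedAt *time.Time `json:"cancel_attempted_at"`
	}
	if err := json.Unmarshal(metadata, &meta); err == nil && meta.CancelAttemptedAt != nil {
		return "cancelled"
	}
	return "retryable"
}
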
- t.Parallel() - - adapter, bundle := setupTx(t) - - params := makeFakeJobInsertParams(0, &makeFakeJobInsertParamsOpts{ - ScheduledAt: ptrutil.Ptr(bundle.baselineTime.Add(-10 * time.Second)), - }) - params.State = dbsqlc.JobStateRunning - params.Metadata = []byte(fmt.Sprintf(`{"cancel_attempted_at":"%s"}`, time.Now().UTC().Format(time.RFC3339))) - res, err := adapter.JobInsert(ctx, params) - require.NoError(t, err) - - jAfter, err := adapter.JobSetStateIfRunning(ctx, JobSetStateErrorRetryable(res.Job.ID, bundle.baselineTime, bundle.errPayload)) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateCancelled, jAfter.State) - require.NotNil(t, jAfter.FinalizedAt) - // Loose assertion against FinalizedAt just to make sure it was set (it uses - // the database's now() instead of a passed-in time): - require.WithinDuration(t, time.Now().UTC(), *jAfter.FinalizedAt, 2*time.Second) - // ScheduledAt should not be touched: - require.WithinDuration(t, params.ScheduledAt, jAfter.ScheduledAt, time.Microsecond) - // Errors should still be appended to: - require.Len(t, jAfter.Errors, 1) - require.Contains(t, jAfter.Errors[0].Error, "fake error") - - j, err := adapter.queries.JobGetByID(ctx, bundle.ex, res.Job.ID) - require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateCancelled, j.State) - require.WithinDuration(t, params.ScheduledAt, jAfter.ScheduledAt, time.Microsecond) - }) -} - -func getLeadershipExpiresAt(ctx context.Context, t *testing.T, tx pgx.Tx) time.Time { - t.Helper() - var expiresAt time.Time - err := tx.QueryRow(ctx, "SELECT expires_at FROM river_leader WHERE name = $1", rivercommon.QueueDefault).Scan(&expiresAt) - require.NoError(t, err) - return expiresAt -} - -func electLeader(ctx context.Context, t *testing.T, adapter *StandardAdapter, name string, ttl time.Duration) { - t.Helper() - won, err := adapter.LeadershipAttemptElect(ctx, false, rivercommon.QueueDefault, name, ttl) - require.NoError(t, err) - require.True(t, won) -} - -func Test_StandardAdapter_LeadershipAttemptElect_CannotElectTwiceInARow(t *testing.T) { - t.Parallel() - - ctx := context.Background() - tx := riverinternaltest.TestTx(ctx, t) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(tx)) - electLeader(ctx, t, adapter, "fakeWorker0", 10*time.Second) - - expiresAt := getLeadershipExpiresAt(ctx, t, tx) - require.NotZero(t, expiresAt) - - won, err := adapter.LeadershipAttemptElect(ctx, false, rivercommon.QueueDefault, "fakeWorker1", 30*time.Second) - require.NoError(t, err) - require.False(t, won) - - // The time should not have changed because we specified that we were not - // already elected, and the elect query is a no-op if there's already a - // leader: - expiresAtAfter := getLeadershipExpiresAt(ctx, t, tx) - require.Equal(t, expiresAt, expiresAtAfter) -} - -func Test_StandardAdapter_LeadershipAttemptElect_SuccessfullyReElectsSameLeader(t *testing.T) { - t.Parallel() - - ctx := context.Background() - tx := riverinternaltest.TestTx(ctx, t) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(tx)) - electLeader(ctx, t, adapter, "fakeWorker0", 10*time.Second) - - expiresAt := getLeadershipExpiresAt(ctx, t, tx) - require.NotZero(t, expiresAt) - - // Re-elect the same leader. 
Use a larger TTL to see if time is updated, - // because we are in a test transaction and the time is frozen at the start of - // the transaction. - won, err := adapter.LeadershipAttemptElect(ctx, true, rivercommon.QueueDefault, "fakeWorker0", 30*time.Second) - require.NoError(t, err) - require.True(t, won) // won re-election - - // expires_at should be incremented because this is the same leader that won - // previously and we specified that we're already elected: - expiresAtAfter := getLeadershipExpiresAt(ctx, t, tx) - require.Greater(t, expiresAtAfter, expiresAt) -} - -func Test_StandardAdapter_LeadershipAttemptReelect_CannotReElectNonLeader(t *testing.T) { - t.Parallel() - - ctx := context.Background() - tx := riverinternaltest.TestTx(ctx, t) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(t), testAdapterConfig(tx)) - electLeader(ctx, t, adapter, "fakeWorker0", 10*time.Second) - - // read the expiration time from the database to make sure it's set to the - // future and won't be changed by the next LeadershipAttemptElect: - expiresAt := getLeadershipExpiresAt(ctx, t, tx) - require.NotZero(t, expiresAt) - - // Attempt to re-elect a *different* leader. Use a larger TTL to see if time - // is updated, because we are in a test transaction and the time is frozen at - // the start of the transaction. - won, err := adapter.LeadershipAttemptElect(ctx, true, rivercommon.QueueDefault, "fakeWorker1", 30*time.Second) - require.NoError(t, err) - require.False(t, won) - - // The time should not be altered because this was a different leader: - expiresAtAfter := getLeadershipExpiresAt(ctx, t, tx) - require.Equal(t, expiresAt, expiresAtAfter) -} - -func Benchmark_StandardAdapter_Insert(b *testing.B) { - ctx := context.Background() - tx := riverinternaltest.TestTx(ctx, b) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(b), testAdapterConfig(tx)) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - if _, err := adapter.JobInsert(ctx, makeFakeJobInsertParams(i, nil)); err != nil { - b.Fatal(err) - } - } -} - -func Benchmark_StandardAdapter_Insert_Parallelized(b *testing.B) { - ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, b) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(b), testAdapterConfig(dbPool)) - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - i := 0 - for pb.Next() { - if _, err := adapter.JobInsert(ctx, makeFakeJobInsertParams(i, nil)); err != nil { - b.Fatal(err) - } - i++ - } - }) -} - -func Benchmark_StandardAdapter_Fetch_100(b *testing.B) { - ctx := context.Background() - - dbPool := riverinternaltest.TestDB(ctx, b) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(b), testAdapterConfig(dbPool)) - - for i := 0; i < b.N*100; i++ { - insertParams := makeFakeJobInsertParams(i, nil) - if _, err := adapter.JobInsert(ctx, insertParams); err != nil { - b.Fatal(err) - } - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - if _, err := adapter.JobGetAvailable(ctx, rivercommon.QueueDefault, 100); err != nil { - b.Fatal(err) - } - } -} - -func Benchmark_StandardAdapter_Fetch_100_Parallelized(b *testing.B) { 
- ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, b) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - adapter := NewStandardAdapter(riverinternaltest.BaseServiceArchetype(b), testAdapterConfig(dbPool)) - - for i := 0; i < b.N*100*runtime.NumCPU(); i++ { - insertParams := makeFakeJobInsertParams(i, nil) - if _, err := adapter.JobInsert(ctx, insertParams); err != nil { - b.Fatal(err) - } - } - - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - if _, err := adapter.JobGetAvailable(ctx, rivercommon.QueueDefault, 100); err != nil { - b.Fatal(err) - } - } - }) -} - -func testAdapterConfig(ex dbutil.Executor) *StandardAdapterConfig { - return &StandardAdapterConfig{ - AdvisoryLockPrefix: 0, - Executor: ex, - WorkerName: "fakeWorker0", - } -} - -type makeFakeJobInsertParamsOpts struct { - Metadata []byte - Queue *string - ScheduledAt *time.Time - State *dbsqlc.JobState -} - -func makeFakeJobInsertParams(i int, opts *makeFakeJobInsertParamsOpts) *JobInsertParams { - if opts == nil { - opts = &makeFakeJobInsertParamsOpts{} - } - - metadata := []byte("{}") - if len(opts.Metadata) > 0 { - metadata = opts.Metadata - } - - return &JobInsertParams{ - EncodedArgs: []byte(fmt.Sprintf(`{"job_num":%d}`, i)), - Kind: "fake_job", - MaxAttempts: rivercommon.MaxAttemptsDefault, - Metadata: metadata, - Priority: rivercommon.PriorityDefault, - Queue: ptrutil.ValOrDefault(opts.Queue, rivercommon.QueueDefault), - ScheduledAt: ptrutil.ValOrDefault(opts.ScheduledAt, time.Time{}), - State: ptrutil.ValOrDefault(opts.State, dbsqlc.JobStateAvailable), - } -} diff --git a/internal/dbadaptertest/test_adapter.go b/internal/dbadaptertest/test_adapter.go deleted file mode 100644 index d81e3647..00000000 --- a/internal/dbadaptertest/test_adapter.go +++ /dev/null @@ -1,232 +0,0 @@ -package dbadaptertest - -import ( - "context" - "sync" - "time" - - "github.com/jackc/pgx/v5" - - "github.com/riverqueue/river/internal/dbadapter" - "github.com/riverqueue/river/internal/dbsqlc" -) - -// TestAdapter is an Adapter that allows any of its methods to be overridden, -// automatically falling back to the fallthroughAdapter if the method is not -// overridden. 
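(A hypothetical usage sketch of the override-with-fallthrough pattern this comment describes, ahead of the type definition below — this test is not code from the repository; the test name and the "example" kind are invented. Only JobInsert is overridden, so the fallthrough adapter is never reached, and the Called flag records that the call was observed.)

package example_test

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/riverqueue/river/internal/dbadapter"
	"github.com/riverqueue/river/internal/dbadaptertest"
)

// TestJobInsertFailure overrides a single TestAdapter method; all other
// methods would fall through to the wrapped adapter.
func TestJobInsertFailure(t *testing.T) {
	adapter := &dbadaptertest.TestAdapter{
		JobInsertFunc: func(ctx context.Context, params *dbadapter.JobInsertParams) (*dbadapter.JobInsertResult, error) {
			return nil, errors.New("forced failure for this test")
		},
	}

	_, err := adapter.JobInsert(context.Background(), &dbadapter.JobInsertParams{Kind: "example"})
	require.Error(t, err)
	require.True(t, adapter.JobInsertCalled)
}
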
-type TestAdapter struct { - fallthroughAdapter dbadapter.Adapter - mu sync.Mutex - - JobCancelCalled bool - JobCancelTxCalled bool - JobInsertCalled bool - JobInsertTxCalled bool - JobInsertManyCalled bool - JobInsertManyTxCalled bool - JobGetCalled bool - JobGetTxCalled bool - JobGetAvailableCalled bool - JobGetAvailableTxCalled bool - JobListCalled bool - JobListTxCalled bool - JobRetryImmediatelyCalled bool - JobRetryImmediatelyTxCalled bool - JobSetStateIfRunningCalled bool - LeadershipAttemptElectCalled bool - LeadershipResignedCalled bool - - JobCancelFunc func(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) - JobCancelTxFunc func(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) - JobInsertFunc func(ctx context.Context, params *dbadapter.JobInsertParams) (*dbadapter.JobInsertResult, error) - JobInsertTxFunc func(ctx context.Context, tx pgx.Tx, params *dbadapter.JobInsertParams) (*dbadapter.JobInsertResult, error) - JobInsertManyFunc func(ctx context.Context, params []*dbadapter.JobInsertParams) (int64, error) - JobInsertManyTxFunc func(ctx context.Context, tx pgx.Tx, params []*dbadapter.JobInsertParams) (int64, error) - JobGetFunc func(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) - JobGetTxFunc func(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) - JobGetAvailableFunc func(ctx context.Context, queueName string, limit int32) ([]*dbsqlc.RiverJob, error) - JobGetAvailableTxFunc func(ctx context.Context, tx pgx.Tx, queueName string, limit int32) ([]*dbsqlc.RiverJob, error) - JobListFunc func(ctx context.Context, params dbadapter.JobListParams) ([]*dbsqlc.RiverJob, error) - JobListTxFunc func(ctx context.Context, tx pgx.Tx, params dbadapter.JobListParams) ([]*dbsqlc.RiverJob, error) - JobRetryImmediatelyFunc func(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) - JobRetryImmediatelyTxFunc func(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) - JobSetStateIfRunningFunc func(ctx context.Context, params *dbadapter.JobSetStateIfRunningParams) (*dbsqlc.RiverJob, error) - LeadershipAttemptElectFunc func(ctx context.Context) (bool, error) - LeadershipResignFunc func(ctx context.Context, name string, leaderID string) error -} - -func (ta *TestAdapter) JobCancel(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobCancelCalled) - - if ta.JobCancelFunc != nil { - return ta.JobCancelFunc(ctx, id) - } - - return ta.fallthroughAdapter.JobCancel(ctx, id) -} - -func (ta *TestAdapter) JobCancelTx(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobCancelTxCalled) - - if ta.JobCancelTxFunc != nil { - return ta.JobCancelTxFunc(ctx, tx, id) - } - - return ta.fallthroughAdapter.JobCancel(ctx, id) -} - -func (ta *TestAdapter) JobInsert(ctx context.Context, params *dbadapter.JobInsertParams) (*dbadapter.JobInsertResult, error) { - ta.atomicSetBoolTrue(&ta.JobInsertCalled) - - if ta.JobInsertFunc != nil { - return ta.JobInsertFunc(ctx, params) - } - - return ta.fallthroughAdapter.JobInsert(ctx, params) -} - -func (ta *TestAdapter) JobInsertTx(ctx context.Context, tx pgx.Tx, params *dbadapter.JobInsertParams) (*dbadapter.JobInsertResult, error) { - ta.atomicSetBoolTrue(&ta.JobInsertTxCalled) - - if ta.JobInsertTxFunc != nil { - return ta.JobInsertTxFunc(ctx, tx, params) - } - - return ta.fallthroughAdapter.JobInsertTx(ctx, tx, params) -} - -func (ta *TestAdapter) JobInsertMany(ctx context.Context, params []*dbadapter.JobInsertParams) (int64, 
error) { - ta.atomicSetBoolTrue(&ta.JobInsertManyCalled) - - if ta.JobInsertManyFunc != nil { - return ta.JobInsertManyFunc(ctx, params) - } - - return ta.fallthroughAdapter.JobInsertMany(ctx, params) -} - -func (ta *TestAdapter) JobInsertManyTx(ctx context.Context, tx pgx.Tx, params []*dbadapter.JobInsertParams) (int64, error) { - ta.atomicSetBoolTrue(&ta.JobInsertManyTxCalled) - - if ta.JobInsertManyTxFunc != nil { - return ta.JobInsertManyTxFunc(ctx, tx, params) - } - - return ta.fallthroughAdapter.JobInsertManyTx(ctx, tx, params) -} - -func (ta *TestAdapter) JobGet(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobGetCalled) - - if ta.JobGetFunc != nil { - return ta.JobGetFunc(ctx, id) - } - - return ta.fallthroughAdapter.JobGet(ctx, id) -} - -func (ta *TestAdapter) JobGetTx(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobGetTxCalled) - - if ta.JobCancelTxFunc != nil { - return ta.JobCancelTxFunc(ctx, tx, id) - } - - return ta.fallthroughAdapter.JobGet(ctx, id) -} - -func (ta *TestAdapter) JobGetAvailable(ctx context.Context, queueName string, limit int32) ([]*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobGetAvailableCalled) - - if ta.JobGetAvailableFunc != nil { - return ta.JobGetAvailableFunc(ctx, queueName, limit) - } - - return ta.fallthroughAdapter.JobGetAvailable(ctx, queueName, limit) -} - -func (ta *TestAdapter) JobGetAvailableTx(ctx context.Context, tx pgx.Tx, queueName string, limit int32) ([]*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobGetAvailableTxCalled) - - if ta.JobGetAvailableTxFunc != nil { - return ta.JobGetAvailableTxFunc(ctx, tx, queueName, limit) - } - - return ta.fallthroughAdapter.JobGetAvailableTx(ctx, tx, queueName, limit) -} - -func (ta *TestAdapter) JobList(ctx context.Context, params dbadapter.JobListParams) ([]*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobListCalled) - - if ta.JobListFunc != nil { - return ta.JobListFunc(ctx, params) - } - - return ta.fallthroughAdapter.JobList(ctx, params) -} - -func (ta *TestAdapter) JobListTx(ctx context.Context, tx pgx.Tx, params dbadapter.JobListParams) ([]*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobListTxCalled) - - if ta.JobListTxFunc != nil { - return ta.JobListTxFunc(ctx, tx, params) - } - - return ta.fallthroughAdapter.JobListTx(ctx, tx, params) -} - -func (ta *TestAdapter) JobRetryImmediately(ctx context.Context, id int64) (*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobRetryImmediatelyCalled) - - if ta.JobRetryImmediatelyFunc != nil { - return ta.JobRetryImmediatelyFunc(ctx, id) - } - - return ta.fallthroughAdapter.JobRetryImmediately(ctx, id) -} - -func (ta *TestAdapter) JobRetryImmediatelyTx(ctx context.Context, tx pgx.Tx, id int64) (*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobRetryImmediatelyTxCalled) - - if ta.JobRetryImmediatelyFunc != nil { - return ta.JobRetryImmediatelyTxFunc(ctx, tx, id) - } - - return ta.fallthroughAdapter.JobRetryImmediatelyTx(ctx, tx, id) -} - -func (ta *TestAdapter) JobSetStateIfRunning(ctx context.Context, params *dbadapter.JobSetStateIfRunningParams) (*dbsqlc.RiverJob, error) { - ta.atomicSetBoolTrue(&ta.JobSetStateIfRunningCalled) - - if ta.JobSetStateIfRunningFunc != nil { - return ta.JobSetStateIfRunningFunc(ctx, params) - } - - return ta.fallthroughAdapter.JobSetStateIfRunning(ctx, params) -} - -func (ta *TestAdapter) LeadershipAttemptElect(ctx context.Context, alreadyElected bool, name, leaderID string, ttl 
time.Duration) (bool, error) { - ta.atomicSetBoolTrue(&ta.LeadershipAttemptElectCalled) - - if ta.LeadershipAttemptElectFunc != nil { - return ta.LeadershipAttemptElectFunc(ctx) - } - - return ta.fallthroughAdapter.LeadershipAttemptElect(ctx, alreadyElected, name, leaderID, ttl) -} - -func (ta *TestAdapter) LeadershipResign(ctx context.Context, name, leaderID string) error { - ta.atomicSetBoolTrue(&ta.LeadershipResignedCalled) - - if ta.LeadershipResignFunc != nil { - return ta.LeadershipResignFunc(ctx, name, leaderID) - } - - return ta.fallthroughAdapter.LeadershipResign(ctx, name, leaderID) -} - -func (ta *TestAdapter) atomicSetBoolTrue(b *bool) { - ta.mu.Lock() - *b = true - ta.mu.Unlock() -} diff --git a/internal/dblist/db_list.go b/internal/dblist/db_list.go new file mode 100644 index 00000000..aed557d7 --- /dev/null +++ b/internal/dblist/db_list.go @@ -0,0 +1,121 @@ +package dblist + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/rivertype" +) + +const jobList = `-- name: JobList :many
SELECT
  %s
FROM
  river_job
%s
ORDER BY
  %s
LIMIT @count::integer
` + +type SortOrder int + +const ( + SortOrderUnspecified SortOrder = iota + SortOrderAsc + SortOrderDesc +) + +type JobListOrderBy struct { + Expr string + Order SortOrder +} + +type JobListParams struct { + Conditions string + Kinds []string + LimitCount int32 + NamedArgs map[string]any + OrderBy []JobListOrderBy + Priorities []int16 + Queues []string + State rivertype.JobState +} + +func JobList(ctx context.Context, exec riverdriver.Executor, params *JobListParams) ([]*rivertype.JobRow, error) { + var conditionsBuilder strings.Builder + + orderBy := make([]JobListOrderBy, len(params.OrderBy)) + for i, o := range params.OrderBy { + orderBy[i] = JobListOrderBy{ + Expr: o.Expr, + Order: o.Order, + } + } + + namedArgs := params.NamedArgs + if namedArgs == nil { + namedArgs = make(map[string]any) + } + + writeWhereOrAnd := func() { + if conditionsBuilder.Len() == 0 { + conditionsBuilder.WriteString("WHERE\n ") + } else { + conditionsBuilder.WriteString("\n AND ") + } + } + + if len(params.Kinds) > 0 { + writeWhereOrAnd() + conditionsBuilder.WriteString("kind = any(@kinds::text[])") + namedArgs["kinds"] = params.Kinds + } + + if len(params.Queues) > 0 { + writeWhereOrAnd() + conditionsBuilder.WriteString("queue = any(@queues::text[])") + namedArgs["queues"] = params.Queues + } + + if params.State != "" { + writeWhereOrAnd() + conditionsBuilder.WriteString("state = @state::river_job_state") + namedArgs["state"] = params.State + } + + if params.Conditions != "" { + writeWhereOrAnd() + conditionsBuilder.WriteString(params.Conditions) + } + + if params.LimitCount < 1 { + return nil, errors.New("required parameter 'Count' in JobList must be greater than zero") + } + namedArgs["count"] = params.LimitCount + + if len(params.OrderBy) == 0 { + return nil, errors.New("sort order is required") + } + + var orderByBuilder strings.Builder + + for i, orderBy := range params.OrderBy { + orderByBuilder.WriteString(orderBy.Expr) + if orderBy.Order == SortOrderAsc { + orderByBuilder.WriteString(" ASC") + } else if orderBy.Order == SortOrderDesc { + orderByBuilder.WriteString(" DESC") + } + if i < len(params.OrderBy)-1 { + orderByBuilder.WriteString(", ") + } + } + + sql := fmt.Sprintf(jobList, exec.JobListFields(), conditionsBuilder.String(), orderByBuilder.String()) + + return exec.JobList(ctx, sql, namedArgs) +} diff --git a/internal/dblist/db_list_test.go 
b/internal/dblist/db_list_test.go new file mode 100644 index 00000000..6a726160 --- /dev/null +++ b/internal/dblist/db_list_test.go @@ -0,0 +1,205 @@ +package dblist + +import ( + "context" + "testing" + "time" + + "github.com/jackc/pgx/v5" + "github.com/stretchr/testify/require" + + "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/riverinternaltest/testfactory" + "github.com/riverqueue/river/internal/util/ptrutil" + "github.com/riverqueue/river/internal/util/sliceutil" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivertype" +) + +func TestJobListNoJobs(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + type testBundle struct { + exec riverdriver.Executor + } + + setup := func() *testBundle { + driver := riverpgxv5.New(nil) + + return &testBundle{ + exec: driver.UnwrapExecutor(riverinternaltest.TestTx(ctx, t)), + } + } + + t.Run("Minimal", func(t *testing.T) { + t.Parallel() + + bundle := setup() + + _, err := JobList(ctx, bundle.exec, &JobListParams{ + State: rivertype.JobStateCompleted, + LimitCount: 1, + OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderAsc}}, + }) + require.NoError(t, err) + }) + + t.Run("WithConditionsAndSortOrders", func(t *testing.T) { + t.Parallel() + + bundle := setup() + + _, err := JobList(ctx, bundle.exec, &JobListParams{ + Conditions: "queue = 'test' AND priority = 1 AND args->>'foo' = @foo", + NamedArgs: pgx.NamedArgs{"foo": "bar"}, + State: rivertype.JobStateCompleted, + LimitCount: 1, + OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderAsc}}, + }) + require.NoError(t, err) + }) +} + +func TestJobListWithJobs(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + type testBundle struct { + baselineTime time.Time // baseline time frozen at now when setup is called + driver riverdriver.Driver[pgx.Tx] + exec riverdriver.Executor + jobs []*rivertype.JobRow + } + + setup := func(t *testing.T) *testBundle { + t.Helper() + + var ( + driver = riverpgxv5.New(nil) + tx = riverinternaltest.TestTx(ctx, t) + exec = driver.UnwrapExecutor(tx) + ) + + job1 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{Queue: ptrutil.Ptr("priority")}) + job2 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{EncodedArgs: []byte(`{"job_num": 2}`)}) + job3 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{Metadata: []byte(`{"some_key": "some_value"}`)}) + job4 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) + + return &testBundle{ + baselineTime: time.Now(), + driver: driver, + exec: exec, + jobs: []*rivertype.JobRow{job1, job2, job3, job4}, + } + } + + type testListFunc func(jobs []*rivertype.JobRow, err error) + + execTest := func(ctx context.Context, t *testing.T, bundle *testBundle, params *JobListParams, testFunc testListFunc) { + t.Helper() + t.Logf("testing JobList in Executor") + jobs, err := JobList(ctx, bundle.exec, params) + testFunc(jobs, err) + + t.Logf("testing JobListTx") + // use a sub-transaction in case it's rolled back or errors: + execTx, err := bundle.exec.Begin(ctx) + require.NoError(t, err) + defer execTx.Rollback(ctx) + jobs, err = JobList(ctx, execTx, params) + testFunc(jobs, err) + } + + t.Run("Minimal", func(t *testing.T) { + t.Parallel() + + bundle := setup(t) + + params := &JobListParams{ + LimitCount: 2, + OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderDesc}}, + State: rivertype.JobStateAvailable, + } + + execTest(ctx, 
t, bundle, params, func(jobs []*rivertype.JobRow, err error) { + require.NoError(t, err) + + // job 1 is excluded due to pagination limit of 2, while job 4 is excluded + // due to its state: + job2 := bundle.jobs[1] + job3 := bundle.jobs[2] + + returnedIDs := sliceutil.Map(jobs, func(j *rivertype.JobRow) int64 { return j.ID }) + require.Equal(t, []int64{job3.ID, job2.ID}, returnedIDs) + }) + }) + + t.Run("ComplexConditionsWithNamedArgs", func(t *testing.T) { + t.Parallel() + + bundle := setup(t) + + params := &JobListParams{ + Conditions: "jsonb_extract_path(args, VARIADIC @paths1::text[]) = @value1::jsonb", + LimitCount: 2, + NamedArgs: map[string]any{"paths1": []string{"job_num"}, "value1": 2}, + OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderDesc}}, + State: rivertype.JobStateAvailable, + } + + execTest(ctx, t, bundle, params, func(jobs []*rivertype.JobRow, err error) { + require.NoError(t, err) + + job2 := bundle.jobs[1] + returnedIDs := sliceutil.Map(jobs, func(j *rivertype.JobRow) int64 { return j.ID }) + require.Equal(t, []int64{job2.ID}, returnedIDs) + }) + }) + + t.Run("ConditionsWithQueues", func(t *testing.T) { + t.Parallel() + + bundle := setup(t) + + params := &JobListParams{ + Conditions: "finalized_at IS NULL", + LimitCount: 2, + OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderDesc}}, + Queues: []string{"priority"}, + State: rivertype.JobStateAvailable, + } + + execTest(ctx, t, bundle, params, func(jobs []*rivertype.JobRow, err error) { + require.NoError(t, err) + + job1 := bundle.jobs[0] + returnedIDs := sliceutil.Map(jobs, func(j *rivertype.JobRow) int64 { return j.ID }) + require.Equal(t, []int64{job1.ID}, returnedIDs) + }) + }) + + t.Run("WithMetadataAndNoStateFilter", func(t *testing.T) { + t.Parallel() + + bundle := setup(t) + + params := &JobListParams{ + Conditions: "metadata @> @metadata_filter::jsonb", + LimitCount: 2, + NamedArgs: map[string]any{"metadata_filter": `{"some_key": "some_value"}`}, + OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderDesc}}, + } + + execTest(ctx, t, bundle, params, func(jobs []*rivertype.JobRow, err error) { + require.NoError(t, err) + + job3 := bundle.jobs[2] + returnedIDs := sliceutil.Map(jobs, func(j *rivertype.JobRow) int64 { return j.ID }) + require.Equal(t, []int64{job3.ID}, returnedIDs) + }) + }) +} diff --git a/internal/dblist/job_list.go b/internal/dblist/job_list.go deleted file mode 100644 index 1ba278a7..00000000 --- a/internal/dblist/job_list.go +++ /dev/null @@ -1,130 +0,0 @@ -package dblist - -import ( - "context" - "errors" - "fmt" - "strings" - - "github.com/jackc/pgx/v5" - - "github.com/riverqueue/river/internal/dbsqlc" -) - -const jobList = `-- name: JobList :many -SELECT - id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags -FROM - river_job -%s -ORDER BY - %s -LIMIT @count::integer -` - -type SortOrder int - -const ( - SortOrderUnspecified SortOrder = iota - SortOrderAsc - SortOrderDesc -) - -type JobListOrderBy struct { - Expr string - Order SortOrder -} - -type JobListParams struct { - State dbsqlc.JobState - Priorities []int16 - Conditions string - OrderBy []JobListOrderBy - NamedArgs map[string]any - LimitCount int32 -} - -func JobList(ctx context.Context, tx pgx.Tx, arg JobListParams) ([]*dbsqlc.RiverJob, error) { - namedArgs := make(pgx.NamedArgs) - for k, v := range arg.NamedArgs { - namedArgs[k] = v - } - if arg.LimitCount < 1 { - return nil, errors.New("required argument 'Count' in 
JobList must be greater than zero") - } - namedArgs["count"] = arg.LimitCount - - if len(arg.OrderBy) == 0 { - return nil, errors.New("sort order is required") - } - - var orderByBuilder strings.Builder - - for i, orderBy := range arg.OrderBy { - orderByBuilder.WriteString(orderBy.Expr) - if orderBy.Order == SortOrderAsc { - orderByBuilder.WriteString(" ASC") - } else if orderBy.Order == SortOrderDesc { - orderByBuilder.WriteString(" DESC") - } - if i < len(arg.OrderBy)-1 { - orderByBuilder.WriteString(", ") - } - } - - var conditions []string - if arg.State != "" { - conditions = append(conditions, "state = @state::river_job_state") - namedArgs["state"] = arg.State - } - if arg.Conditions != "" { - conditions = append(conditions, arg.Conditions) - } - var conditionsBuilder strings.Builder - if len(conditions) > 0 { - conditionsBuilder.WriteString("WHERE\n ") - } - for i, condition := range conditions { - if i > 0 { - conditionsBuilder.WriteString("\n AND ") - } - conditionsBuilder.WriteString(condition) - } - - query := fmt.Sprintf(jobList, conditionsBuilder.String(), orderByBuilder.String()) - rows, err := tx.Query(ctx, query, namedArgs) - if err != nil { - return nil, err - } - defer rows.Close() - - var items []*dbsqlc.RiverJob - for rows.Next() { - var i dbsqlc.RiverJob - if err := rows.Scan( - &i.ID, - &i.Args, - &i.Attempt, - &i.AttemptedAt, - &i.AttemptedBy, - &i.CreatedAt, - &i.Errors, - &i.FinalizedAt, - &i.Kind, - &i.MaxAttempts, - &i.Metadata, - &i.Priority, - &i.Queue, - &i.State, - &i.ScheduledAt, - &i.Tags, - ); err != nil { - return nil, err - } - items = append(items, &i) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} diff --git a/internal/dblist/job_list_test.go b/internal/dblist/job_list_test.go deleted file mode 100644 index 0b4714c3..00000000 --- a/internal/dblist/job_list_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package dblist - -import ( - "context" - "testing" - - "github.com/jackc/pgx/v5" - "github.com/stretchr/testify/require" - - "github.com/riverqueue/river/internal/dbsqlc" - "github.com/riverqueue/river/internal/riverinternaltest" -) - -func TestJobList(t *testing.T) { - t.Parallel() - - t.Run("Minimal", func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - tx := riverinternaltest.TestTx(ctx, t) - - _, err := JobList(ctx, tx, JobListParams{ - State: dbsqlc.JobStateCompleted, - LimitCount: 1, - OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderAsc}}, - }) - require.NoError(t, err) - }) - - t.Run("WithConditionsAndSortOrders", func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - tx := riverinternaltest.TestTx(ctx, t) - - _, err := JobList(ctx, tx, JobListParams{ - Conditions: "queue = 'test' AND priority = 1 AND args->>'foo' = @foo", - NamedArgs: pgx.NamedArgs{"foo": "bar"}, - State: dbsqlc.JobStateCompleted, - LimitCount: 1, - OrderBy: []JobListOrderBy{{Expr: "id", Order: SortOrderAsc}}, - }) - require.NoError(t, err) - }) -} diff --git a/internal/dbsqlc/db.go b/internal/dbsqlc/db.go deleted file mode 100644 index 1446d77b..00000000 --- a/internal/dbsqlc/db.go +++ /dev/null @@ -1,26 +0,0 @@ -// Code generated by sqlc. DO NOT EDIT. 
-// versions: -// sqlc v1.24.0 - -package dbsqlc - -import ( - "context" - - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgconn" -) - -type DBTX interface { - Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) - Query(context.Context, string, ...interface{}) (pgx.Rows, error) - QueryRow(context.Context, string, ...interface{}) pgx.Row - CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) -} - -func New() *Queries { - return &Queries{} -} - -type Queries struct { -} diff --git a/internal/dbsqlc/models.go b/internal/dbsqlc/models.go deleted file mode 100644 index 746965a9..00000000 --- a/internal/dbsqlc/models.go +++ /dev/null @@ -1,84 +0,0 @@ -// Code generated by sqlc. DO NOT EDIT. -// versions: -// sqlc v1.24.0 - -package dbsqlc - -import ( - "database/sql/driver" - "fmt" - "time" -) - -type JobState string - -const ( - JobStateAvailable JobState = "available" - JobStateCancelled JobState = "cancelled" - JobStateCompleted JobState = "completed" - JobStateDiscarded JobState = "discarded" - JobStateRetryable JobState = "retryable" - JobStateRunning JobState = "running" - JobStateScheduled JobState = "scheduled" -) - -func (e *JobState) Scan(src interface{}) error { - switch s := src.(type) { - case []byte: - *e = JobState(s) - case string: - *e = JobState(s) - default: - return fmt.Errorf("unsupported scan type for JobState: %T", src) - } - return nil -} - -type NullJobState struct { - JobState JobState - Valid bool // Valid is true if JobState is not NULL -} - -// Scan implements the Scanner interface. -func (ns *NullJobState) Scan(value interface{}) error { - if value == nil { - ns.JobState, ns.Valid = "", false - return nil - } - ns.Valid = true - return ns.JobState.Scan(value) -} - -// Value implements the driver Valuer interface. 
-func (ns NullJobState) Value() (driver.Value, error) { - if !ns.Valid { - return nil, nil - } - return string(ns.JobState), nil -} - -type RiverJob struct { - ID int64 - Args []byte - Attempt int16 - AttemptedAt *time.Time - AttemptedBy []string - CreatedAt time.Time - Errors []AttemptError - FinalizedAt *time.Time - Kind string - MaxAttempts int16 - Metadata []byte - Priority int16 - Queue string - State JobState - ScheduledAt time.Time - Tags []string -} - -type RiverLeader struct { - ElectedAt time.Time - ExpiresAt time.Time - LeaderID string - Name string -} diff --git a/internal/dbsqlc/river_job.sql b/internal/dbsqlc/river_job.sql deleted file mode 100644 index 53351904..00000000 --- a/internal/dbsqlc/river_job.sql +++ /dev/null @@ -1,388 +0,0 @@ -CREATE TYPE river_job_state AS ENUM( - 'available', - 'cancelled', - 'completed', - 'discarded', - 'retryable', - 'running', - 'scheduled' -); - -CREATE TABLE river_job( - id bigserial PRIMARY KEY, - args jsonb, - attempt smallint NOT NULL DEFAULT 0, - attempted_at timestamptz, - attempted_by text[], - created_at timestamptz NOT NULL DEFAULT NOW(), - errors jsonb[], - finalized_at timestamptz, - kind text NOT NULL, - max_attempts smallint NOT NULL, - metadata jsonb NOT NULL DEFAULT '{}' ::jsonb, - priority smallint NOT NULL DEFAULT 1, - queue text NOT NULL DEFAULT 'default' ::text, - state river_job_state NOT NULL DEFAULT 'available' ::river_job_state, - scheduled_at timestamptz NOT NULL DEFAULT NOW(), - tags varchar(255)[] NOT NULL DEFAULT '{}' ::varchar(255)[], - CONSTRAINT finalized_or_finalized_at_null CHECK ((state IN ('cancelled', 'completed', 'discarded') AND finalized_at IS NOT NULL) OR finalized_at IS NULL), - CONSTRAINT priority_in_range CHECK (priority >= 1 AND priority <= 4), - CONSTRAINT queue_length CHECK (char_length(queue) > 0 AND char_length(queue) < 128), - CONSTRAINT kind_length CHECK (char_length(kind) > 0 AND char_length(kind) < 128) -); - --- name: JobCancel :one -WITH locked_job AS ( - SELECT - id, queue, state, finalized_at - FROM river_job - WHERE - river_job.id = @id - FOR UPDATE -), - -notification AS ( - SELECT - id, - pg_notify(@job_control_topic, json_build_object('action', 'cancel', 'job_id', id, 'queue', queue)::text) - FROM - locked_job - WHERE - state NOT IN ('cancelled', 'completed', 'discarded') - AND finalized_at IS NULL -), - -updated_job AS ( - UPDATE river_job - SET - -- If the job is actively running, we want to let its current client and - -- producer handle the cancellation. Otherwise, immediately cancel it. 
- state = CASE WHEN state = 'running'::river_job_state THEN state ELSE 'cancelled'::river_job_state END, - finalized_at = CASE WHEN state = 'running'::river_job_state THEN finalized_at ELSE now() END, - -- Mark the job as cancelled by query so that the rescuer knows not to - -- rescue it, even if it gets stuck in the running state: - metadata = jsonb_set(metadata, '{cancel_attempted_at}'::text[], @cancel_attempted_at::jsonb, true) - FROM notification - WHERE - river_job.id = notification.id - RETURNING river_job.* -) - -SELECT * -FROM river_job -WHERE id = @id::bigint - AND id NOT IN (SELECT id FROM updated_job) -UNION -SELECT * -FROM updated_job; - --- name: JobCountRunning :one -SELECT - count(*) -FROM - river_job -WHERE - state = 'running'; - --- name: JobDeleteBefore :one -WITH deleted_jobs AS ( - DELETE FROM - river_job - WHERE - id IN ( - SELECT - id - FROM - river_job - WHERE - (state = 'cancelled' AND finalized_at < @cancelled_finalized_at_horizon::timestamptz) OR - (state = 'completed' AND finalized_at < @completed_finalized_at_horizon::timestamptz) OR - (state = 'discarded' AND finalized_at < @discarded_finalized_at_horizon::timestamptz) - ORDER BY id - LIMIT @max::bigint - ) - RETURNING * -) -SELECT - count(*) -FROM - deleted_jobs; - --- name: JobGetAvailable :many -WITH locked_jobs AS ( - SELECT - * - FROM - river_job - WHERE - state = 'available'::river_job_state - AND queue = @queue::text - AND scheduled_at <= now() - ORDER BY - priority ASC, - scheduled_at ASC, - id ASC - LIMIT @limit_count::integer - FOR UPDATE - SKIP LOCKED) -UPDATE - river_job -SET - state = 'running'::river_job_state, - attempt = river_job.attempt + 1, - attempted_at = now(), - attempted_by = array_append(river_job.attempted_by, @worker::text) -FROM - locked_jobs -WHERE - river_job.id = locked_jobs.id -RETURNING - river_job.*; - --- name: JobGetByKind :many -SELECT * -FROM river_job -WHERE kind = @kind -ORDER BY id; - --- name: JobGetByKindMany :many -SELECT * -FROM river_job -WHERE kind = any(@kind::text[]) -ORDER BY id; - --- name: JobGetByID :one -SELECT - * -FROM - river_job -WHERE - id = @id -LIMIT 1; - --- name: JobGetByIDMany :many -SELECT - * -FROM - river_job -WHERE - id = any(@id::bigint[]); - --- name: JobGetByKindAndUniqueProperties :one -SELECT * -FROM river_job -WHERE kind = @kind - AND CASE WHEN @by_args::boolean THEN args = @args ELSE true END - AND CASE WHEN @by_created_at::boolean THEN tstzrange(@created_at_start::timestamptz, @created_at_end::timestamptz, '[)') @> created_at ELSE true END - AND CASE WHEN @by_queue::boolean THEN queue = @queue ELSE true END - AND CASE WHEN @by_state::boolean THEN state::text = any(@state::text[]) ELSE true END; - --- name: JobGetStuck :many -SELECT - * -FROM - river_job -WHERE - state = 'running'::river_job_state - AND attempted_at < @stuck_horizon::timestamptz -LIMIT @limit_count::integer; - --- name: JobInsert :one -INSERT INTO river_job( - args, - attempt, - attempted_at, - created_at, - errors, - finalized_at, - kind, - max_attempts, - metadata, - priority, - queue, - scheduled_at, - state, - tags -) VALUES ( - @args::jsonb, - coalesce(@attempt::smallint, 0), - @attempted_at, - coalesce(sqlc.narg('created_at')::timestamptz, now()), - @errors::jsonb[], - @finalized_at, - @kind::text, - @max_attempts::smallint, - coalesce(@metadata::jsonb, '{}'), - @priority::smallint, - @queue::text, - coalesce(sqlc.narg('scheduled_at')::timestamptz, now()), - @state::river_job_state, - coalesce(@tags::varchar(255)[], '{}') -) RETURNING *; - --- name: JobInsertMany 
:copyfrom -INSERT INTO river_job( - args, - errors, - kind, - max_attempts, - metadata, - priority, - queue, - scheduled_at, - state, - tags -) VALUES ( - @args, - @errors, - @kind, - @max_attempts, - @metadata, - @priority, - @queue, - @scheduled_at, - @state, - @tags -); - --- name: JobRetryImmediately :one -WITH job_to_update AS ( - SELECT - id - FROM - river_job - WHERE - river_job.id = @id - FOR UPDATE -), - -updated_job AS ( - UPDATE river_job - SET - state = 'available'::river_job_state, - scheduled_at = now(), - max_attempts = CASE WHEN attempt = max_attempts THEN max_attempts + 1 ELSE max_attempts END, - finalized_at = NULL - FROM job_to_update - WHERE river_job.id = job_to_update.id - -- Do not touch running jobs: - AND river_job.state != 'running'::river_job_state - -- If the job is already available with a prior scheduled_at, leave it alone. - AND NOT (river_job.state = 'available'::river_job_state AND river_job.scheduled_at < now()) - RETURNING river_job.* -) - -SELECT * -FROM river_job -WHERE id = @id::bigint - AND id NOT IN (SELECT id FROM updated_job) -UNION -SELECT * -FROM updated_job; - --- name: JobSchedule :one -WITH jobs_to_schedule AS ( - SELECT id - FROM river_job - WHERE - state IN ('scheduled', 'retryable') - AND queue IS NOT NULL - AND priority >= 0 - AND scheduled_at <= @now::timestamptz - ORDER BY - priority, - scheduled_at, - id - LIMIT @max::bigint - FOR UPDATE -), -river_job_scheduled AS ( - UPDATE river_job - SET state = 'available'::river_job_state - FROM jobs_to_schedule - WHERE river_job.id = jobs_to_schedule.id - RETURNING * -) -SELECT count(*) -FROM ( -SELECT pg_notify(@insert_topic, json_build_object('queue', queue)::text) -FROM river_job_scheduled) AS notifications_sent; - --- name: JobSetState :one -UPDATE river_job -SET errors = CASE WHEN @error_do_update::boolean THEN array_append(errors, @error::jsonb) ELSE errors END, - finalized_at = CASE WHEN @finalized_at_do_update::boolean THEN @finalized_at ELSE finalized_at END, - max_attempts = CASE WHEN @max_attempts_update::boolean THEN @max_attempts ELSE max_attempts END, - scheduled_at = CASE WHEN @scheduled_at_do_update::boolean THEN @scheduled_at ELSE scheduled_at END, - state = @state -WHERE id = @id -RETURNING *; - --- name: JobSetStateIfRunning :one -WITH job_to_update AS ( - SELECT - id, - @state::river_job_state IN ('retryable'::river_job_state, 'scheduled'::river_job_state) AND metadata ? 'cancel_attempted_at' AS should_cancel - FROM river_job - WHERE id = @id::bigint - FOR UPDATE -), -updated_job AS ( - UPDATE river_job - SET - state = CASE WHEN should_cancel THEN 'cancelled'::river_job_state - ELSE @state::river_job_state END, - finalized_at = CASE WHEN should_cancel THEN now() - WHEN @finalized_at_do_update::boolean THEN @finalized_at - ELSE finalized_at END, - errors = CASE WHEN @error_do_update::boolean THEN array_append(errors, @error::jsonb) - ELSE errors END, - max_attempts = CASE WHEN NOT should_cancel AND @max_attempts_update::boolean THEN @max_attempts - ELSE max_attempts END, - scheduled_at = CASE WHEN NOT should_cancel AND @scheduled_at_do_update::boolean THEN @scheduled_at - ELSE scheduled_at END - FROM job_to_update - WHERE river_job.id = job_to_update.id - AND river_job.state = 'running'::river_job_state - RETURNING river_job.* -) -SELECT * -FROM river_job -WHERE id = @id::bigint - AND id NOT IN (SELECT id FROM updated_job) -UNION -SELECT * -FROM updated_job; - --- Run by the rescuer to queue for retry or discard depending on job state. 
--- name: JobRescueMany :exec -UPDATE river_job -SET - errors = array_append(errors, updated_job.error), - finalized_at = updated_job.finalized_at, - scheduled_at = updated_job.scheduled_at, - state = updated_job.state -FROM ( - SELECT - unnest(@id::bigint[]) AS id, - unnest(@error::jsonb[]) AS error, - nullif(unnest(@finalized_at::timestamptz[]), '0001-01-01 00:00:00 +0000') AS finalized_at, - unnest(@scheduled_at::timestamptz[]) AS scheduled_at, - unnest(@state::text[])::river_job_state AS state -) AS updated_job -WHERE river_job.id = updated_job.id; - --- A generalized update for any property on a job. This brings in a large number --- of parameters and therefore may be more suitable for testing than production. --- name: JobUpdate :one -UPDATE river_job -SET - attempt = CASE WHEN @attempt_do_update::boolean THEN @attempt ELSE attempt END, - attempted_at = CASE WHEN @attempted_at_do_update::boolean THEN @attempted_at ELSE attempted_at END, - finalized_at = CASE WHEN @finalized_at_do_update::boolean THEN @finalized_at ELSE finalized_at END, - state = CASE WHEN @state_do_update::boolean THEN @state ELSE state END -WHERE id = @id -RETURNING *; - --- name: PGAdvisoryXactLock :exec -SELECT pg_advisory_xact_lock(@key); diff --git a/internal/dbsqlc/river_job_ext.go b/internal/dbsqlc/river_job_ext.go deleted file mode 100644 index e4595622..00000000 --- a/internal/dbsqlc/river_job_ext.go +++ /dev/null @@ -1,44 +0,0 @@ -package dbsqlc - -import ( - "github.com/riverqueue/river/internal/util/sliceutil" - "github.com/riverqueue/river/rivertype" -) - -func JobRowFromInternal(internal *RiverJob) *rivertype.JobRow { - return &rivertype.JobRow{ - ID: internal.ID, - Attempt: max(int(internal.Attempt), 0), - AttemptedAt: internal.AttemptedAt, - AttemptedBy: internal.AttemptedBy, - CreatedAt: internal.CreatedAt, - EncodedArgs: internal.Args, - Errors: sliceutil.Map(internal.Errors, func(e AttemptError) rivertype.AttemptError { return AttemptErrorFromInternal(&e) }), - FinalizedAt: internal.FinalizedAt, - Kind: internal.Kind, - MaxAttempts: max(int(internal.MaxAttempts), 0), - Metadata: internal.Metadata, - Priority: max(int(internal.Priority), 0), - Queue: internal.Queue, - ScheduledAt: internal.ScheduledAt.UTC(), // TODO(brandur): Very weird this is the only place a UTC conversion happens. 
- State: rivertype.JobState(internal.State), - Tags: internal.Tags, - } -} - -func JobRowsFromInternal(internal []*RiverJob) []*rivertype.JobRow { - rows := make([]*rivertype.JobRow, len(internal)) - for i, j := range internal { - rows[i] = JobRowFromInternal(j) - } - return rows -} - -func AttemptErrorFromInternal(e *AttemptError) rivertype.AttemptError { - return rivertype.AttemptError{ - At: e.At, - Attempt: int(e.Attempt), - Error: e.Error, - Trace: e.Trace, - } -} diff --git a/internal/dbsqlc/river_leader.sql b/internal/dbsqlc/river_leader.sql deleted file mode 100644 index 2d7ebda4..00000000 --- a/internal/dbsqlc/river_leader.sql +++ /dev/null @@ -1,49 +0,0 @@ -CREATE UNLOGGED TABLE river_leader( - elected_at timestamptz NOT NULL, - expires_at timestamptz NOT NULL, - leader_id text NOT NULL, - name text PRIMARY KEY, - CONSTRAINT name_length CHECK (char_length(name) > 0 AND char_length(name) < 128), - CONSTRAINT leader_id_length CHECK (char_length(leader_id) > 0 AND char_length(leader_id) < 128) -); - --- name: LeadershipAttemptElect :execrows -INSERT INTO river_leader(name, leader_id, elected_at, expires_at) - VALUES (@name::text, @leader_id::text, now(), now() + @ttl::interval) -ON CONFLICT (name) - DO NOTHING; - --- name: LeadershipAttemptReelect :execrows -INSERT INTO river_leader(name, leader_id, elected_at, expires_at) - VALUES (@name::text, @leader_id::text, now(), now() + @ttl::interval) -ON CONFLICT (name) - DO UPDATE SET - expires_at = now() + @ttl::interval - WHERE - river_leader.leader_id = @leader_id::text; - --- name: LeadershipDeleteExpired :exec -DELETE FROM river_leader -WHERE name = @name::text - AND expires_at < now(); - --- name: LeadershipResign :exec -WITH currently_held_leaders AS ( - SELECT - * - FROM - river_leader - WHERE - name = @name::text - AND leader_id = @leader_id::text - FOR UPDATE -), -notified_resignations AS ( - SELECT - pg_notify(@leadership_topic, json_build_object('name', name, 'leader_id', leader_id, 'action', 'resigned')::text), - currently_held_leaders.name - FROM - currently_held_leaders) -DELETE FROM river_leader USING notified_resignations -WHERE river_leader.name = notified_resignations.name; - diff --git a/internal/dbsqlc/river_leader.sql.go b/internal/dbsqlc/river_leader.sql.go deleted file mode 100644 index 8f076d12..00000000 --- a/internal/dbsqlc/river_leader.sql.go +++ /dev/null @@ -1,100 +0,0 @@ -// Code generated by sqlc. DO NOT EDIT. 
-// versions: -// sqlc v1.24.0 -// source: river_leader.sql - -package dbsqlc - -import ( - "context" - - "time" -) - -const leadershipAttemptElect = `-- name: LeadershipAttemptElect :execrows -INSERT INTO river_leader(name, leader_id, elected_at, expires_at) - VALUES ($1::text, $2::text, now(), now() + $3::interval) -ON CONFLICT (name) - DO NOTHING -` - -type LeadershipAttemptElectParams struct { - Name string - LeaderID string - TTL time.Duration -} - -func (q *Queries) LeadershipAttemptElect(ctx context.Context, db DBTX, arg LeadershipAttemptElectParams) (int64, error) { - result, err := db.Exec(ctx, leadershipAttemptElect, arg.Name, arg.LeaderID, arg.TTL) - if err != nil { - return 0, err - } - return result.RowsAffected(), nil -} - -const leadershipAttemptReelect = `-- name: LeadershipAttemptReelect :execrows -INSERT INTO river_leader(name, leader_id, elected_at, expires_at) - VALUES ($1::text, $2::text, now(), now() + $3::interval) -ON CONFLICT (name) - DO UPDATE SET - expires_at = now() + $3::interval - WHERE - river_leader.leader_id = $2::text -` - -type LeadershipAttemptReelectParams struct { - Name string - LeaderID string - TTL time.Duration -} - -func (q *Queries) LeadershipAttemptReelect(ctx context.Context, db DBTX, arg LeadershipAttemptReelectParams) (int64, error) { - result, err := db.Exec(ctx, leadershipAttemptReelect, arg.Name, arg.LeaderID, arg.TTL) - if err != nil { - return 0, err - } - return result.RowsAffected(), nil -} - -const leadershipDeleteExpired = `-- name: LeadershipDeleteExpired :exec -DELETE FROM river_leader -WHERE name = $1::text - AND expires_at < now() -` - -func (q *Queries) LeadershipDeleteExpired(ctx context.Context, db DBTX, name string) error { - _, err := db.Exec(ctx, leadershipDeleteExpired, name) - return err -} - -const leadershipResign = `-- name: LeadershipResign :exec -WITH currently_held_leaders AS ( - SELECT - elected_at, expires_at, leader_id, name - FROM - river_leader - WHERE - name = $1::text - AND leader_id = $2::text - FOR UPDATE -), -notified_resignations AS ( - SELECT - pg_notify($3, json_build_object('name', name, 'leader_id', leader_id, 'action', 'resigned')::text), - currently_held_leaders.name - FROM - currently_held_leaders) -DELETE FROM river_leader USING notified_resignations -WHERE river_leader.name = notified_resignations.name -` - -type LeadershipResignParams struct { - Name string - LeaderID string - LeadershipTopic string -} - -func (q *Queries) LeadershipResign(ctx context.Context, db DBTX, arg LeadershipResignParams) error { - _, err := db.Exec(ctx, leadershipResign, arg.Name, arg.LeaderID, arg.LeadershipTopic) - return err -} diff --git a/internal/dbsqlc/sqlc.yaml b/internal/dbsqlc/sqlc.yaml deleted file mode 100644 index 04556ee7..00000000 --- a/internal/dbsqlc/sqlc.yaml +++ /dev/null @@ -1,54 +0,0 @@ -version: "2" -sql: - - engine: "postgresql" - queries: - - river_job.sql - - river_leader.sql - schema: - - river_job.sql - - river_leader.sql - gen: - go: - package: "dbsqlc" - sql_package: "pgx/v5" - out: "." 
- emit_exact_table_names: true - emit_methods_with_db_argument: true - emit_result_struct_pointers: true - - rename: - river_job_state: "JobState" - river_job_state_available: "JobStateAvailable" - river_job_state_cancelled: "JobStateCancelled" - river_job_state_completed: "JobStateCompleted" - river_job_state_discarded: "JobStateDiscarded" - river_job_state_retryable: "JobStateRetryable" - river_job_state_running: "JobStateRunning" - river_job_state_scheduled: "JobStateScheduled" - ttl: "TTL" - - overrides: - # broad types - - db_type: "pg_catalog.interval" - go_type: "time.Duration" - - db_type: "pg_catalog.interval" - # It seems like this could be the simpler `go_type: "*time.Duration"`, but - # that outputs double pointers like `**time.Duration` for reasons that are - # beyond me (bug?). The expanded version of `go_type` usage below works. - go_type: - import: "time" - type: "Duration" - pointer: true - nullable: true - - db_type: "timestamptz" - go_type: "time.Time" - - db_type: "timestamptz" - go_type: - type: "time.Time" - pointer: true - nullable: true - - # specific columns - - column: "river_job.errors" - go_type: - type: "[]AttemptError" diff --git a/internal/dbunique/db_unique.go b/internal/dbunique/db_unique.go new file mode 100644 index 00000000..7f3a44d6 --- /dev/null +++ b/internal/dbunique/db_unique.go @@ -0,0 +1,167 @@ +package dbunique + +import ( + "context" + "errors" + "fmt" + "slices" + "strings" + "time" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/util/hashutil" + "github.com/riverqueue/river/internal/util/sliceutil" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/rivertype" +) + +// When a job has specified unique options, but has not set the ByState +// parameter explicitly, this is the set of default states that are used to +// determine uniqueness. So for example, a new unique job may be inserted even +// if another job already exists, as long as that other job is set `cancelled` +// or `discarded`. +var defaultUniqueStates = []string{ //nolint:gochecknoglobals + string(rivertype.JobStateAvailable), + string(rivertype.JobStateCompleted), + string(rivertype.JobStateRunning), + string(rivertype.JobStateRetryable), + string(rivertype.JobStateScheduled), +} + +type UniqueOpts struct { + ByArgs bool + ByPeriod time.Duration + ByQueue bool + ByState []rivertype.JobState +} + +func (o *UniqueOpts) IsEmpty() bool { + return !o.ByArgs && + o.ByPeriod == time.Duration(0) && + !o.ByQueue && + o.ByState == nil +} + +type UniqueInserter struct { + baseservice.BaseService + AdvisoryLockPrefix int32 +} + +type JobInsertResult struct { + Job *rivertype.JobRow + UniqueSkippedAsDuplicate bool +} + +func (i *UniqueInserter) JobInsert(ctx context.Context, exec riverdriver.Executor, params *riverdriver.JobInsertParams, uniqueOpts *UniqueOpts) (*JobInsertResult, error) { + var execTx riverdriver.ExecutorTx + + if uniqueOpts != nil && !uniqueOpts.IsEmpty() { + // For uniqueness checks returns an advisory lock hash to use for lock, + // parameters to check for an existing unique job with the same + // properties, and a boolean indicating whether a uniqueness check + // should be performed at all (in some cases the check can be skipped if + // we can determine ahead of time that this insert will not violate + // uniqueness conditions). 
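+ //
+ // Illustratively: with ByQueue set on a "fake_job" insert into the
+ // default queue, the hash input is built up as roughly
+ // "unique_key" + "kind=fake_job" + "&queue=default" + "&state=<set>",
+ // so any two concurrent inserts sharing those dimensions derive the
+ // same advisory lock key and serialize against each other.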
+ buildUniqueParams := func() (*hashutil.AdvisoryLockHash, *riverdriver.JobGetByKindAndUniquePropertiesParams, bool) { + advisoryLockHash := hashutil.NewAdvisoryLockHash(i.AdvisoryLockPrefix) + advisoryLockHash.Write([]byte("unique_key")) + advisoryLockHash.Write([]byte("kind=" + params.Kind)) + + getParams := riverdriver.JobGetByKindAndUniquePropertiesParams{ + Kind: params.Kind, + } + + if uniqueOpts.ByArgs { + advisoryLockHash.Write([]byte("&args=")) + advisoryLockHash.Write(params.EncodedArgs) + + getParams.Args = params.EncodedArgs + getParams.ByArgs = true + } + + if uniqueOpts.ByPeriod != time.Duration(0) { + lowerPeriodBound := i.TimeNowUTC().Truncate(uniqueOpts.ByPeriod) + + advisoryLockHash.Write([]byte("&period=" + lowerPeriodBound.Format(time.RFC3339))) + + getParams.ByCreatedAt = true + getParams.CreatedAtStart = lowerPeriodBound + getParams.CreatedAtEnd = lowerPeriodBound.Add(uniqueOpts.ByPeriod) + } + + if uniqueOpts.ByQueue { + advisoryLockHash.Write([]byte("&queue=" + params.Queue)) + + getParams.ByQueue = true + getParams.Queue = params.Queue + } + + { + stateSet := defaultUniqueStates + if len(uniqueOpts.ByState) > 0 { + stateSet = sliceutil.Map(uniqueOpts.ByState, func(s rivertype.JobState) string { return string(s) }) + } + + advisoryLockHash.Write([]byte("&state=" + strings.Join(stateSet, ","))) + + if !slices.Contains(stateSet, string(params.State)) { + return nil, nil, false + } + + getParams.ByState = true + getParams.State = stateSet + } + + return advisoryLockHash, &getParams, true + } + + if advisoryLockHash, getParams, doUniquenessCheck := buildUniqueParams(); doUniquenessCheck { + // Begin a subtransaction + exec, err := exec.Begin(ctx) + if err != nil { + return nil, err + } + defer exec.Rollback(ctx) + + // Make the subtransaction available at function scope so it can be + // committed in cases where we insert a job. + execTx = exec + + // The wrapping transaction should maintain snapshot consistency + // even if we were to only have a SELECT + INSERT, but given that a + // conflict is possible, obtain an advisory lock based on the + // parameters of the unique job first, and have contending inserts + // wait for it. This is a synchronous lock so we rely on context + // timeout in case something goes wrong and it's blocking for too + // long. + if _, err := exec.PGAdvisoryXactLock(ctx, advisoryLockHash.Key()); err != nil { + return nil, fmt.Errorf("error acquiring unique lock: %w", err) + } + + existing, err := exec.JobGetByKindAndUniqueProperties(ctx, getParams) + if err != nil { + if !errors.Is(err, rivertype.ErrNotFound) { + return nil, fmt.Errorf("error getting unique job: %w", err) + } + } + + if existing != nil { + // Insert skipped; returns an existing row. 
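+ // (No explicit rollback is needed on this branch: the deferred
+ // Rollback above discards the subtransaction, which only held the
+ // advisory lock and the duplicate-lookup SELECT.)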
+ return &JobInsertResult{Job: existing, UniqueSkippedAsDuplicate: true}, nil + } + } + } + + jobRow, err := exec.JobInsert(ctx, params) + if err != nil { + return nil, err + } + + if execTx != nil { + if err := execTx.Commit(ctx); err != nil { + return nil, err + } + } + + return &JobInsertResult{Job: jobRow}, nil +} diff --git a/internal/dbunique/db_unique_test.go b/internal/dbunique/db_unique_test.go new file mode 100644 index 00000000..f39bf441 --- /dev/null +++ b/internal/dbunique/db_unique_test.go @@ -0,0 +1,492 @@ +package dbunique + +import ( + "context" + "runtime" + "sync" + "testing" + "time" + + "github.com/jackc/pgx/v5" + "github.com/stretchr/testify/require" + + "github.com/riverqueue/river/internal/baseservice" + "github.com/riverqueue/river/internal/rivercommon" + "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/util/dbutil" + "github.com/riverqueue/river/internal/util/ptrutil" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivertype" +) + +func TestUniqueInserter_JobInsert(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + type testBundle struct { + baselineTime time.Time // baseline time frozen at now when setup is called + driver riverdriver.Driver[pgx.Tx] + exec riverdriver.Executor + tx pgx.Tx + } + + setup := func(t *testing.T) (*UniqueInserter, *testBundle) { + t.Helper() + + var ( + driver = riverpgxv5.New(nil) + tx = riverinternaltest.TestTx(ctx, t) + ) + + bundle := &testBundle{ + baselineTime: time.Now(), + driver: driver, + exec: driver.UnwrapExecutor(tx), + tx: tx, + } + + inserter := baseservice.Init(riverinternaltest.BaseServiceArchetype(t), &UniqueInserter{}) + inserter.TimeNowUTC = func() time.Time { return bundle.baselineTime } + + return inserter, bundle + } + + makeInsertParams := func() *riverdriver.JobInsertParams { + return &riverdriver.JobInsertParams{ + EncodedArgs: []byte(`{}`), + Kind: "fake_job", + MaxAttempts: rivercommon.MaxAttemptsDefault, + Metadata: []byte(`{}`), + Priority: rivercommon.PriorityDefault, + Queue: rivercommon.QueueDefault, + ScheduledAt: nil, + State: rivertype.JobStateAvailable, + } + } + + t.Run("Success", func(t *testing.T) { + t.Parallel() + + inserter, bundle := setup(t) + + insertParams := makeInsertParams() + res, err := inserter.JobInsert(ctx, bundle.exec, insertParams, nil) + require.NoError(t, err) + + // Sanity check, following assertion depends on this: + require.Nil(t, insertParams.ScheduledAt) + + require.Greater(t, res.Job.ID, int64(0), "expected job ID to be set, got %d", res.Job.ID) + require.JSONEq(t, string(insertParams.EncodedArgs), string(res.Job.EncodedArgs)) + require.Equal(t, 0, res.Job.Attempt) + require.Nil(t, res.Job.AttemptedAt) + require.Empty(t, res.Job.AttemptedBy) + require.WithinDuration(t, time.Now(), res.Job.CreatedAt, 2*time.Second) + require.Empty(t, res.Job.Errors) + require.Nil(t, res.Job.FinalizedAt) + require.Equal(t, insertParams.Kind, res.Job.Kind) + require.Equal(t, insertParams.MaxAttempts, res.Job.MaxAttempts) + require.Equal(t, insertParams.Metadata, res.Job.Metadata) + require.Equal(t, insertParams.Priority, res.Job.Priority) + require.Equal(t, insertParams.Queue, res.Job.Queue) + require.Equal(t, rivertype.JobStateAvailable, res.Job.State) + require.WithinDuration(t, time.Now(), res.Job.ScheduledAt, 2*time.Second) + require.Empty(t, res.Job.Tags) + }) + + t.Run("InsertAndFetch", func(t *testing.T) { + t.Parallel() + + inserter, bundle 
:= setup(t) + + const maxJobsToFetch = 8 + + res, err := inserter.JobInsert(ctx, bundle.exec, makeInsertParams(), nil) + require.NoError(t, err) + require.NotEqual(t, 0, res.Job.ID, "expected job ID to be set, got %d", res.Job.ID) + require.WithinDuration(t, time.Now(), res.Job.ScheduledAt, 1*time.Second) + + jobs, err := bundle.exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: "test-id", + Max: maxJobsToFetch, + Queue: rivercommon.QueueDefault, + }) + require.NoError(t, err) + require.Len(t, jobs, 1, + "inserted 1 job but fetched %d jobs:\n%+v", len(jobs), jobs) + require.Equal(t, rivertype.JobStateRunning, jobs[0].State, + "expected selected job to be in running state, got %q", jobs[0].State) + + for i := 1; i < 10; i++ { + _, err := inserter.JobInsert(ctx, bundle.exec, makeInsertParams(), nil) + require.NoError(t, err) + } + + jobs, err = bundle.exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: "test-id", + Max: maxJobsToFetch, + Queue: rivercommon.QueueDefault, + }) + require.NoError(t, err) + require.Len(t, jobs, maxJobsToFetch, + "inserted 9 more jobs and expected to fetch max of %d jobs but fetched %d jobs:\n%+v", maxJobsToFetch, len(jobs), jobs) + for _, j := range jobs { + require.Equal(t, rivertype.JobStateRunning, j.State, + "expected selected job to be in running state, got %q", j.State) + } + + jobs, err = bundle.exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: "test-id", + Max: maxJobsToFetch, + Queue: rivercommon.QueueDefault, + }) + require.NoError(t, err) + require.Len(t, jobs, 1, + "expected to fetch 1 remaining job but fetched %d jobs:\n%+v", len(jobs), jobs) + }) + + t.Run("UniqueJobByArgs", func(t *testing.T) { + t.Parallel() + + inserter, bundle := setup(t) + + insertParams := makeInsertParams() + uniqueOpts := &UniqueOpts{ + ByArgs: true, + } + + res0, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.False(t, res0.UniqueSkippedAsDuplicate) + + // Insert a second job with the same args, but expect that the same job + // ID to come back because we're still within its unique parameters. + res1, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.Equal(t, res0.Job.ID, res1.Job.ID) + require.True(t, res1.UniqueSkippedAsDuplicate) + + insertParams.EncodedArgs = []byte(`{"key":"different"}`) + + // Same operation again, except that because we've modified the unique + // dimension, another job is allowed to be queued, so the new ID is + // not the same. + res2, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.NotEqual(t, res0.Job.ID, res2.Job.ID) + require.False(t, res2.UniqueSkippedAsDuplicate) + }) + + t.Run("UniqueJobByPeriod", func(t *testing.T) { + t.Parallel() + + inserter, bundle := setup(t) + + insertParams := makeInsertParams() + uniqueOpts := &UniqueOpts{ + ByPeriod: 15 * time.Minute, + } + + res0, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.False(t, res0.UniqueSkippedAsDuplicate) + + // Insert a second job with the same args, but expect that the same job + // ID to come back because we're still within its unique parameters. 
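+ // (ByPeriod buckets on TimeNowUTC().Truncate(ByPeriod), so both
+ // inserts land in the same 15-minute window here.)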
+ res1, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.Equal(t, res0.Job.ID, res1.Job.ID) + require.True(t, res1.UniqueSkippedAsDuplicate) + + inserter.TimeNowUTC = func() time.Time { return bundle.baselineTime.Add(uniqueOpts.ByPeriod).Add(1 * time.Second) } + + // Same operation again, except that because we've advanced time passed + // the period within unique bounds, another job is allowed to be queued, + // so the new ID is not the same. + res2, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.NotEqual(t, res0.Job.ID, res2.Job.ID) + require.False(t, res2.UniqueSkippedAsDuplicate) + }) + + t.Run("UniqueJobByQueue", func(t *testing.T) { + t.Parallel() + + inserter, bundle := setup(t) + + insertParams := makeInsertParams() + uniqueOpts := &UniqueOpts{ + ByQueue: true, + } + + res0, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.False(t, res0.UniqueSkippedAsDuplicate) + + // Insert a second job with the same args, but expect that the same job + // ID to come back because we're still within its unique parameters. + res1, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.Equal(t, res0.Job.ID, res1.Job.ID) + require.True(t, res1.UniqueSkippedAsDuplicate) + + insertParams.Queue = "alternate_queue" + + // Same operation again, except that because we've modified the unique + // dimension, another job is allowed to be queued, so the new ID is + // not the same. + res2, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.NotEqual(t, res0.Job.ID, res2.Job.ID) + require.False(t, res2.UniqueSkippedAsDuplicate) + }) + + t.Run("UniqueJobByState", func(t *testing.T) { + t.Parallel() + + inserter, bundle := setup(t) + + insertParams := makeInsertParams() + uniqueOpts := &UniqueOpts{ + ByState: []rivertype.JobState{rivertype.JobStateAvailable, rivertype.JobStateRunning}, + } + + res0, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.False(t, res0.UniqueSkippedAsDuplicate) + + // Insert a second job with the same args, but expect that the same job + // ID to come back because we're still within its unique parameters. + res1, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.Equal(t, res0.Job.ID, res1.Job.ID) + require.True(t, res1.UniqueSkippedAsDuplicate) + + // A new job is allowed if we're inserting the job with a state that's + // not included in the unique state set. + { + insertParams := *insertParams // dup + insertParams.State = rivertype.JobStateCompleted + + res2, err := inserter.JobInsert(ctx, bundle.exec, &insertParams, uniqueOpts) + require.NoError(t, err) + require.NotEqual(t, res0.Job.ID, res2.Job.ID) + require.False(t, res2.UniqueSkippedAsDuplicate) + } + + // A new job is also allowed if the state of the originally inserted job + // changes to one that's not included in the unique state set. 
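+ // (The duplicate lookup filters on the unique state set, so once the
+ // original row leaves those states it simply stops matching.)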
+ { + _, err := bundle.exec.JobUpdate(ctx, &riverdriver.JobUpdateParams{ + ID: res0.Job.ID, + StateDoUpdate: true, + State: rivertype.JobStateCompleted, + }) + require.NoError(t, err) + + res2, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.NotEqual(t, res0.Job.ID, res2.Job.ID) + require.False(t, res2.UniqueSkippedAsDuplicate) + } + }) + + // Unlike other unique options, state gets a default set when it's not + // supplied. This test case checks that the default is working as expected. + t.Run("UniqueJobByDefaultState", func(t *testing.T) { + t.Parallel() + + inserter, bundle := setup(t) + + insertParams := makeInsertParams() + uniqueOpts := &UniqueOpts{ + ByQueue: true, + } + + res0, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.False(t, res0.UniqueSkippedAsDuplicate) + + // Insert a second job with the same args, but expect that the same job + // ID to come back because we're still within its unique parameters. + res1, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.Equal(t, res0.Job.ID, res1.Job.ID) + require.True(t, res1.UniqueSkippedAsDuplicate) + + // Test all the other default unique states (see `defaultUniqueStates`) + // to make sure that in each case an inserted job still counts as a + // duplicate. The only state we don't test is `available` because that's + // already been done above. + for _, defaultState := range []rivertype.JobState{ + rivertype.JobStateCompleted, + rivertype.JobStateRunning, + rivertype.JobStateRetryable, + rivertype.JobStateScheduled, + } { + var finalizedAt *time.Time + if defaultState == rivertype.JobStateCompleted { + finalizedAt = ptrutil.Ptr(bundle.baselineTime) + } + + _, err = bundle.exec.JobUpdate(ctx, &riverdriver.JobUpdateParams{ + ID: res0.Job.ID, + FinalizedAtDoUpdate: true, + FinalizedAt: finalizedAt, + StateDoUpdate: true, + State: defaultState, + }) + require.NoError(t, err) + + // Still counts as a duplicate. + res1, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.Equal(t, res0.Job.ID, res1.Job.ID) + require.True(t, res1.UniqueSkippedAsDuplicate) + } + + _, err = bundle.exec.JobUpdate(ctx, &riverdriver.JobUpdateParams{ + ID: res0.Job.ID, + FinalizedAtDoUpdate: true, + FinalizedAt: ptrutil.Ptr(bundle.baselineTime), + StateDoUpdate: true, + State: rivertype.JobStateDiscarded, + }) + require.NoError(t, err) + + // Uniqueness includes a default set of states, so by moving the + // original job to "discarded", we're now allowed to insert a new job + // again, despite not having explicitly set the `ByState` option. 
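+ // (See defaultUniqueStates: available, completed, running, retryable,
+ // and scheduled; cancelled and discarded are deliberately excluded.)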
+ res2, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.NotEqual(t, res0.Job.ID, res2.Job.ID) + require.False(t, res2.UniqueSkippedAsDuplicate) + }) + + t.Run("UniqueJobAllOptions", func(t *testing.T) { + t.Parallel() + + inserter, bundle := setup(t) + + insertParams := makeInsertParams() + uniqueOpts := &UniqueOpts{ + ByArgs: true, + ByPeriod: 15 * time.Minute, + ByQueue: true, + ByState: []rivertype.JobState{rivertype.JobStateAvailable, rivertype.JobStateRunning}, + } + + res0, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.False(t, res0.UniqueSkippedAsDuplicate) + + // Insert a second job with the same args, but expect that the same job + // ID to come back because we're still within its unique parameters. + res1, err := inserter.JobInsert(ctx, bundle.exec, insertParams, uniqueOpts) + require.NoError(t, err) + require.Equal(t, res0.Job.ID, res1.Job.ID) + require.True(t, res1.UniqueSkippedAsDuplicate) + + // With args modified + { + insertParams := *insertParams // dup + insertParams.EncodedArgs = []byte(`{"key":"different"}`) + + // New job because a unique dimension has changed. + res2, err := inserter.JobInsert(ctx, bundle.exec, &insertParams, uniqueOpts) + require.NoError(t, err) + require.NotEqual(t, res0.Job.ID, res2.Job.ID) + require.False(t, res2.UniqueSkippedAsDuplicate) + } + + // With period modified + { + insertParams := *insertParams // dup + inserter.TimeNowUTC = func() time.Time { return bundle.baselineTime.Add(uniqueOpts.ByPeriod).Add(1 * time.Second) } + + // New job because a unique dimension has changed. + res2, err := inserter.JobInsert(ctx, bundle.exec, &insertParams, uniqueOpts) + require.NoError(t, err) + require.NotEqual(t, res0.Job.ID, res2.Job.ID) + require.False(t, res2.UniqueSkippedAsDuplicate) + + // Make sure to change timeNow back + inserter.TimeNowUTC = func() time.Time { return bundle.baselineTime } + } + + // With queue modified + { + insertParams := *insertParams // dup + insertParams.Queue = "alternate_queue" + + // New job because a unique dimension has changed. + res2, err := inserter.JobInsert(ctx, bundle.exec, &insertParams, uniqueOpts) + require.NoError(t, err) + require.NotEqual(t, res0.Job.ID, res2.Job.ID) + require.False(t, res2.UniqueSkippedAsDuplicate) + } + + // With state modified + { + insertParams := *insertParams // dup + insertParams.State = rivertype.JobStateCompleted + + // New job because a unique dimension has changed. 
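+ // (Because `completed` isn't in ByState, buildUniqueParams reports
+ // that no uniqueness check is needed at all, so this insert skips the
+ // advisory lock and duplicate lookup entirely.)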
+ res2, err := inserter.JobInsert(ctx, bundle.exec, &insertParams, uniqueOpts) + require.NoError(t, err) + require.NotEqual(t, res0.Job.ID, res2.Job.ID) + require.False(t, res2.UniqueSkippedAsDuplicate) + } + }) + + t.Run("UniqueJobContention", func(t *testing.T) { + t.Parallel() + + inserter, bundle := setup(t) + require.NoError(t, bundle.tx.Rollback(ctx)) + bundle.driver = riverpgxv5.New(riverinternaltest.TestDB(ctx, t)) + bundle.exec = bundle.driver.GetExecutor() + + insertParams := makeInsertParams() + uniqueOpts := &UniqueOpts{ + ByPeriod: 15 * time.Minute, + } + + var ( + numContendingJobs = runtime.NumCPU() // max allowed test manager connections + insertedJobs = make([]*rivertype.JobRow, numContendingJobs) + insertedJobsMu sync.Mutex + wg sync.WaitGroup + ) + + for i := 0; i < numContendingJobs; i++ { + jobNum := i + wg.Add(1) + + go func() { + _, err := dbutil.WithTxV(ctx, bundle.exec, func(ctx context.Context, exec riverdriver.ExecutorTx) (struct{}, error) { + res, err := inserter.JobInsert(ctx, exec, insertParams, uniqueOpts) + require.NoError(t, err) + + insertedJobsMu.Lock() + insertedJobs[jobNum] = res.Job + insertedJobsMu.Unlock() + + return struct{}{}, nil + }) + require.NoError(t, err) + + wg.Done() + }() + } + + wg.Wait() + + firstJobID := insertedJobs[0].ID + for i := 1; i < numContendingJobs; i++ { + require.Equal(t, firstJobID, insertedJobs[i].ID) + } + }) +} diff --git a/internal/dbadapter/main_test.go b/internal/dbunique/main_test.go similarity index 89% rename from internal/dbadapter/main_test.go rename to internal/dbunique/main_test.go index 86a64005..5b626da1 100644 --- a/internal/dbadapter/main_test.go +++ b/internal/dbunique/main_test.go @@ -1,4 +1,4 @@ -package dbadapter +package dbunique import ( "testing" diff --git a/internal/jobcompleter/job_completer.go b/internal/jobcompleter/job_completer.go index 4eb35455..17d227df 100644 --- a/internal/jobcompleter/job_completer.go +++ b/internal/jobcompleter/job_completer.go @@ -9,16 +9,16 @@ import ( "golang.org/x/sync/errgroup" "github.com/riverqueue/river/internal/baseservice" - "github.com/riverqueue/river/internal/dbadapter" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/jobstats" "github.com/riverqueue/river/internal/util/timeutil" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/rivertype" ) type JobCompleter interface { // JobSetState sets a new state for the given job, as long as it's // still running (i.e. its state has not changed to something else already). - JobSetStateIfRunning(stats *jobstats.JobStatistics, params *dbadapter.JobSetStateIfRunningParams) error + JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error // Subscribe injects a callback which will be invoked whenever a job is // updated. @@ -30,14 +30,21 @@ type JobCompleter interface { } type CompleterJobUpdated struct { - Job *dbsqlc.RiverJob + Job *rivertype.JobRow JobStats *jobstats.JobStatistics } +// PartialExecutor is always a riverdriver.Executor under normal circumstances, +// but is a minimal interface with the functions needed for completers to work +// to more easily facilitate mocking. 
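+//
+// Illustrative usage (jobID is assumed; not part of this changeset):
+//
+//	job, err := exec.JobSetStateIfRunning(ctx,
+//		riverdriver.JobSetStateCompleted(jobID, time.Now()))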
+type PartialExecutor interface { + JobSetStateIfRunning(ctx context.Context, params *riverdriver.JobSetStateIfRunningParams) (*rivertype.JobRow, error) +} + type InlineJobCompleter struct { baseservice.BaseService - adapter dbadapter.Adapter + exec PartialExecutor subscribeFunc func(update CompleterJobUpdated) subscribeFuncMu sync.Mutex @@ -49,15 +56,15 @@ type InlineJobCompleter struct { wg sync.WaitGroup } -func NewInlineCompleter(archetype *baseservice.Archetype, adapter dbadapter.Adapter) *InlineJobCompleter { +func NewInlineCompleter(archetype *baseservice.Archetype, exec PartialExecutor) *InlineJobCompleter { return baseservice.Init(archetype, &InlineJobCompleter{ - adapter: adapter, + exec: exec, }) } -func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *dbadapter.JobSetStateIfRunningParams) error { - return c.doOperation(stats, func(ctx context.Context) (*dbsqlc.RiverJob, error) { - return c.adapter.JobSetStateIfRunning(ctx, params) +func (c *InlineJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error { + return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) { + return c.exec.JobSetStateIfRunning(ctx, params) }) } @@ -72,7 +79,7 @@ func (c *InlineJobCompleter) Wait() { c.wg.Wait() } -func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*dbsqlc.RiverJob, error)) error { +func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error { c.wg.Add(1) defer c.wg.Done() @@ -100,32 +107,32 @@ func (c *InlineJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(c type AsyncJobCompleter struct { baseservice.BaseService - adapter dbadapter.Adapter concurrency uint32 + exec PartialExecutor eg *errgroup.Group subscribeFunc func(update CompleterJobUpdated) subscribeFuncMu sync.Mutex } -func NewAsyncCompleter(archetype *baseservice.Archetype, adapter dbadapter.Adapter, concurrency uint32) *AsyncJobCompleter { +func NewAsyncCompleter(archetype *baseservice.Archetype, exec PartialExecutor, concurrency uint32) *AsyncJobCompleter { eg := &errgroup.Group{} // TODO: int concurrency may feel more natural than uint32 eg.SetLimit(int(concurrency)) return baseservice.Init(archetype, &AsyncJobCompleter{ - adapter: adapter, + exec: exec, concurrency: concurrency, eg: eg, }) } -func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *dbadapter.JobSetStateIfRunningParams) error { - return c.doOperation(stats, func(ctx context.Context) (*dbsqlc.RiverJob, error) { - return c.adapter.JobSetStateIfRunning(ctx, params) +func (c *AsyncJobCompleter) JobSetStateIfRunning(stats *jobstats.JobStatistics, params *riverdriver.JobSetStateIfRunningParams) error { + return c.doOperation(stats, func(ctx context.Context) (*rivertype.JobRow, error) { + return c.exec.JobSetStateIfRunning(ctx, params) }) } -func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*dbsqlc.RiverJob, error)) error { +func (c *AsyncJobCompleter) doOperation(stats *jobstats.JobStatistics, f func(ctx context.Context) (*rivertype.JobRow, error)) error { c.eg.Go(func() error { start := c.TimeNowUTC() @@ -167,7 +174,7 @@ func (c *AsyncJobCompleter) Wait() { // may want to rethink these numbers and strategy. 
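(A hedged aside to make the cadence concrete: assuming the 2 ** N formula described inside withRetries below, the pre-jitter waits per attempt work out roughly as follows. Illustrative only; exact offsets depend on retrySecondsWithoutJitter.)

    // Sketch: approximate pre-jitter backoff for three retries under 2 ** N.
    for attempt := 1; attempt <= 3; attempt++ {
        fmt.Printf("attempt %d: ~%ds\n", attempt, 1<<attempt) // ~2s, ~4s, ~8s
    }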
const numRetries = 3 -func withRetries(c *baseservice.BaseService, f func(ctx context.Context) (*dbsqlc.RiverJob, error)) (*dbsqlc.RiverJob, error) { //nolint:varnamelen +func withRetries(c *baseservice.BaseService, f func(ctx context.Context) (*rivertype.JobRow, error)) (*rivertype.JobRow, error) { //nolint:varnamelen retrySecondsWithoutJitter := func(attempt int) float64 { // Uses a different algorithm (2 ** N) compared to retry policies (4 ** // N) so we can get more retries sooner: 1, 2, 4, 8 @@ -183,7 +190,7 @@ func withRetries(c *baseservice.BaseService, f func(ctx context.Context) (*dbsql return retrySeconds } - tryOnce := func() (*dbsqlc.RiverJob, error) { + tryOnce := func() (*rivertype.JobRow, error) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 5*time.Second) diff --git a/internal/jobcompleter/job_completer_test.go b/internal/jobcompleter/job_completer_test.go index abd3b390..c6fbbda1 100644 --- a/internal/jobcompleter/job_completer_test.go +++ b/internal/jobcompleter/job_completer_test.go @@ -3,25 +3,39 @@ package jobcompleter import ( "context" "errors" + "sync" "testing" "time" "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/dbadapter" - "github.com/riverqueue/river/internal/dbadaptertest" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/jobstats" "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/rivertype" ) +type executorMock struct { + JobSetStateIfRunningCalled bool + JobSetStateIfRunningFunc func(ctx context.Context, params *riverdriver.JobSetStateIfRunningParams) (*rivertype.JobRow, error) + mu sync.Mutex +} + +func (m *executorMock) JobSetStateIfRunning(ctx context.Context, params *riverdriver.JobSetStateIfRunningParams) (*rivertype.JobRow, error) { + m.mu.Lock() + m.JobSetStateIfRunningCalled = true + m.mu.Unlock() + + return m.JobSetStateIfRunningFunc(ctx, params) +} + func TestInlineJobCompleter_Complete(t *testing.T) { t.Parallel() var attempt int expectedErr := errors.New("an error from the completer") - adapter := &dbadaptertest.TestAdapter{ - JobSetStateIfRunningFunc: func(ctx context.Context, params *dbadapter.JobSetStateIfRunningParams) (*dbsqlc.RiverJob, error) { + adapter := &executorMock{ + JobSetStateIfRunningFunc: func(ctx context.Context, params *riverdriver.JobSetStateIfRunningParams) (*rivertype.JobRow, error) { require.Equal(t, int64(1), params.ID) attempt++ return nil, expectedErr @@ -31,7 +45,7 @@ func TestInlineJobCompleter_Complete(t *testing.T) { completer := NewInlineCompleter(riverinternaltest.BaseServiceArchetype(t), adapter) t.Cleanup(completer.Wait) - err := completer.JobSetStateIfRunning(&jobstats.JobStatistics{}, dbadapter.JobSetStateCompleted(1, time.Now())) + err := completer.JobSetStateIfRunning(&jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(1, time.Now())) if !errors.Is(err, expectedErr) { t.Errorf("expected %v, got %v", expectedErr, err) } @@ -43,16 +57,16 @@ func TestInlineJobCompleter_Complete(t *testing.T) { func TestInlineJobCompleter_Subscribe(t *testing.T) { t.Parallel() - testCompleterSubscribe(t, func(a dbadapter.Adapter) JobCompleter { - return NewInlineCompleter(riverinternaltest.BaseServiceArchetype(t), a) + testCompleterSubscribe(t, func(exec PartialExecutor) JobCompleter { + return NewInlineCompleter(riverinternaltest.BaseServiceArchetype(t), exec) }) } func TestInlineJobCompleter_Wait(t *testing.T) { t.Parallel() - testCompleterWait(t, 
func(a dbadapter.Adapter) JobCompleter { - return NewInlineCompleter(riverinternaltest.BaseServiceArchetype(t), a) + testCompleterWait(t, func(exec PartialExecutor) JobCompleter { + return NewInlineCompleter(riverinternaltest.BaseServiceArchetype(t), exec) }) } @@ -75,8 +89,8 @@ func TestAsyncJobCompleter_Complete(t *testing.T) { resultCh <- expectedErr }() - adapter := &dbadaptertest.TestAdapter{ - JobSetStateIfRunningFunc: func(ctx context.Context, params *dbadapter.JobSetStateIfRunningParams) (*dbsqlc.RiverJob, error) { + adapter := &executorMock{ + JobSetStateIfRunningFunc: func(ctx context.Context, params *riverdriver.JobSetStateIfRunningParams) (*rivertype.JobRow, error) { inputCh <- jobInput{ctx: ctx, jobID: params.ID} err := <-resultCh return nil, err @@ -87,14 +101,14 @@ func TestAsyncJobCompleter_Complete(t *testing.T) { // launch 4 completions, only 2 can be inline due to the concurrency limit: for i := int64(0); i < 2; i++ { - if err := completer.JobSetStateIfRunning(&jobstats.JobStatistics{}, dbadapter.JobSetStateCompleted(i, time.Now())); err != nil { + if err := completer.JobSetStateIfRunning(&jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(i, time.Now())); err != nil { t.Errorf("expected nil err, got %v", err) } } bgCompletionsStarted := make(chan struct{}) go func() { for i := int64(2); i < 4; i++ { - if err := completer.JobSetStateIfRunning(&jobstats.JobStatistics{}, dbadapter.JobSetStateCompleted(i, time.Now())); err != nil { + if err := completer.JobSetStateIfRunning(&jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(i, time.Now())); err != nil { t.Errorf("expected nil err, got %v", err) } } @@ -144,31 +158,31 @@ func TestAsyncJobCompleter_Complete(t *testing.T) { func TestAsyncJobCompleter_Subscribe(t *testing.T) { t.Parallel() - testCompleterSubscribe(t, func(a dbadapter.Adapter) JobCompleter { - return NewAsyncCompleter(riverinternaltest.BaseServiceArchetype(t), a, 4) + testCompleterSubscribe(t, func(exec PartialExecutor) JobCompleter { + return NewAsyncCompleter(riverinternaltest.BaseServiceArchetype(t), exec, 4) }) } func TestAsyncJobCompleter_Wait(t *testing.T) { t.Parallel() - testCompleterWait(t, func(a dbadapter.Adapter) JobCompleter { - return NewAsyncCompleter(riverinternaltest.BaseServiceArchetype(t), a, 4) + testCompleterWait(t, func(exec PartialExecutor) JobCompleter { + return NewAsyncCompleter(riverinternaltest.BaseServiceArchetype(t), exec, 4) }) } -func testCompleterSubscribe(t *testing.T, constructor func(dbadapter.Adapter) JobCompleter) { +func testCompleterSubscribe(t *testing.T, constructor func(PartialExecutor) JobCompleter) { t.Helper() - adapter := &dbadaptertest.TestAdapter{ - JobSetStateIfRunningFunc: func(ctx context.Context, params *dbadapter.JobSetStateIfRunningParams) (*dbsqlc.RiverJob, error) { - return &dbsqlc.RiverJob{ - State: dbsqlc.JobStateCompleted, + exec := &executorMock{ + JobSetStateIfRunningFunc: func(ctx context.Context, params *riverdriver.JobSetStateIfRunningParams) (*rivertype.JobRow, error) { + return &rivertype.JobRow{ + State: rivertype.JobStateCompleted, }, nil }, } - completer := constructor(adapter) + completer := constructor(exec) jobUpdates := make(chan CompleterJobUpdated, 10) completer.Subscribe(func(update CompleterJobUpdated) { @@ -176,37 +190,37 @@ func testCompleterSubscribe(t *testing.T, constructor func(dbadapter.Adapter) Jo }) for i := 0; i < 4; i++ { - require.NoError(t, completer.JobSetStateIfRunning(&jobstats.JobStatistics{}, dbadapter.JobSetStateCompleted(int64(i), time.Now()))) + 
require.NoError(t, completer.JobSetStateIfRunning(&jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(int64(i), time.Now())))
 }
 
 completer.Wait()
 
 updates := riverinternaltest.WaitOrTimeoutN(t, jobUpdates, 4)
 for i := 0; i < 4; i++ {
- require.Equal(t, dbsqlc.JobStateCompleted, updates[0].Job.State)
+ require.Equal(t, rivertype.JobStateCompleted, updates[i].Job.State)
 }
 }
 
-func testCompleterWait(t *testing.T, constructor func(dbadapter.Adapter) JobCompleter) {
+func testCompleterWait(t *testing.T, constructor func(PartialExecutor) JobCompleter) {
 t.Helper()
 
 resultCh := make(chan error)
 completeStartedCh := make(chan struct{})
- adapter := &dbadaptertest.TestAdapter{
- JobSetStateIfRunningFunc: func(ctx context.Context, params *dbadapter.JobSetStateIfRunningParams) (*dbsqlc.RiverJob, error) {
+ exec := &executorMock{
+ JobSetStateIfRunningFunc: func(ctx context.Context, params *riverdriver.JobSetStateIfRunningParams) (*rivertype.JobRow, error) {
 completeStartedCh <- struct{}{}
 err := <-resultCh
 return nil, err
 },
 }
 
- completer := constructor(adapter)
+ completer := constructor(exec)
 
 // launch 4 completions:
 for i := 0; i < 4; i++ {
 i := i
 go func() {
- require.NoError(t, completer.JobSetStateIfRunning(&jobstats.JobStatistics{}, dbadapter.JobSetStateCompleted(int64(i), time.Now())))
+ require.NoError(t, completer.JobSetStateIfRunning(&jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(int64(i), time.Now())))
 }()
 <-completeStartedCh // wait for func to actually start
 }
diff --git a/internal/leadership/elector.go b/internal/leadership/elector.go
index e8e51646..5a10e09e 100644
--- a/internal/leadership/elector.go
+++ b/internal/leadership/elector.go
@@ -8,8 +8,9 @@ import (
 "sync"
 "time"
 
- "github.com/riverqueue/river/internal/dbadapter"
 "github.com/riverqueue/river/internal/notifier"
+ "github.com/riverqueue/river/internal/util/dbutil"
+ "github.com/riverqueue/river/riverdriver"
 )
 
 type pgNotification struct {
@@ -42,7 +43,7 @@ func (s *Subscription) Unlisten() {
 }
 
 type Elector struct {
- adapter dbadapter.Adapter
+ exec riverdriver.Executor
 id string
 interval time.Duration
 name string
@@ -57,10 +58,10 @@ type Elector struct {
 
-// NewElector returns an Elector using the given adapter. The name should correspond
+// NewElector returns an Elector using the given executor. The name should correspond
 // to the name of the database + schema combo and should be shared across all Clients
 // running with that combination. The id should be unique to the Client.
-func NewElector(adapter dbadapter.Adapter, notifier *notifier.Notifier, name, id string, interval time.Duration, logger *slog.Logger) (*Elector, error) {
+func NewElector(exec riverdriver.Executor, notifier *notifier.Notifier, name, id string, interval time.Duration, logger *slog.Logger) (*Elector, error) {
 // TODO: validate name + id length/format, interval, etc
 return &Elector{
- adapter: adapter,
+ exec: exec,
 id: id,
 interval: interval,
 name: name,
@@ -163,7 +164,11 @@ func (e *Elector) gainLeadership(ctx context.Context, leadershipNotificationChan
 }
 
 func (e *Elector) attemptElect(ctx context.Context) (bool, error) {
- elected, err := e.adapter.LeadershipAttemptElect(ctx, false, e.name, e.id, e.interval)
+ elected, err := LeaderAttemptElect(ctx, e.exec, false, &riverdriver.LeaderElectParams{
+ LeaderID: e.id,
+ Name: e.name,
+ TTL: e.interval,
+ })
 if err != nil {
 return false, err
 }
@@ -189,7 +194,11 @@ func (e *Elector) keepLeadership(ctx context.Context, leadershipNotificationChan
 // We don't care about notifications when we know we're the leader, do we?
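 // (On the TODO below: a timer created by time.After can't be stopped
 // early, so every iteration woken by a notification abandons a live timer
 // until it fires. A hypothetical fix, not part of this change, is a single
 // reusable timer hoisted out of the loop:
 //
 //	timer := time.NewTimer(e.interval)
 //	defer timer.Stop()
 //	...
 //	case <-timer.C:
 //		timer.Reset(e.interval)
 //
 // with a matching Stop and Reset on the notification branch.)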
case <-time.After(e.interval):
 // TODO: this leaks timers if we're receiving notifications
- reelected, err := e.adapter.LeadershipAttemptElect(ctx, true, e.name, e.id, e.interval)
+ reelected, err := LeaderAttemptElect(ctx, e.exec, true, &riverdriver.LeaderElectParams{
+ LeaderID: e.id,
+ Name: e.name,
+ TTL: e.interval,
+ })
 if err != nil {
 if errors.Is(err, context.Canceled) {
 return err
 }
@@ -222,6 +231,8 @@ func (e *Elector) giveUpLeadership() {
 }
 }
 
+// attemptResign attempts to resign any currently held leaderships for the
+// elector's name and leader ID.
 func (e *Elector) attemptResign(attempt int) error {
 // Wait one second longer each time we try to resign:
 timeout := time.Duration(attempt+1) * time.Second
@@ -230,7 +241,12 @@ func (e *Elector) attemptResign(attempt int) error {
 ctx, cancel := context.WithTimeout(context.Background(), timeout)
 defer cancel()
 
- return e.adapter.LeadershipResign(ctx, e.name, e.id)
+ _, err := e.exec.LeaderResign(ctx, &riverdriver.LeaderResignParams{
+ LeaderID: e.id,
+ LeadershipTopic: string(notifier.NotificationTopicLeadership),
+ Name: e.name,
+ })
+ return err
 }
 
 func (e *Elector) Listen() *Subscription {
@@ -290,3 +306,39 @@ func (e *Elector) notifySubscribers(isLeader bool) {
 }
 }
 }
+
+const deadlineTimeout = 5 * time.Second
+
+// LeaderAttemptElect attempts to elect a leader for the given name. The
+// bool alreadyElected indicates whether this is a potential reelection of
+// an already-elected leader. If the election is successful because there is
+// no leader or the previous leader expired, params.LeaderID is set as the
+// new leader with a TTL of params.TTL.
+//
+// Returns whether this leader was successfully elected or an error if one
+// occurred.
+func LeaderAttemptElect(ctx context.Context, exec riverdriver.Executor, alreadyElected bool, params *riverdriver.LeaderElectParams) (bool, error) {
+ ctx, cancel := context.WithTimeout(ctx, deadlineTimeout)
+ defer cancel()
+
+ return dbutil.WithTxV(ctx, exec, func(ctx context.Context, exec riverdriver.ExecutorTx) (bool, error) {
+ if _, err := exec.LeaderDeleteExpired(ctx, params.Name); err != nil {
+ return false, err
+ }
+
+ var (
+ elected bool
+ err error
+ )
+ if alreadyElected {
+ elected, err = exec.LeaderAttemptReelect(ctx, params)
+ } else {
+ elected, err = exec.LeaderAttemptElect(ctx, params)
+ }
+ if err != nil {
+ return false, err
+ }
+
+ return elected, nil
+ })
+}
diff --git a/internal/leadership/elector_test.go b/internal/leadership/elector_test.go
new file mode 100644
index 00000000..7ac1ce12
--- /dev/null
+++ b/internal/leadership/elector_test.go
@@ -0,0 +1,114 @@
+package leadership
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/riverqueue/river/internal/riverinternaltest"
+ "github.com/riverqueue/river/internal/riverinternaltest/testfactory"
+ "github.com/riverqueue/river/internal/util/ptrutil"
+ "github.com/riverqueue/river/riverdriver"
+ "github.com/riverqueue/river/riverdriver/riverpgxv5"
+)
+
+func TestLeaderAttemptElect(t *testing.T) {
+ t.Parallel()
+
+ const (
+ clientID = "client-id"
+ leaderInstanceName = "default"
+ leaderTTL = 10 * time.Second
+ )
+
+ ctx := context.Background()
+
+ type testBundle struct {
+ exec riverdriver.Executor
+ }
+
+ setup := func(t *testing.T) *testBundle {
+ t.Helper()
+
+ driver := riverpgxv5.New(nil)
+
+ return &testBundle{
+ exec: driver.UnwrapExecutor(riverinternaltest.TestTx(ctx, t)),
+ }
+ }
+
+ t.Run("ElectsLeader", func(t *testing.T) {
+ t.Parallel()
+
+ bundle := 
setup(t)
+
+ elected, err := LeaderAttemptElect(ctx, bundle.exec, false, &riverdriver.LeaderElectParams{
+ LeaderID: clientID,
+ Name: leaderInstanceName,
+ TTL: leaderTTL,
+ })
+ require.NoError(t, err)
+ require.True(t, elected) // won election
+
+ leader, err := bundle.exec.LeaderGetElectedLeader(ctx, leaderInstanceName)
+ require.NoError(t, err)
+ require.WithinDuration(t, time.Now(), leader.ElectedAt, 100*time.Millisecond)
+ require.WithinDuration(t, time.Now().Add(leaderTTL), leader.ExpiresAt, 100*time.Millisecond)
+ })
+
+ t.Run("ReelectsSameLeader", func(t *testing.T) {
+ t.Parallel()
+
+ bundle := setup(t)
+
+ leader := testfactory.Leader(ctx, t, bundle.exec, &testfactory.LeaderOpts{
+ LeaderID: ptrutil.Ptr(clientID),
+ Name: ptrutil.Ptr(leaderInstanceName),
+ })
+
+ // Re-elect the same leader. Use a larger TTL to see if time is updated,
+ // because we are in a test transaction and the time is frozen at the start of
+ // the transaction.
+ elected, err := LeaderAttemptElect(ctx, bundle.exec, true, &riverdriver.LeaderElectParams{
+ LeaderID: clientID,
+ Name: leaderInstanceName,
+ TTL: 30 * time.Second,
+ })
+ require.NoError(t, err)
+ require.True(t, elected) // won re-election
+
+ // expires_at should be incremented because this is the same leader that won
+ // previously and we specified that we're already elected:
+ updatedLeader, err := bundle.exec.LeaderGetElectedLeader(ctx, leaderInstanceName)
+ require.NoError(t, err)
+ require.Greater(t, updatedLeader.ExpiresAt, leader.ExpiresAt)
+ })
+
+ t.Run("CannotElectDifferentLeader", func(t *testing.T) {
+ t.Parallel()
+
+ bundle := setup(t)
+
+ leader := testfactory.Leader(ctx, t, bundle.exec, &testfactory.LeaderOpts{
+ LeaderID: ptrutil.Ptr(clientID),
+ Name: ptrutil.Ptr(leaderInstanceName),
+ })
+
+ elected, err := LeaderAttemptElect(ctx, bundle.exec, true, &riverdriver.LeaderElectParams{
+ LeaderID: "different-client-id",
+ Name: leaderInstanceName,
+ TTL: leaderTTL,
+ })
+ require.NoError(t, err)
+ require.False(t, elected) // lost election
+
+ // The existing leader's expiry time should not have changed because the
+ // reelect query is a no-op when a different leader ID already holds an
+ // unexpired leadership:
+ updatedLeader, err := bundle.exec.LeaderGetElectedLeader(ctx, leaderInstanceName)
+ require.NoError(t, err)
+ require.Equal(t, leader.ExpiresAt, updatedLeader.ExpiresAt)
+ })
+}
diff --git a/internal/maintenance/job_cleaner.go b/internal/maintenance/job_cleaner.go
index fda6dfb9..19b3942d 100644
--- a/internal/maintenance/job_cleaner.go
+++ b/internal/maintenance/job_cleaner.go
@@ -8,12 +8,11 @@ import (
 "time"
 
 "github.com/riverqueue/river/internal/baseservice"
- "github.com/riverqueue/river/internal/dbsqlc"
 "github.com/riverqueue/river/internal/maintenance/startstop"
 "github.com/riverqueue/river/internal/rivercommon"
- "github.com/riverqueue/river/internal/util/dbutil"
 "github.com/riverqueue/river/internal/util/timeutil"
 "github.com/riverqueue/river/internal/util/valutil"
+ "github.com/riverqueue/river/riverdriver"
 )
 
 const (
@@ -76,12 +75,11 @@ type JobCleaner struct {
 Config *JobCleanerConfig
 TestSignals JobCleanerTestSignals
 
- batchSize int64 // configurable for test purposes
- dbExecutor dbutil.Executor
- queries *dbsqlc.Queries
+ batchSize int // configurable for test purposes
+ exec riverdriver.Executor
 }
 
-func NewJobCleaner(archetype *baseservice.Archetype, config *JobCleanerConfig, executor dbutil.Executor) *JobCleaner {
+func NewJobCleaner(archetype *baseservice.Archetype, config *JobCleanerConfig, exec 
riverdriver.Executor) *JobCleaner { return baseservice.Init(archetype, &JobCleaner{ Config: (&JobCleanerConfig{ CancelledJobRetentionPeriod: valutil.ValOrDefault(config.CancelledJobRetentionPeriod, CancelledJobRetentionPeriodDefault), @@ -90,9 +88,8 @@ func NewJobCleaner(archetype *baseservice.Archetype, config *JobCleanerConfig, e Interval: valutil.ValOrDefault(config.Interval, JobCleanerIntervalDefault), }).mustValidate(), - batchSize: BatchSizeDefault, - dbExecutor: executor, - queries: dbsqlc.New(), + batchSize: BatchSizeDefault, + exec: exec, }) } @@ -131,7 +128,7 @@ func (s *JobCleaner) Start(ctx context.Context) error { //nolint:dupl } s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully, - slog.Int64("num_jobs_deleted", res.NumJobsDeleted), + slog.Int("num_jobs_deleted", res.NumJobsDeleted), ) } }() @@ -140,7 +137,7 @@ func (s *JobCleaner) Start(ctx context.Context) error { //nolint:dupl } type jobCleanerRunOnceResult struct { - NumJobsDeleted int64 + NumJobsDeleted int } func (s *JobCleaner) runOnce(ctx context.Context) (*jobCleanerRunOnceResult, error) { @@ -148,11 +145,11 @@ func (s *JobCleaner) runOnce(ctx context.Context) (*jobCleanerRunOnceResult, err for { // Wrapped in a function so that defers run as expected. - numDeleted, err := func() (int64, error) { + numDeleted, err := func() (int, error) { ctx, cancelFunc := context.WithTimeout(ctx, 30*time.Second) defer cancelFunc() - numDeleted, err := s.queries.JobDeleteBefore(ctx, s.dbExecutor, dbsqlc.JobDeleteBeforeParams{ + numDeleted, err := s.exec.JobDeleteBefore(ctx, &riverdriver.JobDeleteBeforeParams{ CancelledFinalizedAtHorizon: time.Now().Add(-s.Config.CancelledJobRetentionPeriod), CompletedFinalizedAtHorizon: time.Now().Add(-s.Config.CompletedJobRetentionPeriod), DiscardedFinalizedAtHorizon: time.Now().Add(-s.Config.DiscardedJobRetentionPeriod), @@ -177,7 +174,7 @@ func (s *JobCleaner) runOnce(ctx context.Context) (*jobCleanerRunOnceResult, err } s.Logger.InfoContext(ctx, s.Name+": Deleted batch of jobs", - slog.Int64("num_jobs_deleted", numDeleted), + slog.Int("num_jobs_deleted", numDeleted), ) s.CancellableSleepRandomBetween(ctx, BatchBackoffMin, BatchBackoffMax) diff --git a/internal/maintenance/job_cleaner_test.go b/internal/maintenance/job_cleaner_test.go index 669e5f14..c0609ec8 100644 --- a/internal/maintenance/job_cleaner_test.go +++ b/internal/maintenance/job_cleaner_test.go @@ -5,56 +5,55 @@ import ( "testing" "time" - "github.com/jackc/pgx/v5" "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/riverinternaltest" "github.com/riverqueue/river/internal/util/ptrutil" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivertype" ) func TestJobCleaner(t *testing.T) { t.Parallel() - var ( - ctx = context.Background() - queries = dbsqlc.New() - ) + ctx := context.Background() type testBundle struct { cancelledDeleteHorizon time.Time completedDeleteHorizon time.Time + exec riverdriver.Executor discardedDeleteHorizon time.Time - tx pgx.Tx } type insertJobParams struct { FinalizedAt *time.Time - State dbsqlc.JobState + State rivertype.JobState } - insertJob := func(ctx context.Context, dbtx dbsqlc.DBTX, params insertJobParams) *dbsqlc.RiverJob { - job, err := queries.JobInsert(ctx, dbtx, dbsqlc.JobInsertParams{ + insertJob := func(ctx context.Context, exec riverdriver.Executor, params insertJobParams) 
*rivertype.JobRow { + jobRow, err := exec.JobInsert(ctx, &riverdriver.JobInsertParams{ FinalizedAt: params.FinalizedAt, Kind: "test_kind", - MaxAttempts: int16(rivercommon.MaxAttemptsDefault), - Priority: int16(rivercommon.PriorityDefault), + MaxAttempts: rivercommon.MaxAttemptsDefault, + Priority: rivercommon.PriorityDefault, Queue: rivercommon.QueueDefault, State: params.State, }) require.NoError(t, err) - return job + return jobRow } setup := func(t *testing.T) (*JobCleaner, *testBundle) { t.Helper() + tx := riverinternaltest.TestTx(ctx, t) bundle := &testBundle{ cancelledDeleteHorizon: time.Now().Add(-CancelledJobRetentionPeriodDefault), completedDeleteHorizon: time.Now().Add(-CompletedJobRetentionPeriodDefault), + exec: riverpgxv5.New(nil).UnwrapExecutor(tx), discardedDeleteHorizon: time.Now().Add(-DiscardedJobRetentionPeriodDefault), - tx: riverinternaltest.TestTx(ctx, t), } cleaner := NewJobCleaner( @@ -65,7 +64,7 @@ func TestJobCleaner(t *testing.T) { DiscardedJobRetentionPeriod: DiscardedJobRetentionPeriodDefault, Interval: JobCleanerIntervalDefault, }, - bundle.tx) + bundle.exec) cleaner.TestSignals.Init() t.Cleanup(cleaner.Stop) @@ -99,54 +98,54 @@ func TestJobCleaner(t *testing.T) { cleaner, bundle := setup(t) // none of these get removed - job1 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateAvailable}) - job2 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning}) - job3 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateScheduled}) + job1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateAvailable}) + job2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning}) + job3 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateScheduled}) - cancelledJob1 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCancelled, FinalizedAt: ptrutil.Ptr(bundle.cancelledDeleteHorizon.Add(-1 * time.Hour))}) - cancelledJob2 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCancelled, FinalizedAt: ptrutil.Ptr(bundle.cancelledDeleteHorizon.Add(-1 * time.Minute))}) - cancelledJob3 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCancelled, FinalizedAt: ptrutil.Ptr(bundle.cancelledDeleteHorizon.Add(1 * time.Minute))}) // won't be deleted + cancelledJob1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCancelled, FinalizedAt: ptrutil.Ptr(bundle.cancelledDeleteHorizon.Add(-1 * time.Hour))}) + cancelledJob2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCancelled, FinalizedAt: ptrutil.Ptr(bundle.cancelledDeleteHorizon.Add(-1 * time.Minute))}) + cancelledJob3 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCancelled, FinalizedAt: ptrutil.Ptr(bundle.cancelledDeleteHorizon.Add(1 * time.Minute))}) // won't be deleted - completedJob1 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(-1 * time.Hour))}) - completedJob2 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(-1 * time.Minute))}) - completedJob3 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(1 * time.Minute))}) // won't be deleted + completedJob1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: 
ptrutil.Ptr(bundle.completedDeleteHorizon.Add(-1 * time.Hour))}) + completedJob2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(-1 * time.Minute))}) + completedJob3 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(1 * time.Minute))}) // won't be deleted - discardedJob1 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(bundle.discardedDeleteHorizon.Add(-1 * time.Hour))}) - discardedJob2 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(bundle.discardedDeleteHorizon.Add(-1 * time.Minute))}) - discardedJob3 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(bundle.discardedDeleteHorizon.Add(1 * time.Minute))}) // won't be deleted + discardedJob1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(bundle.discardedDeleteHorizon.Add(-1 * time.Hour))}) + discardedJob2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(bundle.discardedDeleteHorizon.Add(-1 * time.Minute))}) + discardedJob3 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateDiscarded, FinalizedAt: ptrutil.Ptr(bundle.discardedDeleteHorizon.Add(1 * time.Minute))}) // won't be deleted require.NoError(t, cleaner.Start(ctx)) cleaner.TestSignals.DeletedBatch.WaitOrTimeout() var err error - _, err = queries.JobGetByID(ctx, bundle.tx, job1.ID) - require.NotErrorIs(t, err, pgx.ErrNoRows) // still there - _, err = queries.JobGetByID(ctx, bundle.tx, job2.ID) - require.NotErrorIs(t, err, pgx.ErrNoRows) // still there - _, err = queries.JobGetByID(ctx, bundle.tx, job3.ID) - require.NotErrorIs(t, err, pgx.ErrNoRows) // still there - - _, err = queries.JobGetByID(ctx, bundle.tx, cancelledJob1.ID) - require.ErrorIs(t, err, pgx.ErrNoRows) - _, err = queries.JobGetByID(ctx, bundle.tx, cancelledJob2.ID) - require.ErrorIs(t, err, pgx.ErrNoRows) - _, err = queries.JobGetByID(ctx, bundle.tx, cancelledJob3.ID) - require.NotErrorIs(t, err, pgx.ErrNoRows) // still there - - _, err = queries.JobGetByID(ctx, bundle.tx, completedJob1.ID) - require.ErrorIs(t, err, pgx.ErrNoRows) - _, err = queries.JobGetByID(ctx, bundle.tx, completedJob2.ID) - require.ErrorIs(t, err, pgx.ErrNoRows) - _, err = queries.JobGetByID(ctx, bundle.tx, completedJob3.ID) - require.NotErrorIs(t, err, pgx.ErrNoRows) // still there - - _, err = queries.JobGetByID(ctx, bundle.tx, discardedJob1.ID) - require.ErrorIs(t, err, pgx.ErrNoRows) - _, err = queries.JobGetByID(ctx, bundle.tx, discardedJob2.ID) - require.ErrorIs(t, err, pgx.ErrNoRows) - _, err = queries.JobGetByID(ctx, bundle.tx, discardedJob3.ID) - require.NotErrorIs(t, err, pgx.ErrNoRows) // still there + _, err = bundle.exec.JobGetByID(ctx, job1.ID) + require.NotErrorIs(t, err, rivertype.ErrNotFound) // still there + _, err = bundle.exec.JobGetByID(ctx, job2.ID) + require.NotErrorIs(t, err, rivertype.ErrNotFound) // still there + _, err = bundle.exec.JobGetByID(ctx, job3.ID) + require.NotErrorIs(t, err, rivertype.ErrNotFound) // still there + + _, err = bundle.exec.JobGetByID(ctx, cancelledJob1.ID) + require.ErrorIs(t, err, rivertype.ErrNotFound) + _, err = bundle.exec.JobGetByID(ctx, cancelledJob2.ID) + require.ErrorIs(t, err, rivertype.ErrNotFound) + _, err = 
bundle.exec.JobGetByID(ctx, cancelledJob3.ID) + require.NotErrorIs(t, err, rivertype.ErrNotFound) // still there + + _, err = bundle.exec.JobGetByID(ctx, completedJob1.ID) + require.ErrorIs(t, err, rivertype.ErrNotFound) + _, err = bundle.exec.JobGetByID(ctx, completedJob2.ID) + require.ErrorIs(t, err, rivertype.ErrNotFound) + _, err = bundle.exec.JobGetByID(ctx, completedJob3.ID) + require.NotErrorIs(t, err, rivertype.ErrNotFound) // still there + + _, err = bundle.exec.JobGetByID(ctx, discardedJob1.ID) + require.ErrorIs(t, err, rivertype.ErrNotFound) + _, err = bundle.exec.JobGetByID(ctx, discardedJob2.ID) + require.ErrorIs(t, err, rivertype.ErrNotFound) + _, err = bundle.exec.JobGetByID(ctx, discardedJob3.ID) + require.NotErrorIs(t, err, rivertype.ErrNotFound) // still there }) t.Run("DeletesInBatches", func(t *testing.T) { @@ -159,10 +158,10 @@ func TestJobCleaner(t *testing.T) { // one extra batch, ensuring that we've tested working multiple. numJobs := cleaner.batchSize + 1 - jobs := make([]*dbsqlc.RiverJob, numJobs) + jobs := make([]*rivertype.JobRow, numJobs) - for i := 0; i < int(numJobs); i++ { - job := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(-1 * time.Hour))}) + for i := 0; i < numJobs; i++ { + job := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(-1 * time.Hour))}) jobs[i] = job } @@ -173,8 +172,8 @@ func TestJobCleaner(t *testing.T) { cleaner.TestSignals.DeletedBatch.WaitOrTimeout() for _, job := range jobs { - _, err := queries.JobGetByID(ctx, bundle.tx, job.ID) - require.ErrorIs(t, err, pgx.ErrNoRows) + _, err := bundle.exec.JobGetByID(ctx, job.ID) + require.ErrorIs(t, err, rivertype.ErrNotFound) } }) @@ -228,7 +227,7 @@ func TestJobCleaner(t *testing.T) { cleaner, bundle := setup(t) cleaner.Config.Interval = time.Minute // should only trigger once for the initial run - job1 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(-1 * time.Hour))}) + job1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(-1 * time.Hour))}) require.NoError(t, cleaner.Start(ctx)) @@ -236,16 +235,16 @@ func TestJobCleaner(t *testing.T) { cleaner.Stop() - job2 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(-1 * time.Minute))}) + job2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCompleted, FinalizedAt: ptrutil.Ptr(bundle.completedDeleteHorizon.Add(-1 * time.Minute))}) require.NoError(t, cleaner.Start(ctx)) cleaner.TestSignals.DeletedBatch.WaitOrTimeout() var err error - _, err = queries.JobGetByID(ctx, bundle.tx, job1.ID) - require.ErrorIs(t, err, pgx.ErrNoRows) - _, err = queries.JobGetByID(ctx, bundle.tx, job2.ID) - require.ErrorIs(t, err, pgx.ErrNoRows) + _, err = bundle.exec.JobGetByID(ctx, job1.ID) + require.ErrorIs(t, err, rivertype.ErrNotFound) + _, err = bundle.exec.JobGetByID(ctx, job2.ID) + require.ErrorIs(t, err, rivertype.ErrNotFound) }) } diff --git a/internal/maintenance/periodic_job_enqueuer.go b/internal/maintenance/periodic_job_enqueuer.go index 90f91709..57c8b460 100644 --- a/internal/maintenance/periodic_job_enqueuer.go +++ b/internal/maintenance/periodic_job_enqueuer.go @@ -6,9 +6,10 @@ import ( "time" 
"github.com/riverqueue/river/internal/baseservice" - "github.com/riverqueue/river/internal/dbadapter" + "github.com/riverqueue/river/internal/dbunique" "github.com/riverqueue/river/internal/maintenance/startstop" "github.com/riverqueue/river/internal/rivercommon" + "github.com/riverqueue/river/riverdriver" ) // ErrNoJobToInsert can be returned by a PeriodicJob's JobToInsertFunc to @@ -32,7 +33,7 @@ func (ts *PeriodicJobEnqueuerTestSignals) Init() { // river.PeriodicJobArgs, but needs a separate type because the enqueuer is in a // subpackage. type PeriodicJob struct { - ConstructorFunc func() (*dbadapter.JobInsertParams, error) + ConstructorFunc func() (*riverdriver.JobInsertParams, *dbunique.UniqueOpts, error) RunOnStart bool ScheduleFunc func(time.Time) time.Time @@ -51,6 +52,8 @@ func (j *PeriodicJob) mustValidate() *PeriodicJob { } type PeriodicJobEnqueuerConfig struct { + AdvisoryLockPrefix int32 + // PeriodicJobs are the periodic jobs with which to configure the enqueuer. PeriodicJobs []*PeriodicJob } @@ -69,23 +72,31 @@ type PeriodicJobEnqueuer struct { Config *PeriodicJobEnqueuerConfig TestSignals PeriodicJobEnqueuerTestSignals - dbAdapter dbadapter.Adapter - periodicJobs []*PeriodicJob + exec riverdriver.Executor + periodicJobs []*PeriodicJob + uniqueInserter *dbunique.UniqueInserter } -func NewPeriodicJobEnqueuer(archetype *baseservice.Archetype, config *PeriodicJobEnqueuerConfig, dbAdapter dbadapter.Adapter) *PeriodicJobEnqueuer { +func NewPeriodicJobEnqueuer(archetype *baseservice.Archetype, config *PeriodicJobEnqueuerConfig, exec riverdriver.Executor) *PeriodicJobEnqueuer { svc := baseservice.Init(archetype, &PeriodicJobEnqueuer{ Config: (&PeriodicJobEnqueuerConfig{ - PeriodicJobs: config.PeriodicJobs, + AdvisoryLockPrefix: config.AdvisoryLockPrefix, + PeriodicJobs: config.PeriodicJobs, }).mustValidate(), - dbAdapter: dbAdapter, - periodicJobs: config.PeriodicJobs, + exec: exec, + periodicJobs: config.PeriodicJobs, + uniqueInserter: baseservice.Init(archetype, &dbunique.UniqueInserter{AdvisoryLockPrefix: config.AdvisoryLockPrefix}), }) return svc } +type insertParamsAndUniqueOpts struct { + InsertParams *riverdriver.JobInsertParams + UniqueOpts *dbunique.UniqueOpts +} + func (s *PeriodicJobEnqueuer) Start(ctx context.Context) error { ctx, shouldStart, stopped := s.StartInit(ctx) if !shouldStart { @@ -108,8 +119,8 @@ func (s *PeriodicJobEnqueuer) Start(ctx context.Context) error { // queues any jobs that should run immediately. 
{ var ( - insertParamsMany []*dbadapter.JobInsertParams - insertParamsUnique []*dbadapter.JobInsertParams + insertParamsMany []*riverdriver.JobInsertParams + insertParamsUnique []*insertParamsAndUniqueOpts ) now := s.TimeNowUTC() @@ -121,9 +132,9 @@ func (s *PeriodicJobEnqueuer) Start(ctx context.Context) error { periodicJob.nextRunAt = periodicJob.ScheduleFunc(now) if periodicJob.RunOnStart { - if insertParams, ok := s.insertParamsFromConstructor(ctx, periodicJob.ConstructorFunc); ok { - if insertParams.Unique { - insertParamsUnique = append(insertParamsUnique, insertParams) + if insertParams, uniqueOpts, ok := s.insertParamsFromConstructor(ctx, periodicJob.ConstructorFunc); ok { + if !uniqueOpts.IsEmpty() { + insertParamsUnique = append(insertParamsUnique, &insertParamsAndUniqueOpts{insertParams, uniqueOpts}) } else { insertParamsMany = append(insertParamsMany, insertParams) } @@ -149,8 +160,8 @@ func (s *PeriodicJobEnqueuer) Start(ctx context.Context) error { select { case <-timerUntilNextRun.C: var ( - insertParamsMany []*dbadapter.JobInsertParams - insertParamsUnique []*dbadapter.JobInsertParams + insertParamsMany []*riverdriver.JobInsertParams + insertParamsUnique []*insertParamsAndUniqueOpts ) now := s.TimeNowUTC() @@ -167,9 +178,9 @@ func (s *PeriodicJobEnqueuer) Start(ctx context.Context) error { periodicJob.nextRunAt = periodicJob.ScheduleFunc(now) - if insertParams, ok := s.insertParamsFromConstructor(ctx, periodicJob.ConstructorFunc); ok { - if insertParams.Unique { - insertParamsUnique = append(insertParamsUnique, insertParams) + if insertParams, uniqueOpts, ok := s.insertParamsFromConstructor(ctx, periodicJob.ConstructorFunc); ok { + if !uniqueOpts.IsEmpty() { + insertParamsUnique = append(insertParamsUnique, &insertParamsAndUniqueOpts{insertParams, uniqueOpts}) } else { insertParamsMany = append(insertParamsMany, insertParams) } @@ -193,9 +204,9 @@ func (s *PeriodicJobEnqueuer) Start(ctx context.Context) error { return nil } -func (s *PeriodicJobEnqueuer) insertBatch(ctx context.Context, insertParamsMany, insertParamsUnique []*dbadapter.JobInsertParams) { +func (s *PeriodicJobEnqueuer) insertBatch(ctx context.Context, insertParamsMany []*riverdriver.JobInsertParams, insertParamsUnique []*insertParamsAndUniqueOpts) { if len(insertParamsMany) > 0 { - if _, err := s.dbAdapter.JobInsertMany(ctx, insertParamsMany); err != nil { + if _, err := s.exec.JobInsertMany(ctx, insertParamsMany); err != nil { s.Logger.ErrorContext(ctx, s.Name+": Error inserting periodic jobs", "error", err.Error(), "num_jobs", len(insertParamsMany)) } @@ -206,10 +217,10 @@ func (s *PeriodicJobEnqueuer) insertBatch(ctx context.Context, insertParamsMany, // so we still maintain an insert many fast path above for programs that // aren't inserting any unique jobs periodically (which we expect is most). 
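 // (Unique insertion can't go through JobInsertMany because each job's
 // uniqueness has to be checked against the database individually; see
 // dbunique.UniqueInserter for the per-job check-then-insert flow.)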
if len(insertParamsUnique) > 0 { - for _, insertParams := range insertParamsUnique { - if _, err := s.dbAdapter.JobInsert(ctx, insertParams); err != nil { + for _, params := range insertParamsUnique { + if _, err := s.uniqueInserter.JobInsert(ctx, s.exec, params.InsertParams, params.UniqueOpts); err != nil { s.Logger.ErrorContext(ctx, s.Name+": Error inserting unique periodic job", - "error", err.Error(), "kind", insertParams.Kind) + "error", err.Error(), "kind", params.InsertParams.Kind) } } } @@ -219,19 +230,19 @@ func (s *PeriodicJobEnqueuer) insertBatch(ctx context.Context, insertParamsMany, } } -func (s *PeriodicJobEnqueuer) insertParamsFromConstructor(ctx context.Context, constructorFunc func() (*dbadapter.JobInsertParams, error)) (*dbadapter.JobInsertParams, bool) { - job, err := constructorFunc() +func (s *PeriodicJobEnqueuer) insertParamsFromConstructor(ctx context.Context, constructorFunc func() (*riverdriver.JobInsertParams, *dbunique.UniqueOpts, error)) (*riverdriver.JobInsertParams, *dbunique.UniqueOpts, bool) { + insertParams, uniqueOpts, err := constructorFunc() if err != nil { if errors.Is(err, ErrNoJobToInsert) { s.Logger.InfoContext(ctx, s.Name+": nil returned from periodic job constructor, skipping") s.TestSignals.SkippedJob.Signal(struct{}{}) - return nil, false + return nil, nil, false } s.Logger.ErrorContext(ctx, s.Name+": Internal error generating periodic job", "error", err.Error()) - return nil, false + return nil, nil, false } - return job, true + return insertParams, uniqueOpts, true } func (s *PeriodicJobEnqueuer) timeUntilNextRun() time.Duration { diff --git a/internal/maintenance/periodic_job_enqueuer_test.go b/internal/maintenance/periodic_job_enqueuer_test.go index 6d8bb7b7..5f3e44d7 100644 --- a/internal/maintenance/periodic_job_enqueuer_test.go +++ b/internal/maintenance/periodic_job_enqueuer_test.go @@ -8,39 +8,36 @@ import ( "testing" "time" - "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/dbadapter" - "github.com/riverqueue/river/internal/dbsqlc" + "github.com/riverqueue/river/internal/dbunique" "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivertype" ) func TestPeriodicJobEnqueuer(t *testing.T) { t.Parallel() - var ( - ctx = context.Background() - queries = dbsqlc.New() - ) + ctx := context.Background() type testBundle struct { - dbPool *pgxpool.Pool + exec riverdriver.Executor waitChan chan (struct{}) } - jobConstructorFunc := func(name string, unique bool) func() (*dbadapter.JobInsertParams, error) { - return func() (*dbadapter.JobInsertParams, error) { - return &dbadapter.JobInsertParams{ + jobConstructorFunc := func(name string, unique bool) func() (*riverdriver.JobInsertParams, *dbunique.UniqueOpts, error) { + return func() (*riverdriver.JobInsertParams, *dbunique.UniqueOpts, error) { + return &riverdriver.JobInsertParams{ EncodedArgs: []byte("{}"), Kind: name, MaxAttempts: rivercommon.MaxAttemptsDefault, Priority: rivercommon.PriorityDefault, Queue: rivercommon.QueueDefault, - State: dbsqlc.JobStateAvailable, - Unique: unique, - }, nil + State: rivertype.JobStateAvailable, + }, &dbunique.UniqueOpts{ByArgs: unique}, nil } } @@ -54,7 +51,7 @@ func TestPeriodicJobEnqueuer(t *testing.T) { t.Helper() bundle := &testBundle{ - dbPool: riverinternaltest.TestDB(ctx, t), + exec: 
riverpgxv5.New(riverinternaltest.TestDB(ctx, t)).GetExecutor(), waitChan: make(chan struct{}), } @@ -67,17 +64,17 @@ func TestPeriodicJobEnqueuer(t *testing.T) { {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms", false)}, {ScheduleFunc: periodicIntervalSchedule(1500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_1500ms", false)}, }, - }, dbadapter.NewStandardAdapter(archetype, &dbadapter.StandardAdapterConfig{Executor: bundle.dbPool})) + }, bundle.exec) svc.TestSignals.Init() t.Cleanup(svc.Stop) return svc, bundle } - requireNJobs := func(t *testing.T, pool *pgxpool.Pool, kind string, n int) { + requireNJobs := func(t *testing.T, exec riverdriver.Executor, kind string, n int) { t.Helper() - jobs, err := queries.JobGetByKind(ctx, pool, kind) + jobs, err := exec.JobGetByKindMany(ctx, []string{kind}) require.NoError(t, err) require.Len(t, jobs, n, fmt.Sprintf("Expected to find exactly %d job(s) of kind: %s, but found %d", n, kind, len(jobs))) } @@ -100,17 +97,17 @@ func TestPeriodicJobEnqueuer(t *testing.T) { require.NoError(t, svc.Start(ctx)) // Should be no jobs to start. - requireNJobs(t, bundle.dbPool, "periodic_job_500ms", 0) + requireNJobs(t, bundle.exec, "periodic_job_500ms", 0) svc.TestSignals.InsertedJobs.WaitOrTimeout() - requireNJobs(t, bundle.dbPool, "periodic_job_500ms", 1) + requireNJobs(t, bundle.exec, "periodic_job_500ms", 1) svc.TestSignals.InsertedJobs.WaitOrTimeout() - requireNJobs(t, bundle.dbPool, "periodic_job_500ms", 2) + requireNJobs(t, bundle.exec, "periodic_job_500ms", 2) svc.TestSignals.InsertedJobs.WaitOrTimeout() - requireNJobs(t, bundle.dbPool, "periodic_job_500ms", 3) - requireNJobs(t, bundle.dbPool, "periodic_job_1500ms", 1) + requireNJobs(t, bundle.exec, "periodic_job_500ms", 3) + requireNJobs(t, bundle.exec, "periodic_job_1500ms", 1) }) t.Run("RespectsJobUniqueness", func(t *testing.T) { @@ -125,18 +122,18 @@ func TestPeriodicJobEnqueuer(t *testing.T) { require.NoError(t, svc.Start(ctx)) // Should be no jobs to start. - requireNJobs(t, bundle.dbPool, "unique_periodic_job_500ms", 0) + requireNJobs(t, bundle.exec, "unique_periodic_job_500ms", 0) svc.TestSignals.InsertedJobs.WaitOrTimeout() - requireNJobs(t, bundle.dbPool, "unique_periodic_job_500ms", 1) + requireNJobs(t, bundle.exec, "unique_periodic_job_500ms", 1) // Another insert was attempted, but there's still only one job due to // uniqueness conditions. svc.TestSignals.InsertedJobs.WaitOrTimeout() - requireNJobs(t, bundle.dbPool, "unique_periodic_job_500ms", 1) + requireNJobs(t, bundle.exec, "unique_periodic_job_500ms", 1) svc.TestSignals.InsertedJobs.WaitOrTimeout() - requireNJobs(t, bundle.dbPool, "unique_periodic_job_500ms", 1) + requireNJobs(t, bundle.exec, "unique_periodic_job_500ms", 1) }) t.Run("RunOnStart", func(t *testing.T) { @@ -153,8 +150,8 @@ func TestPeriodicJobEnqueuer(t *testing.T) { require.NoError(t, svc.Start(ctx)) svc.TestSignals.InsertedJobs.WaitOrTimeout() - requireNJobs(t, bundle.dbPool, "periodic_job_5s", 1) - requireNJobs(t, bundle.dbPool, "unique_periodic_job_5s", 1) + requireNJobs(t, bundle.exec, "periodic_job_5s", 1) + requireNJobs(t, bundle.exec, "unique_periodic_job_5s", 1) // Should've happened quite quickly. 
require.WithinDuration(t, time.Now(), start, 1*time.Second) @@ -167,7 +164,9 @@ func TestPeriodicJobEnqueuer(t *testing.T) { svc.periodicJobs = []*PeriodicJob{ // skip this insert when it returns nil: - {ScheduleFunc: periodicIntervalSchedule(time.Second), ConstructorFunc: func() (*dbadapter.JobInsertParams, error) { return nil, ErrNoJobToInsert }, RunOnStart: true}, + {ScheduleFunc: periodicIntervalSchedule(time.Second), ConstructorFunc: func() (*riverdriver.JobInsertParams, *dbunique.UniqueOpts, error) { + return nil, nil, ErrNoJobToInsert + }, RunOnStart: true}, } require.NoError(t, svc.Start(ctx)) diff --git a/internal/maintenance/queue_maintainer_test.go b/internal/maintenance/queue_maintainer_test.go index e110a288..2e312148 100644 --- a/internal/maintenance/queue_maintainer_test.go +++ b/internal/maintenance/queue_maintainer_test.go @@ -10,11 +10,13 @@ import ( "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/baseservice" - "github.com/riverqueue/river/internal/dbadapter" + "github.com/riverqueue/river/internal/dbunique" "github.com/riverqueue/river/internal/maintenance/startstop" "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/riverinternaltest" "github.com/riverqueue/river/internal/riverinternaltest/sharedtx" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" ) type testService struct { @@ -94,19 +96,23 @@ func TestQueueMaintainer(t *testing.T) { archetype := riverinternaltest.BaseServiceArchetype(t) archetype.Logger = riverinternaltest.LoggerWarn(t) // loop started/stop log is very noisy; suppress + driver := riverpgxv5.New(nil).UnwrapExecutor(sharedTx) + // Use realistic services in this one so we can verify stress not only // on the queue maintainer, but it and all its subservices together. 
maintainer := setup(t, []Service{ - NewJobCleaner(archetype, &JobCleanerConfig{}, sharedTx), + NewJobCleaner(archetype, &JobCleanerConfig{}, driver), NewPeriodicJobEnqueuer(archetype, &PeriodicJobEnqueuerConfig{ PeriodicJobs: []*PeriodicJob{ { - ConstructorFunc: func() (*dbadapter.JobInsertParams, error) { return nil, ErrNoJobToInsert }, - ScheduleFunc: cron.Every(15 * time.Minute).Next, + ConstructorFunc: func() (*riverdriver.JobInsertParams, *dbunique.UniqueOpts, error) { + return nil, nil, ErrNoJobToInsert + }, + ScheduleFunc: cron.Every(15 * time.Minute).Next, }, }, - }, dbadapter.NewStandardAdapter(archetype, &dbadapter.StandardAdapterConfig{Executor: sharedTx})), - NewScheduler(archetype, &SchedulerConfig{}, sharedTx), + }, driver), + NewScheduler(archetype, &SchedulerConfig{}, driver), }) maintainer.Logger = riverinternaltest.LoggerWarn(t) // loop started/stop log is very noisy; suppress runStartStopStress(ctx, t, maintainer) diff --git a/internal/maintenance/reindexer.go b/internal/maintenance/reindexer.go index 9052e7a8..0318664a 100644 --- a/internal/maintenance/reindexer.go +++ b/internal/maintenance/reindexer.go @@ -9,8 +9,8 @@ import ( "github.com/riverqueue/river/internal/baseservice" "github.com/riverqueue/river/internal/maintenance/startstop" "github.com/riverqueue/river/internal/rivercommon" - "github.com/riverqueue/river/internal/util/dbutil" "github.com/riverqueue/river/internal/util/valutil" + "github.com/riverqueue/river/riverdriver" ) const ( @@ -62,11 +62,11 @@ type Reindexer struct { Config *ReindexerConfig TestSignals ReindexerTestSignals - batchSize int64 // configurable for test purposes - dbExecutor dbutil.Executor + batchSize int64 // configurable for test purposes + exec riverdriver.Executor } -func NewReindexer(archetype *baseservice.Archetype, config *ReindexerConfig, dbExecutor dbutil.Executor) *Reindexer { +func NewReindexer(archetype *baseservice.Archetype, config *ReindexerConfig, exec riverdriver.Executor) *Reindexer { indexNames := defaultIndexNames if config.IndexNames != nil { indexNames = config.IndexNames @@ -84,8 +84,8 @@ func NewReindexer(archetype *baseservice.Archetype, config *ReindexerConfig, dbE Timeout: valutil.ValOrDefault(config.Timeout, ReindexerTimeoutDefault), }).mustValidate(), - batchSize: BatchSizeDefault, - dbExecutor: dbExecutor, + batchSize: BatchSizeDefault, + exec: exec, }) } @@ -145,7 +145,7 @@ func (s *Reindexer) reindexOne(ctx context.Context, indexName string) error { ctx, cancel := context.WithTimeout(ctx, s.Config.Timeout) defer cancel() - _, err := s.dbExecutor.Exec(ctx, "REINDEX INDEX CONCURRENTLY "+indexName) + _, err := s.exec.Exec(ctx, "REINDEX INDEX CONCURRENTLY "+indexName) if err != nil { return err } diff --git a/internal/maintenance/reindexer_test.go b/internal/maintenance/reindexer_test.go index defdda1a..e82fa662 100644 --- a/internal/maintenance/reindexer_test.go +++ b/internal/maintenance/reindexer_test.go @@ -5,10 +5,11 @@ import ( "testing" "time" - "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" ) func TestReindexer(t *testing.T) { @@ -17,16 +18,17 @@ func TestReindexer(t *testing.T) { ctx := context.Background() type testBundle struct { - now time.Time - dbPool *pgxpool.Pool + exec riverdriver.Executor + now time.Time } setup := func(t *testing.T) (*Reindexer, *testBundle) { t.Helper() + dbPool := riverinternaltest.TestDB(ctx, 
t) bundle := &testBundle{ - now: time.Now(), - dbPool: riverinternaltest.TestDB(ctx, t), + exec: riverpgxv5.New(dbPool).GetExecutor(), + now: time.Now(), } archetype := riverinternaltest.BaseServiceArchetype(t) @@ -38,7 +40,9 @@ func TestReindexer(t *testing.T) { } } - svc := NewReindexer(archetype, &ReindexerConfig{ScheduleFunc: fromNow(500 * time.Millisecond)}, bundle.dbPool) + svc := NewReindexer(archetype, &ReindexerConfig{ + ScheduleFunc: fromNow(500 * time.Millisecond), + }, bundle.exec) svc.TestSignals.Init() t.Cleanup(svc.Stop) @@ -110,7 +114,7 @@ func TestReindexer(t *testing.T) { t.Parallel() svc, bundle := setup(t) - svc = NewReindexer(&svc.Archetype, &ReindexerConfig{}, bundle.dbPool) + svc = NewReindexer(&svc.Archetype, &ReindexerConfig{}, bundle.exec) require.Equal(t, defaultIndexNames, svc.Config.IndexNames) require.Equal(t, ReindexerTimeoutDefault, svc.Config.Timeout) diff --git a/internal/maintenance/rescuer.go b/internal/maintenance/rescuer.go index d9a5af7d..53760991 100644 --- a/internal/maintenance/rescuer.go +++ b/internal/maintenance/rescuer.go @@ -9,13 +9,12 @@ import ( "time" "github.com/riverqueue/river/internal/baseservice" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/maintenance/startstop" "github.com/riverqueue/river/internal/rivercommon" - "github.com/riverqueue/river/internal/util/dbutil" "github.com/riverqueue/river/internal/util/timeutil" "github.com/riverqueue/river/internal/util/valutil" "github.com/riverqueue/river/internal/workunit" + "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/rivertype" ) @@ -81,12 +80,11 @@ type Rescuer struct { Config *RescuerConfig TestSignals RescuerTestSignals - batchSize int // configurable for test purposes - dbExecutor dbutil.Executor - queries *dbsqlc.Queries + batchSize int // configurable for test purposes + exec riverdriver.Executor } -func NewRescuer(archetype *baseservice.Archetype, config *RescuerConfig, executor dbutil.Executor) *Rescuer { +func NewRescuer(archetype *baseservice.Archetype, config *RescuerConfig, exec riverdriver.Executor) *Rescuer { return baseservice.Init(archetype, &Rescuer{ Config: (&RescuerConfig{ ClientRetryPolicy: config.ClientRetryPolicy, @@ -95,9 +93,8 @@ func NewRescuer(archetype *baseservice.Archetype, config *RescuerConfig, executo WorkUnitFactoryFunc: config.WorkUnitFactoryFunc, }).mustValidate(), - batchSize: BatchSizeDefault, - dbExecutor: executor, - queries: dbsqlc.New(), + batchSize: BatchSizeDefault, + exec: exec, }) } @@ -168,7 +165,7 @@ func (s *Rescuer) runOnce(ctx context.Context) (*rescuerRunOnceResult, error) { now := time.Now().UTC() - rescueManyParams := dbsqlc.JobRescueManyParams{ + rescueManyParams := riverdriver.JobRescueManyParams{ ID: make([]int64, len(stuckJobs)), Error: make([][]byte, len(stuckJobs)), FinalizedAt: make([]time.Time, len(stuckJobs)), @@ -186,7 +183,7 @@ func (s *Rescuer) runOnce(ctx context.Context) (*rescuerRunOnceResult, error) { rescueManyParams.Error[i], err = json.Marshal(rivertype.AttemptError{ At: now, - Attempt: max(int(job.Attempt), 0), + Attempt: max(job.Attempt, 0), Error: "Stuck job rescued by Rescuer", Trace: "TODO", }) @@ -198,23 +195,23 @@ func (s *Rescuer) runOnce(ctx context.Context) (*rescuerRunOnceResult, error) { res.NumJobsCancelled++ rescueManyParams.FinalizedAt[i] = now rescueManyParams.ScheduledAt[i] = job.ScheduledAt // reuse previous value - rescueManyParams.State[i] = string(dbsqlc.JobStateCancelled) + rescueManyParams.State[i] = 
string(rivertype.JobStateCancelled) continue } shouldRetry, retryAt := s.makeRetryDecision(ctx, job) if shouldRetry { res.NumJobsRetried++ rescueManyParams.ScheduledAt[i] = retryAt - rescueManyParams.State[i] = string(dbsqlc.JobStateRetryable) + rescueManyParams.State[i] = string(rivertype.JobStateRetryable) } else { res.NumJobsDiscarded++ rescueManyParams.FinalizedAt[i] = now rescueManyParams.ScheduledAt[i] = job.ScheduledAt // reuse previous value - rescueManyParams.State[i] = string(dbsqlc.JobStateDiscarded) + rescueManyParams.State[i] = string(rivertype.JobStateDiscarded) } } - err = s.queries.JobRescueMany(ctx, s.dbExecutor, rescueManyParams) + _, err = s.exec.JobRescueMany(ctx, &rescueManyParams) if err != nil { return nil, fmt.Errorf("error rescuing stuck jobs: %w", err) } @@ -238,23 +235,21 @@ func (s *Rescuer) runOnce(ctx context.Context) (*rescuerRunOnceResult, error) { return res, nil } -func (s *Rescuer) getStuckJobs(ctx context.Context) ([]*dbsqlc.RiverJob, error) { +func (s *Rescuer) getStuckJobs(ctx context.Context) ([]*rivertype.JobRow, error) { ctx, cancelFunc := context.WithTimeout(ctx, 30*time.Second) defer cancelFunc() stuckHorizon := time.Now().Add(-s.Config.RescueAfter) - return s.queries.JobGetStuck(ctx, s.dbExecutor, dbsqlc.JobGetStuckParams{ + return s.exec.JobGetStuck(ctx, &riverdriver.JobGetStuckParams{ + Max: s.batchSize, StuckHorizon: stuckHorizon, - LimitCount: int32(s.batchSize), }) } // makeRetryDecision decides whether or not a rescued job should be retried, and if so, // when. -func (s *Rescuer) makeRetryDecision(ctx context.Context, internalJob *dbsqlc.RiverJob) (bool, time.Time) { - job := dbsqlc.JobRowFromInternal(internalJob) - +func (s *Rescuer) makeRetryDecision(ctx context.Context, job *rivertype.JobRow) (bool, time.Time) { workUnitFactory := s.Config.WorkUnitFactoryFunc(job.Kind) if workUnitFactory == nil { s.Logger.ErrorContext(ctx, s.Name+": Attempted to rescue unhandled job kind, discarding", @@ -272,5 +267,5 @@ func (s *Rescuer) makeRetryDecision(ctx context.Context, internalJob *dbsqlc.Riv if nextRetry.IsZero() { nextRetry = s.Config.ClientRetryPolicy.NextRetry(job) } - return job.Attempt < max(int(internalJob.MaxAttempts), 0), nextRetry + return job.Attempt < max(job.MaxAttempts, 0), nextRetry } diff --git a/internal/maintenance/rescuer_test.go b/internal/maintenance/rescuer_test.go index e3028074..f3f9a987 100644 --- a/internal/maintenance/rescuer_test.go +++ b/internal/maintenance/rescuer_test.go @@ -7,15 +7,15 @@ import ( "testing" "time" - "github.com/jackc/pgx/v5" "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/riverinternaltest" "github.com/riverqueue/river/internal/util/ptrutil" "github.com/riverqueue/river/internal/util/timeutil" "github.com/riverqueue/river/internal/workunit" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivertype" ) @@ -52,46 +52,44 @@ func TestRescuer(t *testing.T) { const rescuerJobKind = "rescuer" - var ( - ctx = context.Background() - queries = dbsqlc.New() - ) + ctx := context.Background() type testBundle struct { + exec riverdriver.Executor rescueHorizon time.Time - tx pgx.Tx } type insertJobParams struct { - Attempt int16 + Attempt int AttemptedAt *time.Time MaxAttempts int16 Metadata []byte - State dbsqlc.JobState + State rivertype.JobState } - insertJob := func(ctx context.Context, dbtx dbsqlc.DBTX, 
params insertJobParams) *dbsqlc.RiverJob { - job, err := queries.JobInsert(ctx, dbtx, dbsqlc.JobInsertParams{ + insertJob := func(ctx context.Context, exec riverdriver.Executor, params insertJobParams) *rivertype.JobRow { + jobRow, err := exec.JobInsert(ctx, &riverdriver.JobInsertParams{ Attempt: params.Attempt, AttemptedAt: params.AttemptedAt, - Args: []byte("{}"), + EncodedArgs: []byte("{}"), Kind: rescuerJobKind, MaxAttempts: 5, Metadata: params.Metadata, - Priority: int16(rivercommon.PriorityDefault), + Priority: rivercommon.PriorityDefault, Queue: rivercommon.QueueDefault, State: params.State, }) require.NoError(t, err) - return job + return jobRow } setup := func(t *testing.T) (*Rescuer, *testBundle) { t.Helper() + tx := riverinternaltest.TestTx(ctx, t) bundle := &testBundle{ + exec: riverpgxv5.New(nil).UnwrapExecutor(tx), rescueHorizon: time.Now().Add(-RescueAfterDefault), - tx: riverinternaltest.TestTx(ctx, t), } rescuer := NewRescuer( @@ -107,7 +105,7 @@ func TestRescuer(t *testing.T) { panic("unhandled kind: " + kind) }, }, - bundle.tx) + bundle.exec) rescuer.TestSignals.Init() t.Cleanup(rescuer.Stop) @@ -146,71 +144,71 @@ func TestRescuer(t *testing.T) { cleaner, bundle := setup(t) - stuckToRetryJob1 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) - stuckToRetryJob2 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Minute))}) - stuckToRetryJob3 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(1 * time.Minute))}) // won't be rescued + stuckToRetryJob1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) + stuckToRetryJob2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Minute))}) + stuckToRetryJob3 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(1 * time.Minute))}) // won't be rescued // Already at max attempts: - stuckToDiscardJob1 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning, Attempt: 5, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) - stuckToDiscardJob2 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning, Attempt: 5, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(1 * time.Minute))}) // won't be rescued + stuckToDiscardJob1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning, Attempt: 5, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) + stuckToDiscardJob2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning, Attempt: 5, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(1 * time.Minute))}) // won't be rescued // Marked as cancelled by query: cancelTime := time.Now().UTC().Format(time.RFC3339Nano) - stuckToCancelJob1 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour)), Metadata: []byte(fmt.Sprintf(`{"cancel_attempted_at": %q}`, cancelTime))}) - stuckToCancelJob2 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(1 * time.Minute)), Metadata: 
[]byte(fmt.Sprintf(`{"cancel_attempted_at": %q}`, cancelTime))}) // won't be rescued + stuckToCancelJob1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour)), Metadata: []byte(fmt.Sprintf(`{"cancel_attempted_at": %q}`, cancelTime))}) + stuckToCancelJob2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(1 * time.Minute)), Metadata: []byte(fmt.Sprintf(`{"cancel_attempted_at": %q}`, cancelTime))}) // won't be rescued // these aren't touched: - notRunningJob1 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCompleted, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) - notRunningJob2 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateDiscarded, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) - notRunningJob3 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateCancelled, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) + notRunningJob1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCompleted, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) + notRunningJob2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateDiscarded, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) + notRunningJob3 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCancelled, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) require.NoError(cleaner.Start(ctx)) cleaner.TestSignals.FetchedBatch.WaitOrTimeout() cleaner.TestSignals.UpdatedBatch.WaitOrTimeout() - confirmRetried := func(jobBefore *dbsqlc.RiverJob) { - jobAfter, err := queries.JobGetByID(ctx, bundle.tx, jobBefore.ID) + confirmRetried := func(jobBefore *rivertype.JobRow) { + jobAfter, err := bundle.exec.JobGetByID(ctx, jobBefore.ID) require.NoError(err) - require.Equal(dbsqlc.JobStateRetryable, jobAfter.State) + require.Equal(rivertype.JobStateRetryable, jobAfter.State) } var err error confirmRetried(stuckToRetryJob1) confirmRetried(stuckToRetryJob2) - job3After, err := queries.JobGetByID(ctx, bundle.tx, stuckToRetryJob3.ID) + job3After, err := bundle.exec.JobGetByID(ctx, stuckToRetryJob3.ID) require.NoError(err) require.Equal(stuckToRetryJob3.State, job3After.State) // not rescued - discard1After, err := queries.JobGetByID(ctx, bundle.tx, stuckToDiscardJob1.ID) + discard1After, err := bundle.exec.JobGetByID(ctx, stuckToDiscardJob1.ID) require.NoError(err) - require.Equal(dbsqlc.JobStateDiscarded, discard1After.State) + require.Equal(rivertype.JobStateDiscarded, discard1After.State) require.WithinDuration(time.Now(), *discard1After.FinalizedAt, 5*time.Second) require.Len(discard1After.Errors, 1) - discard2After, err := queries.JobGetByID(ctx, bundle.tx, stuckToDiscardJob2.ID) + discard2After, err := bundle.exec.JobGetByID(ctx, stuckToDiscardJob2.ID) require.NoError(err) - require.Equal(dbsqlc.JobStateRunning, discard2After.State) + require.Equal(rivertype.JobStateRunning, discard2After.State) require.Nil(discard2After.FinalizedAt) - cancel1After, err := queries.JobGetByID(ctx, bundle.tx, stuckToCancelJob1.ID) + cancel1After, err := bundle.exec.JobGetByID(ctx, stuckToCancelJob1.ID) require.NoError(err) - require.Equal(dbsqlc.JobStateCancelled, cancel1After.State) + require.Equal(rivertype.JobStateCancelled, cancel1After.State) require.WithinDuration(time.Now(), 
*cancel1After.FinalizedAt, 5*time.Second) require.Len(cancel1After.Errors, 1) - cancel2After, err := queries.JobGetByID(ctx, bundle.tx, stuckToCancelJob2.ID) + cancel2After, err := bundle.exec.JobGetByID(ctx, stuckToCancelJob2.ID) require.NoError(err) - require.Equal(dbsqlc.JobStateRunning, cancel2After.State) + require.Equal(rivertype.JobStateRunning, cancel2After.State) require.Nil(cancel2After.FinalizedAt) - notRunning1After, err := queries.JobGetByID(ctx, bundle.tx, notRunningJob1.ID) + notRunning1After, err := bundle.exec.JobGetByID(ctx, notRunningJob1.ID) require.NoError(err) require.Equal(notRunning1After.State, notRunningJob1.State) - notRunning2After, err := queries.JobGetByID(ctx, bundle.tx, notRunningJob2.ID) + notRunning2After, err := bundle.exec.JobGetByID(ctx, notRunningJob2.ID) require.NoError(err) require.Equal(notRunning2After.State, notRunningJob2.State) - notRunning3After, err := queries.JobGetByID(ctx, bundle.tx, notRunningJob3.ID) + notRunning3After, err := bundle.exec.JobGetByID(ctx, notRunningJob3.ID) require.NoError(err) require.Equal(notRunning3After.State, notRunningJob3.State) }) @@ -225,10 +223,10 @@ func TestRescuer(t *testing.T) { // one extra batch, ensuring that we've tested working multiple. numJobs := cleaner.batchSize + 1 - jobs := make([]*dbsqlc.RiverJob, numJobs) + jobs := make([]*rivertype.JobRow, numJobs) for i := 0; i < numJobs; i++ { - job := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) + job := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) jobs[i] = job } @@ -241,9 +239,9 @@ func TestRescuer(t *testing.T) { cleaner.TestSignals.UpdatedBatch.WaitOrTimeout() // need to wait until after this for the conn to be free for _, job := range jobs { - jobUpdated, err := queries.JobGetByID(ctx, bundle.tx, job.ID) + jobUpdated, err := bundle.exec.JobGetByID(ctx, job.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, jobUpdated.State) + require.Equal(t, rivertype.JobStateRetryable, jobUpdated.State) } }) @@ -297,7 +295,7 @@ func TestRescuer(t *testing.T) { rescuer, bundle := setup(t) rescuer.Config.Interval = time.Minute // should only trigger once for the initial run - job1 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning, Attempt: 5, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) + job1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning, Attempt: 5, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Hour))}) require.NoError(t, rescuer.Start(ctx)) @@ -306,18 +304,18 @@ func TestRescuer(t *testing.T) { rescuer.Stop() - job2 := insertJob(ctx, bundle.tx, insertJobParams{State: dbsqlc.JobStateRunning, Attempt: 5, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Minute))}) + job2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning, Attempt: 5, AttemptedAt: ptrutil.Ptr(bundle.rescueHorizon.Add(-1 * time.Minute))}) require.NoError(t, rescuer.Start(ctx)) rescuer.TestSignals.FetchedBatch.WaitOrTimeout() rescuer.TestSignals.UpdatedBatch.WaitOrTimeout() - job1After, err := queries.JobGetByID(ctx, bundle.tx, job1.ID) + job1After, err := bundle.exec.JobGetByID(ctx, job1.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateDiscarded, job1After.State) - job2After, err := queries.JobGetByID(ctx, bundle.tx, job2.ID) + 
require.Equal(t, rivertype.JobStateDiscarded, job1After.State) + job2After, err := bundle.exec.JobGetByID(ctx, job2.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateDiscarded, job2After.State) + require.Equal(t, rivertype.JobStateDiscarded, job2After.State) }) } diff --git a/internal/maintenance/scheduler.go b/internal/maintenance/scheduler.go index 82e01892..3a4bd1d5 100644 --- a/internal/maintenance/scheduler.go +++ b/internal/maintenance/scheduler.go @@ -8,13 +8,12 @@ import ( "time" "github.com/riverqueue/river/internal/baseservice" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/maintenance/startstop" "github.com/riverqueue/river/internal/notifier" "github.com/riverqueue/river/internal/rivercommon" - "github.com/riverqueue/river/internal/util/dbutil" "github.com/riverqueue/river/internal/util/timeutil" "github.com/riverqueue/river/internal/util/valutil" + "github.com/riverqueue/river/riverdriver" ) const ( @@ -62,25 +61,17 @@ type Scheduler struct { // exported for test purposes TestSignals SchedulerTestSignals - config *SchedulerConfig - dbExecutor dbutil.Executor - queries *dbsqlc.Queries + config *SchedulerConfig + exec riverdriver.Executor } -func NewScheduler(archetype *baseservice.Archetype, config *SchedulerConfig, executor dbutil.Executor) *Scheduler { +func NewScheduler(archetype *baseservice.Archetype, config *SchedulerConfig, exec riverdriver.Executor) *Scheduler { return baseservice.Init(archetype, &Scheduler{ config: (&SchedulerConfig{ Interval: valutil.ValOrDefault(config.Interval, SchedulerIntervalDefault), Limit: valutil.ValOrDefault(config.Limit, SchedulerLimitDefault), }).mustValidate(), - - // TODO(bgentry): should Adapter be moved to a shared internal package - // (rivercommon) so that it can be accessed from here, instead of needing - // the Pool + Queries separately? The intention was to allocate the Adapter - // once and use it everywhere. This is particularly important for Pro stuff - // where we won't have direct access to its internal queries package. - dbExecutor: executor, - queries: dbsqlc.New(), + exec: exec, }) } @@ -118,7 +109,7 @@ func (s *Scheduler) Start(ctx context.Context) error { //nolint:dupl continue } s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully, - slog.Int64("num_jobs_scheduled", res.NumCompletedJobsScheduled), + slog.Int("num_jobs_scheduled", res.NumCompletedJobsScheduled), ) } }() @@ -127,7 +118,7 @@ func (s *Scheduler) Start(ctx context.Context) error { //nolint:dupl } type schedulerRunOnceResult struct { - NumCompletedJobsScheduled int64 + NumCompletedJobsScheduled int } func (s *Scheduler) runOnce(ctx context.Context) (*schedulerRunOnceResult, error) { @@ -135,17 +126,17 @@ func (s *Scheduler) runOnce(ctx context.Context) (*schedulerRunOnceResult, error for { // Wrapped in a function so that defers run as expected. 
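// (A `defer cancelFunc()` written directly inside the for loop would only run once runOnce returns, so each batch's 30-second timeout context would be held for the entire drain loop; the function literal scopes the defer to a single iteration so the context is released promptly.)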
- numScheduled, err := func() (int64, error) { + numScheduled, err := func() (int, error) { ctx, cancelFunc := context.WithTimeout(ctx, 30*time.Second) defer cancelFunc() - numScheduled, err := s.queries.JobSchedule(ctx, s.dbExecutor, dbsqlc.JobScheduleParams{ + numScheduled, err := s.exec.JobSchedule(ctx, &riverdriver.JobScheduleParams{ InsertTopic: string(notifier.NotificationTopicInsert), - Max: int64(s.config.Limit), + Max: s.config.Limit, Now: s.TimeNowUTC(), }) if err != nil { - return 0, fmt.Errorf("error deleting completed jobs: %w", err) + return 0, fmt.Errorf("error scheduling jobs: %w", err) } return numScheduled, nil @@ -158,12 +149,12 @@ func (s *Scheduler) runOnce(ctx context.Context) (*schedulerRunOnceResult, error res.NumCompletedJobsScheduled += numScheduled // Scheduled was less than query `LIMIT` which means work is done. - if int(numScheduled) < s.config.Limit { + if numScheduled < s.config.Limit { break } s.Logger.InfoContext(ctx, s.Name+": Scheduled batch of jobs", - slog.Int64("num_completed_jobs_scheduled", numScheduled), + slog.Int("num_completed_jobs_scheduled", numScheduled), ) s.CancellableSleepRandomBetween(ctx, BatchBackoffMin, BatchBackoffMax) diff --git a/internal/maintenance/scheduler_test.go b/internal/maintenance/scheduler_test.go index 40057e33..c8307067 100644 --- a/internal/maintenance/scheduler_test.go +++ b/internal/maintenance/scheduler_test.go @@ -10,51 +10,49 @@ import ( "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/componentstatus" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/notifier" "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/riverinternaltest" - "github.com/riverqueue/river/internal/util/dbutil" "github.com/riverqueue/river/internal/util/ptrutil" "github.com/riverqueue/river/internal/util/valutil" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivertype" ) func TestScheduler(t *testing.T) { t.Parallel() - var ( - ctx = context.Background() - queries = dbsqlc.New() - ) + ctx := context.Background() type testBundle struct { - ex dbutil.Executor + exec riverdriver.Executor } type insertJobParams struct { Queue string ScheduledAt *time.Time - State dbsqlc.JobState + State rivertype.JobState } - insertJob := func(ctx context.Context, dbtx dbsqlc.DBTX, params insertJobParams) *dbsqlc.RiverJob { - job, err := queries.JobInsert(ctx, dbtx, dbsqlc.JobInsertParams{ + insertJob := func(ctx context.Context, exec riverdriver.Executor, params insertJobParams) *rivertype.JobRow { + jobRow, err := exec.JobInsert(ctx, &riverdriver.JobInsertParams{ Kind: "test_kind", - MaxAttempts: int16(rivercommon.MaxAttemptsDefault), - Priority: int16(rivercommon.PriorityDefault), + MaxAttempts: rivercommon.MaxAttemptsDefault, + Priority: rivercommon.PriorityDefault, Queue: valutil.ValOrDefault(params.Queue, "default"), ScheduledAt: params.ScheduledAt, State: params.State, }) require.NoError(t, err) - return job + return jobRow } - setup := func(t *testing.T, ex dbutil.Executor) (*Scheduler, *testBundle) { + setup := func(t *testing.T, ex riverdriver.Executor) (*Scheduler, *testBundle) { t.Helper() bundle := &testBundle{ - ex: ex, + exec: ex, } scheduler := NewScheduler( @@ -63,7 +61,7 @@ func TestScheduler(t *testing.T) { Interval: SchedulerIntervalDefault, Limit: 10, }, - bundle.ex) + bundle.exec) scheduler.TestSignals.Init() t.Cleanup(scheduler.Stop) @@ -72,21 +70,22 @@ func 
TestScheduler(t *testing.T) { setupTx := func(t *testing.T) (*Scheduler, *testBundle) { t.Helper() - return setup(t, riverinternaltest.TestTx(ctx, t)) + tx := riverinternaltest.TestTx(ctx, t) + return setup(t, riverpgxv5.New(nil).UnwrapExecutor(tx)) } - requireJobStateUnchanged := func(t *testing.T, ex dbutil.Executor, job *dbsqlc.RiverJob) *dbsqlc.RiverJob { + requireJobStateUnchanged := func(t *testing.T, exec riverdriver.Executor, job *rivertype.JobRow) *rivertype.JobRow { t.Helper() - newJob, err := queries.JobGetByID(ctx, ex, job.ID) + newJob, err := exec.JobGetByID(ctx, job.ID) require.NoError(t, err) require.Equal(t, job.State, newJob.State) return newJob } - requireJobStateAvailable := func(t *testing.T, ex dbutil.Executor, job *dbsqlc.RiverJob) *dbsqlc.RiverJob { + requireJobStateAvailable := func(t *testing.T, exec riverdriver.Executor, job *rivertype.JobRow) *rivertype.JobRow { t.Helper() - newJob, err := queries.JobGetByID(ctx, ex, job.ID) + newJob, err := exec.JobGetByID(ctx, job.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateAvailable, newJob.State) + require.Equal(t, rivertype.JobStateAvailable, newJob.State) return newJob } @@ -115,39 +114,39 @@ func TestScheduler(t *testing.T) { scheduler, bundle := setupTx(t) // none of these should get updated - job1 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateCompleted}) - job2 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateRunning}) - job3 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateCancelled}) - job4 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateDiscarded}) - job5 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateAvailable}) + job1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCompleted}) + job2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRunning}) + job3 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateCancelled}) + job4 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateDiscarded}) + job5 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateAvailable}) now := time.Now().UTC() - scheduledJob1 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) - scheduledJob2 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) - scheduledJob3 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(30 * time.Second))}) // won't be scheduled + scheduledJob1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) + scheduledJob2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + scheduledJob3 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(30 * time.Second))}) // won't be scheduled - retryableJob1 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateRetryable, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) - retryableJob2 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateRetryable, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) - retryableJob3 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateRetryable, ScheduledAt: 
ptrutil.Ptr(now.Add(30 * time.Second))}) // won't be scheduled + retryableJob1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRetryable, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) + retryableJob2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRetryable, ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + retryableJob3 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRetryable, ScheduledAt: ptrutil.Ptr(now.Add(30 * time.Second))}) // won't be scheduled require.NoError(t, scheduler.Start(ctx)) scheduler.TestSignals.ScheduledBatch.WaitOrTimeout() - requireJobStateUnchanged(t, bundle.ex, job1) - requireJobStateUnchanged(t, bundle.ex, job2) - requireJobStateUnchanged(t, bundle.ex, job3) - requireJobStateUnchanged(t, bundle.ex, job4) - requireJobStateUnchanged(t, bundle.ex, job5) + requireJobStateUnchanged(t, bundle.exec, job1) + requireJobStateUnchanged(t, bundle.exec, job2) + requireJobStateUnchanged(t, bundle.exec, job3) + requireJobStateUnchanged(t, bundle.exec, job4) + requireJobStateUnchanged(t, bundle.exec, job5) - requireJobStateAvailable(t, bundle.ex, scheduledJob1) - requireJobStateAvailable(t, bundle.ex, scheduledJob2) - requireJobStateUnchanged(t, bundle.ex, scheduledJob3) // still scheduled + requireJobStateAvailable(t, bundle.exec, scheduledJob1) + requireJobStateAvailable(t, bundle.exec, scheduledJob2) + requireJobStateUnchanged(t, bundle.exec, scheduledJob3) // still scheduled - requireJobStateAvailable(t, bundle.ex, retryableJob1) - requireJobStateAvailable(t, bundle.ex, retryableJob2) - requireJobStateUnchanged(t, bundle.ex, retryableJob3) // still retryable + requireJobStateAvailable(t, bundle.exec, retryableJob1) + requireJobStateAvailable(t, bundle.exec, retryableJob2) + requireJobStateUnchanged(t, bundle.exec, retryableJob3) // still retryable }) t.Run("SchedulesInBatches", func(t *testing.T) { @@ -162,14 +161,14 @@ func TestScheduler(t *testing.T) { // one extra batch, ensuring that we've tested working multiple. 
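// (runOnce's drain loop stops as soon as a batch comes back smaller than config.Limit, so Limit+1 eligible jobs forces exactly two passes: one full batch of Limit followed by a final batch of one.)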
numJobs := scheduler.config.Limit + 1 - jobs := make([]*dbsqlc.RiverJob, numJobs) + jobs := make([]*rivertype.JobRow, numJobs) for i := 0; i < numJobs; i++ { - jobState := dbsqlc.JobStateScheduled + jobState := rivertype.JobStateScheduled if i%2 == 0 { - jobState = dbsqlc.JobStateRetryable + jobState = rivertype.JobStateRetryable } - job := insertJob(ctx, bundle.ex, insertJobParams{State: jobState, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) + job := insertJob(ctx, bundle.exec, insertJobParams{State: jobState, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) jobs[i] = job } @@ -180,7 +179,7 @@ func TestScheduler(t *testing.T) { scheduler.TestSignals.ScheduledBatch.WaitOrTimeout() for _, job := range jobs { - requireJobStateAvailable(t, bundle.ex, job) + requireJobStateAvailable(t, bundle.exec, job) } }) @@ -235,7 +234,7 @@ func TestScheduler(t *testing.T) { scheduler.config.Interval = time.Minute // should only trigger once for the initial run now := time.Now().UTC() - job1 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) + job1 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateScheduled, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) require.NoError(t, scheduler.Start(ctx)) @@ -243,21 +242,25 @@ func TestScheduler(t *testing.T) { scheduler.Stop() - job2 := insertJob(ctx, bundle.ex, insertJobParams{State: dbsqlc.JobStateRetryable, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Minute))}) + job2 := insertJob(ctx, bundle.exec, insertJobParams{State: rivertype.JobStateRetryable, ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Minute))}) require.NoError(t, scheduler.Start(ctx)) scheduler.TestSignals.ScheduledBatch.WaitOrTimeout() - requireJobStateAvailable(t, bundle.ex, job1) - requireJobStateAvailable(t, bundle.ex, job2) + requireJobStateAvailable(t, bundle.exec, job1) + requireJobStateAvailable(t, bundle.exec, job2) }) t.Run("TriggersNotificationsOnEachQueueWithNewlyAvailableJobs", func(t *testing.T) { t.Parallel() dbPool := riverinternaltest.TestDB(ctx, t) - scheduler, _ := setup(t, dbPool) + driver := riverpgxv5.New(dbPool) + exec := driver.GetExecutor() + listener := driver.GetListener() + + scheduler, _ := setup(t, exec) scheduler.config.Interval = time.Minute // should only trigger once for the initial run now := time.Now().UTC() @@ -265,7 +268,7 @@ func TestScheduler(t *testing.T) { statusUpdate := func(status componentstatus.Status) { statusUpdateCh <- status } - notify := notifier.New(&scheduler.Archetype, dbPool.Config().ConnConfig, statusUpdate, riverinternaltest.Logger(t)) + notify := notifier.New(&scheduler.Archetype, listener, statusUpdate, riverinternaltest.Logger(t)) // Scope in so we can reuse ctx without the cancel embedded. 
{ @@ -297,19 +300,19 @@ func TestScheduler(t *testing.T) { } } - addJob := func(queue string, fromNow time.Duration, state dbsqlc.JobState) { + addJob := func(queue string, fromNow time.Duration, state rivertype.JobState) { t.Helper() - insertJob(ctx, dbPool, insertJobParams{Queue: queue, State: state, ScheduledAt: ptrutil.Ptr(now.Add(fromNow))}) + insertJob(ctx, exec, insertJobParams{Queue: queue, State: state, ScheduledAt: ptrutil.Ptr(now.Add(fromNow))}) } - addJob("queue1", -1*time.Hour, dbsqlc.JobStateScheduled) - addJob("queue2", -1*time.Minute, dbsqlc.JobStateScheduled) - addJob("queue3", -30*time.Second, dbsqlc.JobStateRetryable) + addJob("queue1", -1*time.Hour, rivertype.JobStateScheduled) + addJob("queue2", -1*time.Minute, rivertype.JobStateScheduled) + addJob("queue3", -30*time.Second, rivertype.JobStateRetryable) // these shouldn't cause notifications: - addJob("queue2", -5*time.Second, dbsqlc.JobStateScheduled) // it's a duplicate - addJob("future_queue", time.Minute, dbsqlc.JobStateScheduled) // it's in the future - addJob("other_status_queue", time.Minute, dbsqlc.JobStateCancelled) // it's cancelled + addJob("queue2", -5*time.Second, rivertype.JobStateScheduled) // it's a duplicate + addJob("future_queue", time.Minute, rivertype.JobStateScheduled) // it's in the future + addJob("other_status_queue", time.Minute, rivertype.JobStateCancelled) // it's cancelled // Run the scheduler and wait for it to execute once: require.NoError(t, scheduler.Start(ctx)) diff --git a/internal/notifier/notifier.go b/internal/notifier/notifier.go index a0b9802e..ea0d8e47 100644 --- a/internal/notifier/notifier.go +++ b/internal/notifier/notifier.go @@ -3,22 +3,16 @@ package notifier import ( "context" "errors" - "fmt" "log/slog" - "strconv" "sync" "time" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgconn" - "github.com/riverqueue/river/internal/baseservice" "github.com/riverqueue/river/internal/componentstatus" "github.com/riverqueue/river/internal/rivercommon" + "github.com/riverqueue/river/riverdriver" ) -const statementTimeout = 5 * time.Second - type NotificationTopic string const ( @@ -52,8 +46,8 @@ type subscriptionChange struct { type Notifier struct { baseservice.BaseService - connConfig *pgx.ConnConfig - notificationBuf chan *pgconn.Notification + listener riverdriver.Listener + notificationBuf chan *riverdriver.Notification statusChangeFunc func(componentstatus.Status) logger *slog.Logger @@ -63,20 +57,16 @@ type Notifier struct { subChangeCh chan *subscriptionChange } -func New(archetype *baseservice.Archetype, connConfig *pgx.ConnConfig, statusChangeFunc func(componentstatus.Status), logger *slog.Logger) *Notifier { - copiedConfig := connConfig.Copy() - // Rely on an overall statement timeout instead of setting identical context timeouts on every query: - copiedConfig.RuntimeParams["statement_timeout"] = strconv.Itoa(int(statementTimeout.Milliseconds())) +func New(archetype *baseservice.Archetype, listener riverdriver.Listener, statusChangeFunc func(componentstatus.Status), logger *slog.Logger) *Notifier { notifier := baseservice.Init(archetype, &Notifier{ - connConfig: copiedConfig, - notificationBuf: make(chan *pgconn.Notification, 1000), + listener: listener, + notificationBuf: make(chan *riverdriver.Notification, 1000), statusChangeFunc: statusChangeFunc, logger: logger.WithGroup("notifier"), subs: make(map[NotificationTopic][]*Subscription), subChangeCh: make(chan *subscriptionChange, 1000), }) - copiedConfig.OnNotification = notifier.handleNotification return notifier 
} @@ -115,18 +105,18 @@ func (n *Notifier) deliverNotifications(ctx context.Context) { } } -func (n *Notifier) deliverNotification(notif *pgconn.Notification) { +func (n *Notifier) deliverNotification(notif *riverdriver.Notification) { n.mu.Lock() - fns := make([]NotifyFunc, len(n.subs[NotificationTopic(notif.Channel)])) - for i, sub := range n.subs[NotificationTopic(notif.Channel)] { + fns := make([]NotifyFunc, len(n.subs[NotificationTopic(notif.Topic)])) + for i, sub := range n.subs[NotificationTopic(notif.Topic)] { fns[i] = sub.notifyFunc } n.mu.Unlock() for _, fn := range fns { // TODO: panic recovery on delivery attempts - fn(NotificationTopic(notif.Channel), notif.Payload) + fn(NotificationTopic(notif.Topic), notif.Payload) } } @@ -134,8 +124,7 @@ func (n *Notifier) getConnAndRun(ctx context.Context) { ctx, cancel := context.WithCancel(ctx) defer cancel() - conn, err := n.establishConn(ctx) - if err != nil { + if err := n.listener.Connect(ctx); err != nil { if !errors.Is(context.Cause(ctx), rivercommon.ErrShutdown) { n.logger.Error("error establishing connection from pool", "err", err) } @@ -145,7 +134,10 @@ func (n *Notifier) getConnAndRun(ctx context.Context) { // use an already-canceled context here so conn.Close() does not block the run loop ctx, cancel := context.WithDeadline(ctx, time.Now()) defer cancel() - conn.Close(ctx) + + if err := n.listener.Close(ctx); err != nil { + n.logger.Error("error closing listener", "err", err) + } }() startingTopics := n.setConnActive() @@ -155,7 +147,7 @@ func (n *Notifier) getConnAndRun(ctx context.Context) { // active = false) just to ensure nobody is blocking on sending to it for _, topic := range startingTopics { - if err := n.execListen(ctx, conn, topic); err != nil { + if err := n.listener.Listen(ctx, string(topic)); err != nil { // TODO: log? 
return } @@ -171,7 +163,7 @@ func (n *Notifier) getConnAndRun(ctx context.Context) { default: } - err := n.runOnce(ctx, conn) + err := n.runOnce(ctx) if err != nil { if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { n.statusChangeFunc(componentstatus.ShuttingDown) @@ -183,15 +175,26 @@ func (n *Notifier) getConnAndRun(ctx context.Context) { } } -func (n *Notifier) runOnce(ctx context.Context, conn *pgx.Conn) error { +func (n *Notifier) runOnce(ctx context.Context) error { ctx, cancel := context.WithCancel(ctx) defer cancel() errCh := make(chan error) go func() { - err := conn.PgConn().WaitForNotification(ctx) - errCh <- err + for { + notif, err := n.listener.WaitForNotification(ctx) + if err != nil { + errCh <- err + return + } + + select { + case n.notificationBuf <- notif: + default: + n.logger.Warn("dropping notification due to full buffer", "payload", notif.Payload) + } + } }() drainErrCh := func() error { @@ -225,7 +228,7 @@ func (n *Notifier) runOnce(ctx context.Context, conn *pgx.Conn) error { return err } // Ping the conn to see if it's still alive - if err := conn.Ping(ctx); err != nil { + if err := n.listener.Ping(ctx); err != nil { return err } case err := <-errCh: @@ -242,28 +245,14 @@ func (n *Notifier) runOnce(ctx context.Context, conn *pgx.Conn) error { } // Apply the subscription change if subChange.isNewTopic { - return n.execListen(ctx, conn, subChange.topic) + return n.listener.Listen(ctx, string(subChange.topic)) } else { - return n.execUnlisten(ctx, conn, subChange.topic) + return n.listener.Unlisten(ctx, string(subChange.topic)) } } return nil } -func (n *Notifier) execListen(ctx context.Context, conn *pgx.Conn, topic NotificationTopic) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - _, err := conn.Exec(ctx, fmt.Sprintf("LISTEN %s", topic)) - return err -} - -func (n *Notifier) execUnlisten(ctx context.Context, conn *pgx.Conn, topic NotificationTopic) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - _, err := conn.Exec(ctx, fmt.Sprintf("UNLISTEN %s", topic)) - return err -} - func (n *Notifier) setConnActive() []NotificationTopic { n.mu.Lock() defer n.mu.Unlock() @@ -292,21 +281,6 @@ func (n *Notifier) setConnInactive() { } } -func (n *Notifier) establishConn(ctx context.Context) (*pgx.Conn, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - return pgx.ConnectConfig(ctx, n.connConfig) -} - -func (n *Notifier) handleNotification(conn *pgconn.PgConn, notification *pgconn.Notification) { - select { - case n.notificationBuf <- notification: - default: - n.logger.Warn("dropping notification due to full buffer", "payload", notification.Payload) - } -} - func (n *Notifier) Listen(topic NotificationTopic, notifyFunc NotifyFunc) *Subscription { n.mu.Lock() defer n.mu.Unlock() diff --git a/internal/notifier/notifier_test.go b/internal/notifier/notifier_test.go index ddd1dfc0..308ba052 100644 --- a/internal/notifier/notifier_test.go +++ b/internal/notifier/notifier_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/componentstatus" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdriver/riverpgxv5" ) func expectReceiveStatus(t *testing.T, statusCh <-chan componentstatus.Status, expected componentstatus.Status) { @@ -27,14 +28,15 @@ func TestNotifierReceivesNotification(t *testing.T) { ctx 
:= context.Background() require := require.New(t) - db := riverinternaltest.TestDB(ctx, t) + dbPool := riverinternaltest.TestDB(ctx, t) + listener := riverpgxv5.New(dbPool).GetListener() statusUpdateCh := make(chan componentstatus.Status, 10) statusUpdate := func(status componentstatus.Status) { statusUpdateCh <- status } - notifier := New(riverinternaltest.BaseServiceArchetype(t), db.Config().ConnConfig, statusUpdate, riverinternaltest.Logger(t)) + notifier := New(riverinternaltest.BaseServiceArchetype(t), listener, statusUpdate, riverinternaltest.Logger(t)) ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() @@ -55,7 +57,7 @@ func TestNotifierReceivesNotification(t *testing.T) { expectReceiveStatus(t, statusUpdateCh, componentstatus.Initializing) expectReceiveStatus(t, statusUpdateCh, componentstatus.Healthy) - sendNotification(t, db, string(NotificationTopicInsert), "a_queue_name") + sendNotification(t, dbPool, string(NotificationTopicInsert), "a_queue_name") select { case payload := <-sub1Ch: @@ -73,7 +75,7 @@ func TestNotifierReceivesNotification(t *testing.T) { sub2 := notifier.Listen(NotificationTopicInsert, fn2) defer sub2.Unlisten() - sendNotification(t, db, string(NotificationTopicInsert), "a_queue_name_b") + sendNotification(t, dbPool, string(NotificationTopicInsert), "a_queue_name_b") receivedOn1 := false receivedOn2 := false @@ -100,7 +102,7 @@ Loop: // remove a subscription: sub1.Unlisten() - sendNotification(t, db, string(NotificationTopicInsert), "a_queue_name_b") + sendNotification(t, dbPool, string(NotificationTopicInsert), "a_queue_name_b") select { case payload := <-sub2Ch: @@ -115,16 +117,18 @@ Loop: case <-time.After(20 * time.Millisecond): } + t.Log("Canceling context") cancel() + expectReceiveStatus(t, statusUpdateCh, componentstatus.ShuttingDown) expectReceiveStatus(t, statusUpdateCh, componentstatus.Stopped) } -func sendNotification(t *testing.T, db dbsqlc.DBTX, topic string, payload string) { +func sendNotification(t *testing.T, dbPool *pgxpool.Pool, topic string, payload string) { t.Helper() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - _, err := db.Exec(ctx, "SELECT pg_notify($1, $2)", topic, payload) + _, err := dbPool.Exec(ctx, "SELECT pg_notify($1, $2)", topic, payload) require.NoError(t, err) } diff --git a/internal/rivercommon/river_common.go b/internal/rivercommon/river_common.go index 60914999..554efb2d 100644 --- a/internal/rivercommon/river_common.go +++ b/internal/rivercommon/river_common.go @@ -1,6 +1,8 @@ package rivercommon -import "errors" +import ( + "errors" +) // These constants are made available in rivercommon so that they're accessible // by internal packages, but the top-level river package re-exports them, and diff --git a/internal/riverinternaltest/riverdrivertest/riverdrivertest.go b/internal/riverinternaltest/riverdrivertest/riverdrivertest.go new file mode 100644 index 00000000..78afd42a --- /dev/null +++ b/internal/riverinternaltest/riverdrivertest/riverdrivertest.go @@ -0,0 +1,1118 @@ +package riverdrivertest + +import ( + "context" + "encoding/json" + "fmt" + "slices" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/riverqueue/river/internal/notifier" + "github.com/riverqueue/river/internal/rivercommon" + "github.com/riverqueue/river/internal/riverinternaltest/testfactory" //nolint:depguard + "github.com/riverqueue/river/internal/util/ptrutil" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/rivertype" +) + 
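+// A minimal sketch of how a driver package is expected to wire itself into
+// these exercises (hypothetical test name and wiring; the drivers' actual
+// test files aren't part of this diff). riverpgxv5's transaction type is
+// pgx.Tx, so its begin function can lean on riverinternaltest.TestTx:
+//
+//	func TestRiverPgxV5Executor(t *testing.T) {
+//		ctx := context.Background()
+//		riverdrivertest.ExerciseExecutorFull(ctx, t, riverpgxv5.New(nil),
+//			func(ctx context.Context, t *testing.T) pgx.Tx {
+//				return riverinternaltest.TestTx(ctx, t)
+//			})
+//	}
+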
+type testBundle struct{} + +func setupExecutor[TTx any](ctx context.Context, t *testing.T, driver riverdriver.Driver[TTx], beginTx func(ctx context.Context, t *testing.T) TTx) (riverdriver.Executor, *testBundle) { + t.Helper() + + tx := beginTx(ctx, t) + return driver.UnwrapExecutor(tx), &testBundle{} +} + +// ExerciseExecutorFull exercises a driver that's expected to provide full +// functionality. +func ExerciseExecutorFull[TTx any](ctx context.Context, t *testing.T, driver riverdriver.Driver[TTx], beginTx func(ctx context.Context, t *testing.T) TTx) { + t.Helper() + + const clientID = "test-client-id" + + makeInsertParams := func() *riverdriver.JobInsertParams { + return &riverdriver.JobInsertParams{ + EncodedArgs: []byte(`{}`), + Kind: "fake_job", + MaxAttempts: rivercommon.MaxAttemptsDefault, + Metadata: []byte(`{}`), + Priority: rivercommon.PriorityDefault, + Queue: rivercommon.QueueDefault, + ScheduledAt: nil, + State: rivertype.JobStateAvailable, + } + } + + // Expect no pool. We'll be using transactions only throughout these tests. + require.False(t, driver.HasPool()) + + // Encompasses all minimal functionality. + ExerciseExecutorMigrationOnly[TTx](ctx, t, driver, beginTx) + + t.Run("Begin", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + execTx, err := exec.Begin(ctx) + require.NoError(t, err) + t.Cleanup(func() { _ = execTx.Rollback(ctx) }) + + // Job visible in subtransaction, but not parent. + job := testfactory.Job(ctx, t, execTx, &testfactory.JobOpts{}) + + _, err = execTx.JobGetByID(ctx, job.ID) + require.NoError(t, err) + + require.NoError(t, execTx.Rollback(ctx)) + + _, err = exec.JobGetByID(ctx, job.ID) + require.ErrorIs(t, err, rivertype.ErrNotFound) + }) + + t.Run("JobCancel", func(t *testing.T) { + t.Parallel() + + for _, startingState := range []rivertype.JobState{ + rivertype.JobStateAvailable, + rivertype.JobStateRetryable, + rivertype.JobStateScheduled, + } { + startingState := startingState + + t.Run(fmt.Sprintf("CancelsJobIn%sState", startingState), func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + nowStr := now.Format(time.RFC3339Nano) + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + State: &startingState, + }) + require.Equal(t, startingState, job.State) + + jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{ + ID: job.ID, + CancelAttemptedAt: now, + JobControlTopic: string(notifier.NotificationTopicJobControl), + }) + require.NoError(t, err) + require.NotNil(t, jobAfter) + + require.Equal(t, rivertype.JobStateCancelled, jobAfter.State) + require.WithinDuration(t, time.Now(), *jobAfter.FinalizedAt, 2*time.Second) + require.JSONEq(t, fmt.Sprintf(`{"cancel_attempted_at":%q}`, nowStr), string(jobAfter.Metadata)) + }) + } + + t.Run("RunningJobIsNotImmediatelyCancelled", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + nowStr := now.Format(time.RFC3339Nano) + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + State: ptrutil.Ptr(rivertype.JobStateRunning), + }) + require.Equal(t, rivertype.JobStateRunning, job.State) + + jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{ + ID: job.ID, + CancelAttemptedAt: now, + JobControlTopic: string(notifier.NotificationTopicJobControl), + }) + require.NoError(t, err) + require.NotNil(t, jobAfter) + require.Equal(t, rivertype.JobStateRunning, jobAfter.State) + require.Nil(t, 
jobAfter.FinalizedAt) + require.JSONEq(t, fmt.Sprintf(`{"cancel_attempted_at":%q}`, nowStr), string(jobAfter.Metadata)) + }) + + for _, startingState := range []rivertype.JobState{ + rivertype.JobStateCancelled, + rivertype.JobStateCompleted, + rivertype.JobStateDiscarded, + } { + startingState := startingState + + t.Run(fmt.Sprintf("DoesNotAlterFinalizedJobIn%sState", startingState), func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + FinalizedAt: ptrutil.Ptr(time.Now()), + State: &startingState, + }) + + jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{ + ID: job.ID, + CancelAttemptedAt: time.Now(), + JobControlTopic: string(notifier.NotificationTopicJobControl), + }) + require.NoError(t, err) + require.Equal(t, startingState, jobAfter.State) + require.WithinDuration(t, *job.FinalizedAt, *jobAfter.FinalizedAt, time.Microsecond) + require.JSONEq(t, `{}`, string(jobAfter.Metadata)) + }) + } + + t.Run("ReturnsErrNotFoundIfJobDoesNotExist", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{ + ID: 1234567890, + CancelAttemptedAt: time.Now(), + JobControlTopic: string(notifier.NotificationTopicJobControl), + }) + require.ErrorIs(t, err, rivertype.ErrNotFound) + require.Nil(t, jobAfter) + }) + }) + + t.Run("JobDeleteBefore", func(t *testing.T) { + t.Parallel() + }) + + t.Run("JobGetAvailable", func(t *testing.T) { + t.Parallel() + + t.Run("Success", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{}) + + jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: clientID, + Max: 100, + Queue: rivercommon.QueueDefault, + }) + require.NoError(t, err) + require.Len(t, jobRows, 1) + + jobRow := jobRows[0] + require.Equal(t, []string{clientID}, jobRow.AttemptedBy) + }) + + t.Run("ConstrainedToLimit", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{}) + _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{}) + + // Two rows inserted but only one found because of the added limit. + jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: clientID, + Max: 1, + Queue: rivercommon.QueueDefault, + }) + require.NoError(t, err) + require.Len(t, jobRows, 1) + }) + + t.Run("ConstrainedToQueue", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + Queue: ptrutil.Ptr("other-queue"), + }) + + // Job is in a non-default queue so it's not found. + jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: clientID, + Max: 100, + Queue: rivercommon.QueueDefault, + }) + require.NoError(t, err) + require.Empty(t, jobRows) + }) + + t.Run("ConstrainedToScheduledAtBeforeNow", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + ScheduledAt: ptrutil.Ptr(time.Now().Add(1 * time.Minute)), + }) + + // Job is scheduled a while from now so it's not found. 
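+ // (JobGetAvailable only considers jobs whose scheduled_at is at or before the database's current time, so this future-scheduled job should stay untouched.)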
+ jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: clientID, + Max: 100, + Queue: rivercommon.QueueDefault, + }) + require.NoError(t, err) + require.Empty(t, jobRows) + }) + + t.Run("Prioritized", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + // Insert jobs with decreasing priority numbers (3, 2, 1) which means increasing priority. + for i := 3; i > 0; i-- { + _ = testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + Priority: &i, + }) + } + + jobRows, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: clientID, + Max: 2, + Queue: rivercommon.QueueDefault, + }) + require.NoError(t, err) + require.Len(t, jobRows, 2, "expected to fetch exactly 2 jobs") + + // Because the jobs are ordered within the fetch query's CTE but *not* within + // the final query, the final result list may not actually be sorted. This is + // fine, because we've already ensured that we've fetched the jobs we wanted + // to fetch via that ORDER BY. For testing we'll need to sort the list after + // fetch to easily assert that the expected jobs are in it. + sort.Slice(jobRows, func(i, j int) bool { return jobRows[i].Priority < jobRows[j].Priority }) + + require.Equal(t, 1, jobRows[0].Priority, "expected first job to have priority 1") + require.Equal(t, 2, jobRows[1].Priority, "expected second job to have priority 2") + + // Should fetch the one remaining job on the next attempt: + jobRows, err = exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: clientID, + Max: 1, + Queue: rivercommon.QueueDefault, + }) + require.NoError(t, err) + require.Len(t, jobRows, 1, "expected to fetch exactly 1 job") + require.Equal(t, 3, jobRows[0].Priority, "expected final job to have priority 3") + }) + }) + + t.Run("JobGetByID", func(t *testing.T) { + t.Parallel() + + t.Run("FetchesAnExistingJob", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{}) + + fetchedJob, err := exec.JobGetByID(ctx, job.ID) + require.NoError(t, err) + require.NotNil(t, fetchedJob) + + require.Equal(t, job.ID, fetchedJob.ID) + require.Equal(t, rivertype.JobStateAvailable, fetchedJob.State) + require.WithinDuration(t, now, fetchedJob.CreatedAt, 100*time.Millisecond) + require.WithinDuration(t, now, fetchedJob.ScheduledAt, 100*time.Millisecond) + }) + + t.Run("ReturnsErrNoRowsIfJobDoesntExist", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + job, err := exec.JobGetByID(ctx, 99999) + require.Error(t, err) + require.ErrorIs(t, err, rivertype.ErrNotFound) + require.Nil(t, job) + }) + }) + + t.Run("JobGetByIDMany", func(t *testing.T) { + t.Parallel() + }) + + t.Run("JobGetByKindAndUniqueProperties", func(t *testing.T) { + t.Parallel() + }) + + t.Run("JobGetByKindMany", func(t *testing.T) { + t.Parallel() + }) + + t.Run("JobGetStuck", func(t *testing.T) { + t.Parallel() + }) + + t.Run("JobInsert", func(t *testing.T) { + t.Parallel() + }) + + t.Run("JobInsertMany", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + // This test needs to use a time from before the transaction begins, otherwise + // the newly-scheduled jobs won't yet show as available because their + // scheduled_at (which gets a default value from time.Now() in code) will be + // after the start of the transaction.
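+ // (Postgres pins now() to the start of the current transaction, so from the fetch query's point of view a scheduled_at taken from time.Now() mid-transaction is still in the future.)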
+ now := time.Now().UTC().Add(-1 * time.Minute) + + insertParams := make([]*riverdriver.JobInsertParams, 10) + for i := 0; i < len(insertParams); i++ { + insertParams[i] = makeInsertParams() + insertParams[i].ScheduledAt = &now + } + + count, err := exec.JobInsertMany(ctx, insertParams) + require.NoError(t, err) + require.Len(t, insertParams, int(count)) + + jobsAfter, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + AttemptedBy: clientID, + Max: len(insertParams), + Queue: rivercommon.QueueDefault, + }) + require.NoError(t, err) + require.Len(t, jobsAfter, len(insertParams)) + }) + + t.Run("JobList", func(t *testing.T) { + t.Parallel() + }) + + t.Run("JobListFields", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + require.Equal(t, "id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags", + exec.JobListFields()) + }) + + t.Run("JobRescueMany", func(t *testing.T) { + t.Parallel() + }) + + t.Run("JobRetry", func(t *testing.T) { + t.Parallel() + + t.Run("DoesNotUpdateARunningJob", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + State: ptrutil.Ptr(rivertype.JobStateRunning), + }) + + jobAfter, err := exec.JobRetry(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateRunning, jobAfter.State) + require.WithinDuration(t, job.ScheduledAt, jobAfter.ScheduledAt, time.Microsecond) + + jobUpdated, err := exec.JobGetByID(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateRunning, jobUpdated.State) + }) + + for _, state := range []rivertype.JobState{ + rivertype.JobStateAvailable, + rivertype.JobStateCancelled, + rivertype.JobStateCompleted, + rivertype.JobStateDiscarded, + // TODO(bgentry): add Pending to this list when it's added: + rivertype.JobStateRetryable, + rivertype.JobStateScheduled, + } { + state := state + + t.Run(fmt.Sprintf("UpdatesA_%s_JobToBeScheduledImmediately", state), func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + + setFinalized := slices.Contains([]rivertype.JobState{ + rivertype.JobStateCancelled, + rivertype.JobStateCompleted, + rivertype.JobStateDiscarded, + }, state) + + var finalizedAt *time.Time + if setFinalized { + finalizedAt = &now + } + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + FinalizedAt: finalizedAt, + ScheduledAt: ptrutil.Ptr(now.Add(1 * time.Hour)), + State: &state, + }) + + jobAfter, err := exec.JobRetry(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateAvailable, jobAfter.State) + require.WithinDuration(t, time.Now().UTC(), jobAfter.ScheduledAt, 100*time.Millisecond) + + jobUpdated, err := exec.JobGetByID(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateAvailable, jobUpdated.State) + require.Nil(t, jobUpdated.FinalizedAt) + }) + } + + t.Run("AltersScheduledAtForAlreadyCompletedJob", func(t *testing.T) { + // A job which has already completed will have a ScheduledAt that could be + // long in the past. Now that we're re-scheduling it, we should update that + // to the current time to slot it in alongside other recently-scheduled jobs + // and not skip the line; also, its wait duration can't be calculated + // accurately if we don't reset the scheduled_at. 
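+ // (Taken together with the next test: JobRetry resets scheduled_at to now() unless the job was already available with a scheduled_at in the past, in which case it keeps its place in line.)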
+ t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + FinalizedAt: &now, + ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour)), + State: ptrutil.Ptr(rivertype.JobStateCompleted), + }) + + jobAfter, err := exec.JobRetry(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateAvailable, jobAfter.State) + require.WithinDuration(t, now, jobAfter.ScheduledAt, 5*time.Second) + }) + + t.Run("DoesNotAlterScheduledAtIfInThePastAndJobAlreadyAvailable", func(t *testing.T) { + // We don't want to update ScheduledAt if the job was already available + // because doing so can make it lose its place in line. + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour)), + }) + + jobAfter, err := exec.JobRetry(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateAvailable, jobAfter.State) + require.WithinDuration(t, job.ScheduledAt, jobAfter.ScheduledAt, time.Microsecond) + + jobUpdated, err := exec.JobGetByID(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateAvailable, jobUpdated.State) + }) + + t.Run("ReturnsErrNoRowsIfJobNotFound", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + _, err := exec.JobRetry(ctx, 999999) + require.Error(t, err) + require.ErrorIs(t, err, rivertype.ErrNotFound) + }) + }) + + t.Run("JobSchedule", func(t *testing.T) { + t.Parallel() + }) + + t.Run("JobSetStateIfRunning_JobSetStateCompleted", func(t *testing.T) { + t.Parallel() + + t.Run("CompletesARunningJob", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + State: ptrutil.Ptr(rivertype.JobStateRunning), + }) + + jobAfter, err := exec.JobSetStateIfRunning(ctx, riverdriver.JobSetStateCompleted(job.ID, now)) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateCompleted, jobAfter.State) + require.WithinDuration(t, now, *jobAfter.FinalizedAt, time.Microsecond) + + jobUpdated, err := exec.JobGetByID(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateCompleted, jobUpdated.State) + }) + + t.Run("DoesNotCompleteARetryableJob", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + State: ptrutil.Ptr(rivertype.JobStateRetryable), + }) + + jobAfter, err := exec.JobSetStateIfRunning(ctx, riverdriver.JobSetStateCompleted(job.ID, now)) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateRetryable, jobAfter.State) + require.Nil(t, jobAfter.FinalizedAt) + + jobUpdated, err := exec.JobGetByID(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateRetryable, jobUpdated.State) + }) + }) + + t.Run("JobSetStateIfRunning_JobSetStateErrored", func(t *testing.T) { + t.Parallel() + + makeErrPayload := func(t *testing.T, now time.Time) []byte { + t.Helper() + + errPayload, err := json.Marshal(rivertype.AttemptError{ + Attempt: 1, At: now, Error: "fake error", Trace: "foo.go:123\nbar.go:456", + }) + require.NoError(t, err) + return errPayload + } + + t.Run("SetsARunningJobToRetryable", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, 
beginTx) + + now := time.Now().UTC() + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + State: ptrutil.Ptr(rivertype.JobStateRunning), + }) + + jobAfter, err := exec.JobSetStateIfRunning(ctx, riverdriver.JobSetStateErrorRetryable(job.ID, now, makeErrPayload(t, now))) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateRetryable, jobAfter.State) + require.WithinDuration(t, now, jobAfter.ScheduledAt, time.Microsecond) + + jobUpdated, err := exec.JobGetByID(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateRetryable, jobUpdated.State) + + // validate error payload: + require.Len(t, jobAfter.Errors, 1) + require.Equal(t, now, jobAfter.Errors[0].At) + require.Equal(t, 1, jobAfter.Errors[0].Attempt) + require.Equal(t, "fake error", jobAfter.Errors[0].Error) + require.Equal(t, "foo.go:123\nbar.go:456", jobAfter.Errors[0].Trace) + }) + + t.Run("DoesNotTouchAlreadyRetryableJob", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + State: ptrutil.Ptr(rivertype.JobStateRetryable), + ScheduledAt: ptrutil.Ptr(now.Add(10 * time.Second)), + }) + + jobAfter, err := exec.JobSetStateIfRunning(ctx, riverdriver.JobSetStateErrorRetryable(job.ID, now, makeErrPayload(t, now))) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateRetryable, jobAfter.State) + require.WithinDuration(t, job.ScheduledAt, jobAfter.ScheduledAt, time.Microsecond) + + jobUpdated, err := exec.JobGetByID(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateRetryable, jobUpdated.State) + require.WithinDuration(t, job.ScheduledAt, jobAfter.ScheduledAt, time.Microsecond) + }) + + t.Run("SetsAJobWithCancelAttemptedAtToCancelled", func(t *testing.T) { + // If a job has cancel_attempted_at in its metadata, it means that the user + // tried to cancel the job with the Cancel API but that the job + // finished/errored before the producer received the cancel notification. + // + // In this case, we want to move the job to cancelled instead of retryable + // so that the job is not retried. 
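+ // (The marker is plain JSON in the job's metadata, as set by JobCancel when the job was running at cancellation time, e.g. {"cancel_attempted_at":"2024-01-01T12:00:00Z"}; the timestamp here is illustrative.)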
+ t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{ + Metadata: []byte(fmt.Sprintf(`{"cancel_attempted_at":"%s"}`, time.Now().UTC().Format(time.RFC3339))), + State: ptrutil.Ptr(rivertype.JobStateRunning), + ScheduledAt: ptrutil.Ptr(now.Add(-10 * time.Second)), + }) + + jobAfter, err := exec.JobSetStateIfRunning(ctx, riverdriver.JobSetStateErrorRetryable(job.ID, now, makeErrPayload(t, now))) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateCancelled, jobAfter.State) + require.NotNil(t, jobAfter.FinalizedAt) + // Loose assertion against FinalizedAt just to make sure it was set (it uses + // the database's now() instead of a passed-in time): + require.WithinDuration(t, time.Now().UTC(), *jobAfter.FinalizedAt, 2*time.Second) + // ScheduledAt should not be touched: + require.WithinDuration(t, job.ScheduledAt, jobAfter.ScheduledAt, time.Microsecond) + // Errors should still be appended to: + require.Len(t, jobAfter.Errors, 1) + require.Contains(t, jobAfter.Errors[0].Error, "fake error") + + jobUpdated, err := exec.JobGetByID(ctx, job.ID) + require.NoError(t, err) + require.Equal(t, rivertype.JobStateCancelled, jobUpdated.State) + require.WithinDuration(t, job.ScheduledAt, jobAfter.ScheduledAt, time.Microsecond) + }) + }) + + t.Run("JobUpdate", func(t *testing.T) { + t.Parallel() + }) + + const ( + leaderInstanceName = "default" + leaderTTL = 10 * time.Second + ) + + t.Run("LeaderDeleteExpired", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + now := time.Now().UTC() + + { + numDeleted, err := exec.LeaderDeleteExpired(ctx, leaderInstanceName) + require.NoError(t, err) + require.Zero(t, numDeleted) + } + + _ = testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{ + ElectedAt: ptrutil.Ptr(now.Add(-2 * time.Hour)), + ExpiresAt: ptrutil.Ptr(now.Add(-1 * time.Hour)), + LeaderID: ptrutil.Ptr(clientID), + Name: ptrutil.Ptr(leaderInstanceName), + }) + + { + numDeleted, err := exec.LeaderDeleteExpired(ctx, leaderInstanceName) + require.NoError(t, err) + require.Equal(t, 1, numDeleted) + } + }) + + t.Run("LeaderAttemptElect", func(t *testing.T) { + t.Parallel() + + t.Run("ElectsLeader", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + elected, err := exec.LeaderAttemptElect(ctx, &riverdriver.LeaderElectParams{ + LeaderID: clientID, + Name: leaderInstanceName, + TTL: leaderTTL, + }) + require.NoError(t, err) + require.True(t, elected) // won election + + leader, err := exec.LeaderGetElectedLeader(ctx, leaderInstanceName) + require.NoError(t, err) + require.WithinDuration(t, time.Now(), leader.ElectedAt, 100*time.Millisecond) + require.WithinDuration(t, time.Now().Add(leaderTTL), leader.ExpiresAt, 100*time.Millisecond) + }) + + t.Run("CannotElectTwiceInARow", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + leader := testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{ + LeaderID: ptrutil.Ptr(clientID), + Name: ptrutil.Ptr(leaderInstanceName), + }) + + elected, err := exec.LeaderAttemptElect(ctx, &riverdriver.LeaderElectParams{ + LeaderID: "different-client-id", + Name: leaderInstanceName, + TTL: leaderTTL, + }) + require.NoError(t, err) + require.False(t, elected) // lost election + + // The time should not have changed because we specified that we were not + // already elected, and the elect query is a no-op if there's already a + // 
leader: + updatedLeader, err := exec.LeaderGetElectedLeader(ctx, leaderInstanceName) + require.NoError(t, err) + require.Equal(t, leader.ExpiresAt, updatedLeader.ExpiresAt) + }) + }) + + t.Run("LeaderAttemptReelect", func(t *testing.T) { + t.Parallel() + + t.Run("ElectsLeader", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + elected, err := exec.LeaderAttemptReelect(ctx, &riverdriver.LeaderElectParams{ + LeaderID: clientID, + Name: leaderInstanceName, + TTL: leaderTTL, + }) + require.NoError(t, err) + require.True(t, elected) // won election + + leader, err := exec.LeaderGetElectedLeader(ctx, leaderInstanceName) + require.NoError(t, err) + require.WithinDuration(t, time.Now(), leader.ElectedAt, 100*time.Millisecond) + require.WithinDuration(t, time.Now().Add(leaderTTL), leader.ExpiresAt, 100*time.Millisecond) + }) + + t.Run("ReelectsSameLeader", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + leader := testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{ + LeaderID: ptrutil.Ptr(clientID), + Name: ptrutil.Ptr(leaderInstanceName), + }) + + // Re-elect the same leader. Use a larger TTL to see if time is updated, + // because we are in a test transaction and the time is frozen at the start of + // the transaction. + elected, err := exec.LeaderAttemptReelect(ctx, &riverdriver.LeaderElectParams{ + LeaderID: clientID, + Name: leaderInstanceName, + TTL: 30 * time.Second, + }) + require.NoError(t, err) + require.True(t, elected) // won re-election + + // expires_at should be incremented because this is the same leader that won + // previously and we specified that we're already elected: + updatedLeader, err := exec.LeaderGetElectedLeader(ctx, leaderInstanceName) + require.NoError(t, err) + require.Greater(t, updatedLeader.ExpiresAt, leader.ExpiresAt) + }) + }) + + t.Run("LeaderInsert", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + leader, err := exec.LeaderInsert(ctx, &riverdriver.LeaderInsertParams{ + LeaderID: clientID, + Name: leaderInstanceName, + TTL: leaderTTL, + }) + require.NoError(t, err) + require.WithinDuration(t, time.Now(), leader.ElectedAt, 100*time.Millisecond) + require.WithinDuration(t, time.Now().Add(leaderTTL), leader.ExpiresAt, 100*time.Millisecond) + require.Equal(t, leaderInstanceName, leader.Name) + require.Equal(t, clientID, leader.LeaderID) + }) + + t.Run("LeaderGetElectedLeader", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + _ = testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{ + LeaderID: ptrutil.Ptr(clientID), + Name: ptrutil.Ptr(leaderInstanceName), + }) + + leader, err := exec.LeaderGetElectedLeader(ctx, leaderInstanceName) + require.NoError(t, err) + require.WithinDuration(t, time.Now(), leader.ElectedAt, 100*time.Millisecond) + require.WithinDuration(t, time.Now().Add(leaderTTL), leader.ExpiresAt, 100*time.Millisecond) + require.Equal(t, leaderInstanceName, leader.Name) + require.Equal(t, clientID, leader.LeaderID) + }) + + t.Run("LeaderResign", func(t *testing.T) { + t.Parallel() + + t.Run("Success", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + { + resigned, err := exec.LeaderResign(ctx, &riverdriver.LeaderResignParams{ + LeaderID: clientID, + LeadershipTopic: string(notifier.NotificationTopicLeadership), + Name: leaderInstanceName, + }) + require.NoError(t, err) + require.False(t, resigned) + } + + _ = 
testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{ + LeaderID: ptrutil.Ptr(clientID), + Name: ptrutil.Ptr(leaderInstanceName), + }) + + { + resigned, err := exec.LeaderResign(ctx, &riverdriver.LeaderResignParams{ + LeaderID: clientID, + LeadershipTopic: string(notifier.NotificationTopicLeadership), + Name: leaderInstanceName, + }) + require.NoError(t, err) + require.True(t, resigned) + } + }) + + t.Run("DoesNotResignWithoutLeadership", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + _ = testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{ + LeaderID: ptrutil.Ptr("other-client-id"), + Name: ptrutil.Ptr(leaderInstanceName), + }) + + resigned, err := exec.LeaderResign(ctx, &riverdriver.LeaderResignParams{ + LeaderID: clientID, + LeadershipTopic: string(notifier.NotificationTopicLeadership), + Name: leaderInstanceName, + }) + require.NoError(t, err) + require.False(t, resigned) + }) + }) + + t.Run("PGAdvisoryXactLock", func(t *testing.T) { + t.Parallel() + }) +} + +// ExerciseExecutorMigrationOnly exercises a driver that's expected to only be +// able to perform database migrations, and not full River functionality. +func ExerciseExecutorMigrationOnly[TTx any](ctx context.Context, t *testing.T, driver riverdriver.Driver[TTx], beginTx func(ctx context.Context, t *testing.T) TTx) { + t.Helper() + + // Truncates the migration table so we only have to work with test + // migration data. + truncateMigrations := func(ctx context.Context, t *testing.T, exec riverdriver.Executor) { + t.Helper() + + _, err := exec.Exec(ctx, "TRUNCATE TABLE river_migration") + require.NoError(t, err) + } + + // Expect no pool. We'll be using transactions only throughout these tests. + require.False(t, driver.HasPool()) + + t.Run("Exec", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + _, err := exec.Exec(ctx, "SELECT 1 + 2") + require.NoError(t, err) + }) + + t.Run("MigrationDeleteByVersionMany", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + truncateMigrations(ctx, t, exec) + + migration1 := testfactory.Migration(ctx, t, exec, &testfactory.MigrationOpts{}) + migration2 := testfactory.Migration(ctx, t, exec, &testfactory.MigrationOpts{}) + + migrations, err := exec.MigrationDeleteByVersionMany(ctx, []int{ + migration1.Version, + migration2.Version, + }) + require.NoError(t, err) + require.Len(t, migrations, 2) + slices.SortFunc(migrations, func(a, b *riverdriver.Migration) int { return a.Version - b.Version }) + require.Equal(t, migration1.Version, migrations[0].Version) + require.Equal(t, migration2.Version, migrations[1].Version) + }) + + t.Run("MigrationGetAll", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + truncateMigrations(ctx, t, exec) + + migration1 := testfactory.Migration(ctx, t, exec, &testfactory.MigrationOpts{}) + migration2 := testfactory.Migration(ctx, t, exec, &testfactory.MigrationOpts{}) + + migrations, err := exec.MigrationGetAll(ctx) + require.NoError(t, err) + require.Len(t, migrations, 2) + require.Equal(t, migration1.Version, migrations[0].Version) + require.Equal(t, migration2.Version, migrations[1].Version) + }) + + t.Run("MigrationInsertMany", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + truncateMigrations(ctx, t, exec) + + migrations, err := exec.MigrationInsertMany(ctx, []int{1, 2}) + require.NoError(t, err) + require.Len(t, migrations, 2) + 
require.Equal(t, 1, migrations[0].Version) + require.Equal(t, 2, migrations[1].Version) + }) + + t.Run("TableExists", func(t *testing.T) { + t.Parallel() + + exec, _ := setupExecutor(ctx, t, driver, beginTx) + + exists, err := exec.TableExists(ctx, "river_job") + require.NoError(t, err) + require.True(t, exists) + + exists, err = exec.TableExists(ctx, "does_not_exist") + require.NoError(t, err) + require.False(t, exists) + }) +} + +type testListenerBundle[TTx any] struct { + driver riverdriver.Driver[TTx] + exec riverdriver.Executor +} + +func setupListener[TTx any](ctx context.Context, t *testing.T, getDriverWithPool func(ctx context.Context, t *testing.T) riverdriver.Driver[TTx]) (riverdriver.Listener, *testListenerBundle[TTx]) { + t.Helper() + + driver := getDriverWithPool(ctx, t) + + listener := driver.GetListener() + t.Cleanup(func() { require.NoError(t, listener.Close(ctx)) }) + + require.NoError(t, listener.Connect(ctx)) + + return listener, &testListenerBundle[TTx]{ + driver: driver, + exec: driver.GetExecutor(), + } +} + +func ExerciseListener[TTx any](ctx context.Context, t *testing.T, getDriverWithPool func(ctx context.Context, t *testing.T) riverdriver.Driver[TTx]) { + t.Helper() + + requireNoNotification := func(ctx context.Context, t *testing.T, listener riverdriver.Listener) { + t.Helper() + + // Ugh, this is a little sketchy, but hard to test in another way. + ctx, cancel := context.WithTimeout(ctx, 50*time.Millisecond) + defer cancel() + + notification, err := listener.WaitForNotification(ctx) + require.ErrorIs(t, err, context.DeadlineExceeded, "Expected no notification, but got: %+v", notification) + } + + waitForNotification := func(ctx context.Context, t *testing.T, listener riverdriver.Listener) *riverdriver.Notification { + t.Helper() + + ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + defer cancel() + + notification, err := listener.WaitForNotification(ctx) + require.NoError(t, err) + + return notification + } + + t.Run("RoundTrip", func(t *testing.T) { + t.Parallel() + + listener, bundle := setupListener(ctx, t, getDriverWithPool) + + require.NoError(t, listener.Listen(ctx, "topic1")) + require.NoError(t, listener.Listen(ctx, "topic2")) + + require.NoError(t, listener.Ping(ctx)) // still alive + + { + require.NoError(t, bundle.exec.Notify(ctx, "topic1", "payload1_1")) + require.NoError(t, bundle.exec.Notify(ctx, "topic2", "payload2_1")) + + notification := waitForNotification(ctx, t, listener) + require.Equal(t, &riverdriver.Notification{Topic: "topic1", Payload: "payload1_1"}, notification) + notification = waitForNotification(ctx, t, listener) + require.Equal(t, &riverdriver.Notification{Topic: "topic2", Payload: "payload2_1"}, notification) + } + + require.NoError(t, listener.Unlisten(ctx, "topic2")) + + { + require.NoError(t, bundle.exec.Notify(ctx, "topic1", "payload1_2")) + require.NoError(t, bundle.exec.Notify(ctx, "topic2", "payload2_2")) + + notification := waitForNotification(ctx, t, listener) + require.Equal(t, &riverdriver.Notification{Topic: "topic1", Payload: "payload1_2"}, notification) + + requireNoNotification(ctx, t, listener) + } + + require.NoError(t, listener.Unlisten(ctx, "topic1")) + + require.NoError(t, listener.Close(ctx)) + }) + + t.Run("TransactionGated", func(t *testing.T) { + t.Parallel() + + listener, bundle := setupListener(ctx, t, getDriverWithPool) + + require.NoError(t, listener.Listen(ctx, "topic1")) + + execTx, err := bundle.exec.Begin(ctx) + require.NoError(t, err) + + require.NoError(t, execTx.Notify(ctx, "topic1", 
"payload1")) + + // No notification because the transaction hasn't committed yet. + requireNoNotification(ctx, t, listener) + + require.NoError(t, execTx.Commit(ctx)) + + // Notification received now that transaction has committed. + notification := waitForNotification(ctx, t, listener) + require.Equal(t, &riverdriver.Notification{Topic: "topic1", Payload: "payload1"}, notification) + }) +} diff --git a/internal/riverinternaltest/sharedtx/shared_tx.go b/internal/riverinternaltest/sharedtx/shared_tx.go index 5f0f7659..7c40a83b 100644 --- a/internal/riverinternaltest/sharedtx/shared_tx.go +++ b/internal/riverinternaltest/sharedtx/shared_tx.go @@ -7,8 +7,6 @@ import ( "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgconn" - - "github.com/riverqueue/river/internal/util/dbutil" ) // SharedTx can be used to wrap a test transaction in cases where multiple @@ -28,7 +26,7 @@ import ( // encountered by use of concurrent accesses will be more difficult to debug // than otherwise, so it's better to not go there at all if it can be avoided. type SharedTx struct { - inner dbutil.Executor + inner pgx.Tx wait chan struct{} } @@ -96,6 +94,19 @@ func (e *SharedTx) QueryRow(ctx context.Context, query string, args ...any) pgx. return &SharedTxRow{sharedTxDerivative{sharedTx: e}, row} } +// These are all implemented so that a SharedTx can be used as a pgx.Tx, but are +// all non-functional. +func (e *SharedTx) Conn() *pgx.Conn { panic("not implemented") } +func (e *SharedTx) Commit(ctx context.Context) error { panic("not implemented") } +func (e *SharedTx) LargeObjects() pgx.LargeObjects { panic("not implemented") } +func (e *SharedTx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) { + panic("not implemented") +} +func (e *SharedTx) Rollback(ctx context.Context) error { panic("not implemented") } +func (e *SharedTx) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults { + panic("not implemented") +} + func (e *SharedTx) lock() { select { case <-e.wait: diff --git a/internal/riverinternaltest/testfactory/test_factory.go b/internal/riverinternaltest/testfactory/test_factory.go new file mode 100644 index 00000000..3309aaf4 --- /dev/null +++ b/internal/riverinternaltest/testfactory/test_factory.go @@ -0,0 +1,96 @@ +// Package testfactory provides low level helpers for inserting records directly +// into the database. 
+package testfactory + +import ( + "context" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/riverqueue/river/internal/rivercommon" + "github.com/riverqueue/river/internal/util/ptrutil" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/rivertype" +) + +type JobOpts struct { + EncodedArgs []byte + FinalizedAt *time.Time + Metadata []byte + Priority *int + Queue *string + ScheduledAt *time.Time + State *rivertype.JobState +} + +func Job(ctx context.Context, t *testing.T, exec riverdriver.Executor, opts *JobOpts) *rivertype.JobRow { + t.Helper() + + encodedArgs := []byte("{}") + if opts.EncodedArgs != nil { + encodedArgs = opts.EncodedArgs + } + + metadata := []byte("{}") + if len(opts.Metadata) > 0 { + metadata = opts.Metadata + } + + job, err := exec.JobInsert(ctx, &riverdriver.JobInsertParams{ + EncodedArgs: encodedArgs, + FinalizedAt: opts.FinalizedAt, + Kind: "fake_job", + MaxAttempts: rivercommon.MaxAttemptsDefault, + Metadata: metadata, + Priority: ptrutil.ValOrDefault(opts.Priority, rivercommon.PriorityDefault), + Queue: ptrutil.ValOrDefault(opts.Queue, rivercommon.QueueDefault), + ScheduledAt: opts.ScheduledAt, + State: ptrutil.ValOrDefault(opts.State, rivertype.JobStateAvailable), + }) + require.NoError(t, err) + return job +} + +type LeaderOpts struct { + ElectedAt *time.Time + ExpiresAt *time.Time + LeaderID *string + Name *string +} + +func Leader(ctx context.Context, t *testing.T, exec riverdriver.Executor, opts *LeaderOpts) *riverdriver.Leader { + t.Helper() + + leader, err := exec.LeaderInsert(ctx, &riverdriver.LeaderInsertParams{ + ElectedAt: opts.ElectedAt, + ExpiresAt: opts.ExpiresAt, + LeaderID: ptrutil.ValOrDefault(opts.LeaderID, "test-client-id"), + Name: ptrutil.ValOrDefault(opts.Name, "default"), + TTL: 10 * time.Second, + }) + require.NoError(t, err) + return leader +} + +type MigrationOpts struct { + Version *int +} + +func Migration(ctx context.Context, t *testing.T, exec riverdriver.Executor, opts *MigrationOpts) *riverdriver.Migration { + t.Helper() + + migration, err := exec.MigrationInsertMany(ctx, []int{ + ptrutil.ValOrDefaultFunc(opts.Version, nextSeq), + }) + require.NoError(t, err) + return migration[0] +} + +var seq int64 = 1 //nolint:gochecknoglobals + +func nextSeq() int { + return int(atomic.AddInt64(&seq, 1)) +} diff --git a/internal/util/dbutil/db_util.go b/internal/util/dbutil/db_util.go index bc50d4b4..7e42ce25 100644 --- a/internal/util/dbutil/db_util.go +++ b/internal/util/dbutil/db_util.go @@ -4,86 +4,35 @@ import ( "context" "fmt" - "github.com/jackc/pgx/v5" - - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/riverdriver" ) -// Executor is an interface for a type that can begin a transaction and also -// perform all the operations needed to be used in conjunction with sqlc. -// Implemented by all of pgx's `pgxpool.Pool`, `pgx.Conn`, and `pgx.Tx` -// (transactions can start subtransactions). -type Executor interface { - TxBeginner - dbsqlc.DBTX -} - -// TxBeginner is an interface to a type that can begin a transaction, like a pgx -// connection pool, connection, or transaction (the latter would begin a -// subtransaction). -type TxBeginner interface { - Begin(ctx context.Context) (pgx.Tx, error) -} - -// WithTx starts and commits a transaction around the given function, allowing -// the return of a generic value. 
-func WithTx(ctx context.Context, txBeginner TxBeginner, innerFunc func(ctx context.Context, tx pgx.Tx) error) error { - _, err := WithTxV(ctx, txBeginner, func(ctx context.Context, tx pgx.Tx) (struct{}, error) { - return struct{}{}, innerFunc(ctx, tx) - }) - return err -} - -// WithTxV starts and commits a transaction around the given function, allowing -// the return of a generic value. -func WithTxV[T any](ctx context.Context, txBeginner TxBeginner, innerFunc func(ctx context.Context, tx pgx.Tx) (T, error)) (T, error) { - var defaultRes T - - tx, err := txBeginner.Begin(ctx) - if err != nil { - return defaultRes, fmt.Errorf("error beginning transaction: %w", err) - } - defer tx.Rollback(ctx) - - res, err := innerFunc(ctx, tx) - if err != nil { - return defaultRes, err - } - - if err := tx.Commit(ctx); err != nil { - return defaultRes, fmt.Errorf("error committing transaction: %w", err) - } - - return res, nil -} - -// WithExecutorTx starts and commits a transaction on a driver executor around +// WithTx starts and commits a transaction on a driver executor around // the given function, allowing the return of a generic value. -func WithExecutorTx(ctx context.Context, exec riverdriver.Executor, innerFunc func(ctx context.Context, tx riverdriver.ExecutorTx) error) error { - _, err := WithExecutorTxV(ctx, exec, func(ctx context.Context, tx riverdriver.ExecutorTx) (struct{}, error) { +func WithTx(ctx context.Context, exec riverdriver.Executor, innerFunc func(ctx context.Context, exec riverdriver.ExecutorTx) error) error { + _, err := WithTxV(ctx, exec, func(ctx context.Context, tx riverdriver.ExecutorTx) (struct{}, error) { return struct{}{}, innerFunc(ctx, tx) }) return err } -// WithExecutorTxV starts and commits a transaction on a driver executor around +// WithTxV starts and commits a transaction on a driver executor around // the given function, allowing the return of a generic value. 
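// For example, to run a query inside a transaction and return a value out of
// it (this mirrors the updated TestWithTxV below):
//
//	ret, err := WithTxV(ctx, exec, func(ctx context.Context, tx riverdriver.ExecutorTx) (int, error) {
//		_, err := tx.Exec(ctx, "SELECT 1")
//		return 7, err
//	})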
-func WithExecutorTxV[T any](ctx context.Context, exec riverdriver.Executor, innerFunc func(ctx context.Context, tx riverdriver.ExecutorTx) (T, error)) (T, error) { +func WithTxV[T any](ctx context.Context, exec riverdriver.Executor, innerFunc func(ctx context.Context, exec riverdriver.ExecutorTx) (T, error)) (T, error) { var defaultRes T - tx, err := exec.Begin(ctx) + execTx, err := exec.Begin(ctx) if err != nil { return defaultRes, fmt.Errorf("error beginning transaction: %w", err) } - defer tx.Rollback(ctx) + defer execTx.Rollback(ctx) - res, err := innerFunc(ctx, tx) + res, err := innerFunc(ctx, execTx) if err != nil { return defaultRes, err } - if err := tx.Commit(ctx); err != nil { + if err := execTx.Commit(ctx); err != nil { return defaultRes, fmt.Errorf("error committing transaction: %w", err) } diff --git a/internal/util/dbutil/db_util_test.go b/internal/util/dbutil/db_util_test.go index c0f45cf7..50f51033 100644 --- a/internal/util/dbutil/db_util_test.go +++ b/internal/util/dbutil/db_util_test.go @@ -4,7 +4,6 @@ import ( "context" "testing" - "github.com/jackc/pgx/v5" "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/riverinternaltest" @@ -15,43 +14,12 @@ import ( func TestWithTx(t *testing.T) { t.Parallel() - ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, t) - - err := WithTx(ctx, dbPool, func(ctx context.Context, tx pgx.Tx) error { - _, err := tx.Exec(ctx, "SELECT 1") - require.NoError(t, err) - - return nil - }) - require.NoError(t, err) -} - -func TestWithTxV(t *testing.T) { - t.Parallel() - - ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, t) - - ret, err := WithTxV(ctx, dbPool, func(ctx context.Context, tx pgx.Tx) (int, error) { - _, err := tx.Exec(ctx, "SELECT 1") - require.NoError(t, err) - - return 7, nil - }) - require.NoError(t, err) - require.Equal(t, 7, ret) -} - -func TestWithExecutorTx(t *testing.T) { - t.Parallel() - ctx := context.Background() dbPool := riverinternaltest.TestDB(ctx, t) driver := riverpgxv5.New(dbPool) - err := WithExecutorTx(ctx, driver.GetExecutor(), func(ctx context.Context, tx riverdriver.ExecutorTx) error { - _, err := tx.Exec(ctx, "SELECT 1") + err := WithTx(ctx, driver.GetExecutor(), func(ctx context.Context, exec riverdriver.ExecutorTx) error { + _, err := exec.Exec(ctx, "SELECT 1") require.NoError(t, err) return nil @@ -59,15 +27,15 @@ func TestWithExecutorTx(t *testing.T) { require.NoError(t, err) } -func TestWithExecutorTxV(t *testing.T) { +func TestWithTxV(t *testing.T) { t.Parallel() ctx := context.Background() dbPool := riverinternaltest.TestDB(ctx, t) driver := riverpgxv5.New(dbPool) - ret, err := WithExecutorTxV(ctx, driver.GetExecutor(), func(ctx context.Context, tx riverdriver.ExecutorTx) (int, error) { - _, err := tx.Exec(ctx, "SELECT 1") + ret, err := WithTxV(ctx, driver.GetExecutor(), func(ctx context.Context, exec riverdriver.ExecutorTx) (int, error) { + _, err := exec.Exec(ctx, "SELECT 1") require.NoError(t, err) return 7, nil diff --git a/job.go b/job.go index 9f9952b6..d0bd5424 100644 --- a/job.go +++ b/job.go @@ -6,8 +6,6 @@ import ( "errors" "time" - "github.com/riverqueue/river/internal/dbsqlc" - "github.com/riverqueue/river/internal/util/ptrutil" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/rivertype" ) @@ -55,22 +53,13 @@ func JobCompleteTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs JobArgs](ctx return nil, errors.New("job must be running") } - var ( - driver TDriver - queries = &dbsqlc.Queries{} - ) - - internal, err 
:= queries.JobSetState(ctx, driver.UnwrapTx(tx), dbsqlc.JobSetStateParams{ - ID: job.ID, - FinalizedAtDoUpdate: true, - FinalizedAt: ptrutil.Ptr(time.Now()), - State: dbsqlc.JobStateCompleted, - }) + var driver TDriver + jobRow, err := driver.UnwrapExecutor(tx).JobSetStateIfRunning(ctx, riverdriver.JobSetStateCompleted(job.ID, time.Now())) if err != nil { return nil, err } - updatedJob := &Job[TArgs]{JobRow: dbsqlc.JobRowFromInternal(internal)} + updatedJob := &Job[TArgs]{JobRow: jobRow} if err := json.Unmarshal(updatedJob.EncodedArgs, &updatedJob.Args); err != nil { return nil, err @@ -88,13 +77,3 @@ const ( JobStateRunning = rivertype.JobStateRunning JobStateScheduled = rivertype.JobStateScheduled ) - -var jobStateAll = []rivertype.JobState{ //nolint:gochecknoglobals - JobStateAvailable, - JobStateCancelled, - JobStateCompleted, - JobStateDiscarded, - JobStateRetryable, - JobStateRunning, - JobStateScheduled, -} diff --git a/job_executor.go b/job_executor.go index a7e3ffae..cbd3524d 100644 --- a/job_executor.go +++ b/job_executor.go @@ -10,10 +10,10 @@ import ( "time" "github.com/riverqueue/river/internal/baseservice" - "github.com/riverqueue/river/internal/dbadapter" "github.com/riverqueue/river/internal/jobcompleter" "github.com/riverqueue/river/internal/jobstats" "github.com/riverqueue/river/internal/workunit" + "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/rivertype" ) @@ -117,7 +117,6 @@ func (r *jobExecutorResult) ErrorStr() string { type jobExecutor struct { baseservice.BaseService - Adapter dbadapter.Adapter CancelFunc context.CancelCauseFunc ClientJobTimeout time.Duration Completer jobcompleter.JobCompleter @@ -250,11 +249,11 @@ func (e *jobExecutor) reportResult(ctx context.Context, res *jobExecutorResult) // Just as with retryable jobs, this isn't friendly for short snooze times // so we instead make the job immediately `available` if the snooze time is // smaller than the scheduler's run interval. 
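// Concretely (interval value illustrative): with a 5s scheduler interval, a
// 3s snooze makes the job immediately `available` again, while a 30s snooze
// leaves it `scheduled` for the scheduler to promote later.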
- var params *dbadapter.JobSetStateIfRunningParams + var params *riverdriver.JobSetStateIfRunningParams if nextAttemptScheduledAt.Sub(e.TimeNowUTC()) <= e.SchedulerInterval { - params = dbadapter.JobSetStateSnoozedAvailable(e.JobRow.ID, nextAttemptScheduledAt, e.JobRow.MaxAttempts+1) + params = riverdriver.JobSetStateSnoozedAvailable(e.JobRow.ID, nextAttemptScheduledAt, e.JobRow.MaxAttempts+1) } else { - params = dbadapter.JobSetStateSnoozed(e.JobRow.ID, nextAttemptScheduledAt, e.JobRow.MaxAttempts+1) + params = riverdriver.JobSetStateSnoozed(e.JobRow.ID, nextAttemptScheduledAt, e.JobRow.MaxAttempts+1) } if err := e.Completer.JobSetStateIfRunning(e.stats, params); err != nil { e.Logger.ErrorContext(ctx, e.Name+": Error snoozing job", @@ -269,7 +268,7 @@ func (e *jobExecutor) reportResult(ctx context.Context, res *jobExecutorResult) return } - if err := e.Completer.JobSetStateIfRunning(e.stats, dbadapter.JobSetStateCompleted(e.JobRow.ID, e.TimeNowUTC())); err != nil { + if err := e.Completer.JobSetStateIfRunning(e.stats, riverdriver.JobSetStateCompleted(e.JobRow.ID, e.TimeNowUTC())); err != nil { e.Logger.ErrorContext(ctx, e.Name+": Error completing job", slog.String("err", err.Error()), slog.Int64("job_id", e.JobRow.ID), @@ -320,14 +319,14 @@ func (e *jobExecutor) reportError(ctx context.Context, res *jobExecutorResult) { now := time.Now() if cancelJob { - if err := e.Completer.JobSetStateIfRunning(e.stats, dbadapter.JobSetStateCancelled(e.JobRow.ID, now, errData)); err != nil { + if err := e.Completer.JobSetStateIfRunning(e.stats, riverdriver.JobSetStateCancelled(e.JobRow.ID, now, errData)); err != nil { e.Logger.ErrorContext(ctx, e.Name+": Failed to cancel job and report error", logAttrs...) } return } if e.JobRow.Attempt >= e.JobRow.MaxAttempts { - if err := e.Completer.JobSetStateIfRunning(e.stats, dbadapter.JobSetStateDiscarded(e.JobRow.ID, now, errData)); err != nil { + if err := e.Completer.JobSetStateIfRunning(e.stats, riverdriver.JobSetStateDiscarded(e.JobRow.ID, now, errData)); err != nil { e.Logger.ErrorContext(ctx, e.Name+": Failed to discard job and report error", logAttrs...) } return @@ -355,11 +354,11 @@ func (e *jobExecutor) reportError(ctx context.Context, res *jobExecutorResult) { // effectively no retry time smaller than the scheduler's run interval is // respected. Here, we offset that with a branch that makes jobs immediately // `available` if their retry was smaller than the scheduler's run interval. - var params *dbadapter.JobSetStateIfRunningParams + var params *riverdriver.JobSetStateIfRunningParams if nextRetryScheduledAt.Sub(e.TimeNowUTC()) <= e.SchedulerInterval { - params = dbadapter.JobSetStateErrorAvailable(e.JobRow.ID, nextRetryScheduledAt, errData) + params = riverdriver.JobSetStateErrorAvailable(e.JobRow.ID, nextRetryScheduledAt, errData) } else { - params = dbadapter.JobSetStateErrorRetryable(e.JobRow.ID, nextRetryScheduledAt, errData) + params = riverdriver.JobSetStateErrorRetryable(e.JobRow.ID, nextRetryScheduledAt, errData) } if err := e.Completer.JobSetStateIfRunning(e.stats, params); err != nil { e.Logger.ErrorContext(ctx, e.Name+": Failed to report error for job", logAttrs...) 
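The job_executor.go changes above are a one-for-one swap of dbadapter state-setter constructors for riverdriver equivalents, each returning a *riverdriver.JobSetStateIfRunningParams for the completer. Collected in sketch form (argument names illustrative; calls exactly as used in the hunks above):

riverdriver.JobSetStateCompleted(jobID, finalizedAt)
riverdriver.JobSetStateCancelled(jobID, finalizedAt, errData)
riverdriver.JobSetStateDiscarded(jobID, finalizedAt, errData)
riverdriver.JobSetStateErrorAvailable(jobID, scheduledAt, errData)
riverdriver.JobSetStateErrorRetryable(jobID, scheduledAt, errData)
riverdriver.JobSetStateSnoozed(jobID, scheduledAt, maxAttempts)
riverdriver.JobSetStateSnoozedAvailable(jobID, scheduledAt, maxAttempts)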
diff --git a/job_executor_test.go b/job_executor_test.go index 6147969f..740a1919 100644 --- a/job_executor_test.go +++ b/job_executor_test.go @@ -7,18 +7,16 @@ import ( "testing" "time" - "github.com/jackc/pgx/v5" "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/baseservice" - "github.com/riverqueue/river/internal/dbadapter" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/jobcompleter" "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/riverinternaltest" - "github.com/riverqueue/river/internal/util/ptrutil" "github.com/riverqueue/river/internal/util/timeutil" "github.com/riverqueue/river/internal/workunit" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivertype" ) @@ -115,18 +113,14 @@ func (h *testErrorHandler) HandlePanic(ctx context.Context, job *rivertype.JobRo func TestJobExecutor_Execute(t *testing.T) { t.Parallel() - var ( - ctx = context.Background() - queries = dbsqlc.New() - ) + ctx := context.Background() type testBundle struct { - adapter *dbadapter.StandardAdapter completer *jobcompleter.InlineJobCompleter + exec riverdriver.Executor errorHandler *testErrorHandler getUpdatesAndStop func() []jobcompleter.CompleterJobUpdated jobRow *rivertype.JobRow - tx pgx.Tx } setup := func(t *testing.T) (*jobExecutor, *testBundle) { @@ -135,8 +129,8 @@ func TestJobExecutor_Execute(t *testing.T) { var ( tx = riverinternaltest.TestTx(ctx, t) archetype = riverinternaltest.BaseServiceArchetype(t) - adapter = dbadapter.NewStandardAdapter(archetype, &dbadapter.StandardAdapterConfig{Executor: tx}) - completer = jobcompleter.NewInlineCompleter(archetype, adapter) + exec = riverpgxv5.New(nil).UnwrapExecutor(tx) + completer = jobcompleter.NewInlineCompleter(archetype, exec) ) var updates []jobcompleter.CompleterJobUpdated @@ -152,22 +146,20 @@ func TestJobExecutor_Execute(t *testing.T) { workUnitFactory := newWorkUnitFactoryWithCustomRetry(func() error { return nil }, nil) - job, err := queries.JobInsert(ctx, tx, dbsqlc.JobInsertParams{ - Args: []byte("{}"), - Attempt: 0, - AttemptedAt: ptrutil.Ptr(archetype.TimeNowUTC()), + job, err := exec.JobInsert(ctx, &riverdriver.JobInsertParams{ + EncodedArgs: []byte("{}"), Kind: (callbackArgs{}).Kind(), - MaxAttempts: int16(rivercommon.MaxAttemptsDefault), - Priority: int16(rivercommon.PriorityDefault), + MaxAttempts: rivercommon.MaxAttemptsDefault, + Priority: rivercommon.PriorityDefault, Queue: rivercommon.QueueDefault, - State: dbsqlc.JobStateAvailable, + State: rivertype.JobStateAvailable, }) require.NoError(t, err) // Fetch the job to make sure it's marked as running: - jobs, err := queries.JobGetAvailable(ctx, tx, dbsqlc.JobGetAvailableParams{ - LimitCount: 1, - Queue: rivercommon.QueueDefault, + jobs, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{ + Max: 1, + Queue: rivercommon.QueueDefault, }) require.NoError(t, err) require.Len(t, jobs, 1) @@ -175,16 +167,14 @@ func TestJobExecutor_Execute(t *testing.T) { job = jobs[0] bundle := &testBundle{ - adapter: adapter, completer: completer, + exec: exec, errorHandler: newTestErrorHandler(), getUpdatesAndStop: getJobUpdates, - jobRow: dbsqlc.JobRowFromInternal(job), - tx: tx, + jobRow: job, } executor := baseservice.Init(archetype, &jobExecutor{ - Adapter: bundle.adapter, ClientRetryPolicy: &retryPolicyNoJitter{}, Completer: bundle.completer, ErrorHandler: bundle.errorHandler, @@ -213,9 +203,9 @@ func 
TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateCompleted, job.State) + require.Equal(t, rivertype.JobStateCompleted, job.State) jobUpdates := bundle.getUpdatesAndStop() require.Len(t, jobUpdates, 1) @@ -239,13 +229,13 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, executor.ClientRetryPolicy.NextRetry(bundle.jobRow), job.ScheduledAt, 1*time.Second) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) require.Len(t, job.Errors, 1) require.Equal(t, baselineTime, job.Errors[0].At) - require.Equal(t, uint16(1), job.Errors[0].Attempt) + require.Equal(t, 1, job.Errors[0].Attempt) require.Equal(t, "job error", job.Errors[0].Error) require.Equal(t, "", job.Errors[0].Trace) }) @@ -263,10 +253,10 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, executor.ClientRetryPolicy.NextRetry(bundle.jobRow), job.ScheduledAt, 1*time.Second) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) }) t.Run("ErrorSetsJobAvailableBelowSchedulerIntervalThreshold", func(t *testing.T) { @@ -283,15 +273,16 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, executor.ClientRetryPolicy.NextRetry(bundle.jobRow), job.ScheduledAt, 1*time.Second) - require.Equal(t, dbsqlc.JobStateAvailable, job.State) + require.Equal(t, rivertype.JobStateAvailable, job.State) } - _, err := queries.JobSetState(ctx, bundle.tx, dbsqlc.JobSetStateParams{ - ID: bundle.jobRow.ID, - State: dbsqlc.JobStateRunning, + _, err := bundle.exec.JobUpdate(ctx, &riverdriver.JobUpdateParams{ + ID: bundle.jobRow.ID, + StateDoUpdate: true, + State: rivertype.JobStateRunning, }) require.NoError(t, err) @@ -301,10 +292,10 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, executor.ClientRetryPolicy.NextRetry(bundle.jobRow), job.ScheduledAt, 16*time.Second) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) } }) @@ -321,10 +312,10 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, time.Now(), *job.FinalizedAt, 1*time.Second) - require.Equal(t, dbsqlc.JobStateDiscarded, job.State) + require.Equal(t, rivertype.JobStateDiscarded, job.State) }) 
t.Run("JobCancelErrorCancelsJobEvenWithRemainingAttempts", func(t *testing.T) { @@ -341,13 +332,13 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, time.Now(), *job.FinalizedAt, 2*time.Second) - require.Equal(t, dbsqlc.JobStateCancelled, job.State) + require.Equal(t, rivertype.JobStateCancelled, job.State) require.Len(t, job.Errors, 1) require.WithinDuration(t, time.Now(), job.Errors[0].At, 2*time.Second) - require.Equal(t, uint16(1), job.Errors[0].Attempt) + require.Equal(t, 1, job.Errors[0].Attempt) require.Equal(t, "jobCancelError: throw away this job", job.Errors[0].Error) require.Equal(t, "", job.Errors[0].Trace) }) @@ -364,11 +355,11 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateScheduled, job.State) + require.Equal(t, rivertype.JobStateScheduled, job.State) require.WithinDuration(t, time.Now().Add(30*time.Minute), job.ScheduledAt, 2*time.Second) - require.Equal(t, maxAttemptsBefore+1, int(job.MaxAttempts)) + require.Equal(t, maxAttemptsBefore+1, job.MaxAttempts) require.Empty(t, job.Errors) }) @@ -384,11 +375,11 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateAvailable, job.State) + require.Equal(t, rivertype.JobStateAvailable, job.State) require.WithinDuration(t, time.Now(), job.ScheduledAt, 2*time.Second) - require.Equal(t, maxAttemptsBefore+1, int(job.MaxAttempts)) + require.Equal(t, maxAttemptsBefore+1, job.MaxAttempts) require.Empty(t, job.Errors) }) @@ -404,10 +395,10 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, executor.ClientRetryPolicy.NextRetry(bundle.jobRow), job.ScheduledAt, 1*time.Second) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) }) t.Run("ErrorWithCustomNextRetryReturnedFromWorker", func(t *testing.T) { @@ -424,9 +415,9 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) require.WithinDuration(t, nextRetryAt, job.ScheduledAt, time.Microsecond) }) @@ -442,10 +433,10 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, (&DefaultClientRetryPolicy{}).NextRetry(bundle.jobRow), job.ScheduledAt, 1*time.Second) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + 
require.Equal(t, rivertype.JobStateRetryable, job.State) }) t.Run("ErrorWithErrorHandler", func(t *testing.T) { @@ -463,9 +454,9 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) require.True(t, bundle.errorHandler.HandleErrorCalled) }) @@ -484,9 +475,9 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateCancelled, job.State) + require.Equal(t, rivertype.JobStateCancelled, job.State) require.True(t, bundle.errorHandler.HandleErrorCalled) }) @@ -505,9 +496,9 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) require.True(t, bundle.errorHandler.HandleErrorCalled) }) @@ -521,10 +512,10 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, executor.ClientRetryPolicy.NextRetry(bundle.jobRow), job.ScheduledAt, 1*time.Second) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) require.Len(t, job.Errors, 1) // Sufficient enough to ensure that the stack trace is included: require.Contains(t, job.Errors[0].Trace, "river/job_executor.go") @@ -542,10 +533,10 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, executor.ClientRetryPolicy.NextRetry(bundle.jobRow), job.ScheduledAt, 1*time.Second) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) }) t.Run("PanicDiscardsJobAfterTooManyAttempts", func(t *testing.T) { @@ -560,10 +551,10 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) require.WithinDuration(t, time.Now(), *job.FinalizedAt, 1*time.Second) - require.Equal(t, dbsqlc.JobStateDiscarded, job.State) + require.Equal(t, rivertype.JobStateDiscarded, job.State) }) t.Run("PanicWithPanicHandler", func(t *testing.T) { @@ -580,9 +571,9 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) require.True(t, 
bundle.errorHandler.HandlePanicCalled) }) @@ -600,9 +591,9 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateCancelled, job.State) + require.Equal(t, rivertype.JobStateCancelled, job.State) require.True(t, bundle.errorHandler.HandlePanicCalled) }) @@ -620,14 +611,14 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(ctx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + job, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) require.True(t, bundle.errorHandler.HandlePanicCalled) }) - runCancelTest := func(t *testing.T, returnErr error) *dbsqlc.RiverJob { //nolint:thelper + runCancelTest := func(t *testing.T, returnErr error) *rivertype.JobRow { //nolint:thelper executor, bundle := setup(t) // ensure we still have remaining attempts: @@ -653,9 +644,9 @@ func TestJobExecutor_Execute(t *testing.T) { executor.Execute(workCtx) executor.Completer.Wait() - job, err := queries.JobGetByID(ctx, bundle.tx, bundle.jobRow.ID) + jobRow, err := bundle.exec.JobGetByID(ctx, bundle.jobRow.ID) require.NoError(t, err) - return job + return jobRow } t.Run("RemoteCancellationViaCancel", func(t *testing.T) { @@ -664,10 +655,10 @@ func TestJobExecutor_Execute(t *testing.T) { job := runCancelTest(t, errors.New("a non-nil error")) require.WithinDuration(t, time.Now(), *job.FinalizedAt, 2*time.Second) - require.Equal(t, dbsqlc.JobStateCancelled, job.State) + require.Equal(t, rivertype.JobStateCancelled, job.State) require.Len(t, job.Errors, 1) require.WithinDuration(t, time.Now(), job.Errors[0].At, 2*time.Second) - require.Equal(t, uint16(1), job.Errors[0].Attempt) + require.Equal(t, 1, job.Errors[0].Attempt) require.Equal(t, "jobCancelError: job cancelled remotely", job.Errors[0].Error) require.Equal(t, ErrJobCancelledRemotely.Error(), job.Errors[0].Error) require.Equal(t, "", job.Errors[0].Trace) @@ -679,7 +670,7 @@ func TestJobExecutor_Execute(t *testing.T) { job := runCancelTest(t, nil) require.WithinDuration(t, time.Now(), *job.FinalizedAt, 2*time.Second) - require.Equal(t, dbsqlc.JobStateCompleted, job.State) + require.Equal(t, rivertype.JobStateCompleted, job.State) require.Empty(t, job.Errors) }) } diff --git a/job_list_params.go b/job_list_params.go index b5b45a41..e35c57d7 100644 --- a/job_list_params.go +++ b/job_list_params.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/riverqueue/river/internal/dbadapter" + "github.com/riverqueue/river/internal/dblist" "github.com/riverqueue/river/rivertype" ) @@ -105,6 +105,7 @@ const ( // params := NewJobListParams().OrderBy(JobListOrderByTime, SortOrderAsc).First(100) type JobListParams struct { after *JobListCursor + kinds []string metadataFragment string paginationCount int32 queues []string @@ -127,6 +128,7 @@ func NewJobListParams() *JobListParams { func (p *JobListParams) copy() *JobListParams { return &JobListParams{ after: p.after, + kinds: append([]string(nil), p.kinds...), metadataFragment: p.metadataFragment, paginationCount: p.paginationCount, queues: append([]string(nil), p.queues...), @@ -136,18 +138,18 @@ func (p *JobListParams) copy() *JobListParams { } } -func (p *JobListParams) toDBParams() 
(*dbadapter.JobListParams, error) { +func (p *JobListParams) toDBParams() (*dblist.JobListParams, error) { conditionsBuilder := &strings.Builder{} conditions := make([]string, 0, 10) namedArgs := make(map[string]any) - orderBy := []dbadapter.JobListOrderBy{} + orderBy := []dblist.JobListOrderBy{} - var sortOrder dbadapter.SortOrder + var sortOrder dblist.SortOrder switch p.sortOrder { case SortOrderAsc: - sortOrder = dbadapter.SortOrderAsc + sortOrder = dblist.SortOrderAsc case SortOrderDesc: - sortOrder = dbadapter.SortOrderDesc + sortOrder = dblist.SortOrderDesc default: return nil, errors.New("invalid sort order") } @@ -156,7 +158,7 @@ func (p *JobListParams) toDBParams() (*dbadapter.JobListParams, error) { return nil, errors.New("invalid sort field") } timeField := jobListTimeFieldForState(p.state) - orderBy = append(orderBy, []dbadapter.JobListOrderBy{ + orderBy = append(orderBy, []dblist.JobListOrderBy{ {Expr: timeField, Order: sortOrder}, {Expr: "id", Order: sortOrder}, }...) @@ -167,7 +169,7 @@ func (p *JobListParams) toDBParams() (*dbadapter.JobListParams, error) { } if p.after != nil { - if sortOrder == dbadapter.SortOrderAsc { + if sortOrder == dblist.SortOrderAsc { conditions = append(conditions, fmt.Sprintf(`("%s" > @cursor_time OR ("%s" = @cursor_time AND "id" > @after_id))`, timeField, timeField)) } else { conditions = append(conditions, fmt.Sprintf(`("%s" < @cursor_time OR ("%s" = @cursor_time AND "id" < @after_id))`, timeField, timeField)) @@ -183,8 +185,9 @@ func (p *JobListParams) toDBParams() (*dbadapter.JobListParams, error) { conditionsBuilder.WriteString(condition) } - dbParams := &dbadapter.JobListParams{ + dbParams := &dblist.JobListParams{ Conditions: conditionsBuilder.String(), + Kinds: p.kinds, LimitCount: p.paginationCount, NamedArgs: namedArgs, OrderBy: orderBy, @@ -220,6 +223,15 @@ func (p *JobListParams) First(count int) *JobListParams { return result } +// Kinds returns an updated filter set that will only return jobs of the given +// kinds. 
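// For example (kind names hypothetical):
//
//	params := NewJobListParams().Kinds("batch_report", "email_send").First(100)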
+func (p *JobListParams) Kinds(kinds ...string) *JobListParams { + result := p.copy() + result.kinds = make([]string, len(kinds)) + copy(result.kinds, kinds) + return result +} + func (p *JobListParams) Metadata(json string) *JobListParams { result := p.copy() result.metadataFragment = json diff --git a/job_test.go b/job_test.go index 392685d0..1af4bf10 100644 --- a/job_test.go +++ b/job_test.go @@ -18,18 +18,3 @@ func TestJobUniqueOpts_isEmpty(t *testing.T) { require.False(t, (&UniqueOpts{ByQueue: true}).isEmpty()) require.False(t, (&UniqueOpts{ByState: []rivertype.JobState{JobStateAvailable}}).isEmpty()) } - -func TestJobUniqueOpts_validate(t *testing.T) { - t.Parallel() - - require.NoError(t, (&UniqueOpts{}).validate()) - require.NoError(t, (&UniqueOpts{ - ByArgs: true, - ByPeriod: 1 * time.Second, - ByQueue: true, - ByState: []rivertype.JobState{JobStateAvailable}, - }).validate()) - - require.EqualError(t, (&UniqueOpts{ByPeriod: 1 * time.Millisecond}).validate(), "JobUniqueOpts.ByPeriod should not be less than 1 second") - require.EqualError(t, (&UniqueOpts{ByState: []rivertype.JobState{rivertype.JobState("invalid")}}).validate(), `JobUniqueOpts.ByState contains invalid state "invalid"`) -} diff --git a/producer.go b/producer.go index c79b51dc..a73b557e 100644 --- a/producer.go +++ b/producer.go @@ -10,17 +10,16 @@ import ( "github.com/riverqueue/river/internal/baseservice" "github.com/riverqueue/river/internal/componentstatus" - "github.com/riverqueue/river/internal/dbadapter" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/jobcompleter" "github.com/riverqueue/river/internal/notifier" "github.com/riverqueue/river/internal/util/chanutil" - "github.com/riverqueue/river/internal/util/sliceutil" "github.com/riverqueue/river/internal/workunit" + "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/rivertype" ) type producerConfig struct { + ClientID string ErrorHandler ErrorHandler // FetchCooldown is the minimum amount of time to wait between fetches of new // jobs. JobTimeout time.Duration MaxWorkerCount uint16 Notifier *notifier.Notifier - QueueName string + Queue string RetryPolicy ClientRetryPolicy SchedulerInterval time.Duration - WorkerName string Workers *Workers } @@ -57,9 +55,9 @@ type producer struct { // Jobs which are currently being worked. Only used by main goroutine. 
activeJobs map[int64]*jobExecutor - adapter dbadapter.Adapter completer jobcompleter.JobCompleter config *producerConfig + exec riverdriver.Executor errorHandler ErrorHandler workers *Workers @@ -81,14 +79,16 @@ type producer struct { retryPolicy ClientRetryPolicy } -func newProducer(archetype *baseservice.Archetype, adapter dbadapter.Adapter, completer jobcompleter.JobCompleter, config *producerConfig) (*producer, error) { - if adapter == nil { - return nil, errors.New("Adapter is required") //nolint:stylecheck +func newProducer(archetype *baseservice.Archetype, exec riverdriver.Executor, completer jobcompleter.JobCompleter, config *producerConfig) (*producer, error) { + if exec == nil { + return nil, errors.New("exec is required") } if completer == nil { return nil, errors.New("Completer is required") //nolint:stylecheck } - + if config.ClientID == "" { + return nil, errors.New("ClientID is required") + } if config.FetchCooldown <= 0 { return nil, errors.New("FetchCooldown must be greater than zero") } @@ -104,8 +104,8 @@ func newProducer(archetype *baseservice.Archetype, adapter dbadapter.Adapter, co if config.Notifier == nil { return nil, errors.New("Notifier is required") //nolint:stylecheck } - if config.QueueName == "" { - return nil, errors.New("QueueName is required") + if config.Queue == "" { + return nil, errors.New("Queue is required") //nolint:stylecheck } if config.RetryPolicy == nil { return nil, errors.New("RetryPolicy is required") @@ -113,19 +113,16 @@ func newProducer(archetype *baseservice.Archetype, adapter dbadapter.Adapter, co if config.SchedulerInterval == 0 { return nil, errors.New("SchedulerInterval is required") } - if config.WorkerName == "" { - return nil, errors.New("WorkerName is required") - } if config.Workers == nil { return nil, errors.New("Workers is required") } return baseservice.Init(archetype, &producer{ activeJobs: make(map[int64]*jobExecutor), - adapter: adapter, cancelCh: make(chan int64, 1000), completer: completer, config: config, + exec: exec, errorHandler: config.ErrorHandler, jobResultCh: make(chan *rivertype.JobRow, config.MaxWorkerCount), jobTimeout: config.JobTimeout, @@ -144,14 +141,14 @@ type producerStatusUpdateFunc func(queue string, status componentstatus.Status) // jobs. When workCtx is cancelled, any in-progress jobs will have their // contexts cancelled too. func (p *producer) Run(fetchCtx, workCtx context.Context, statusFunc producerStatusUpdateFunc) { - p.Logger.InfoContext(workCtx, p.Name+": Producer started", slog.String("queue", p.config.QueueName)) + p.Logger.InfoContext(workCtx, p.Name+": Producer started", slog.String("queue", p.config.Queue)) defer func() { - p.Logger.InfoContext(workCtx, p.Name+": Producer stopped", slog.String("queue", p.config.QueueName), slog.Uint64("num_completed_jobs", p.numJobsRan.Load())) + p.Logger.InfoContext(workCtx, p.Name+": Producer stopped", slog.String("queue", p.config.Queue), slog.Uint64("num_completed_jobs", p.numJobsRan.Load())) }() go p.heartbeatLogLoop(fetchCtx) - statusFunc(p.config.QueueName, componentstatus.Initializing) + statusFunc(p.config.Queue, componentstatus.Initializing) // TODO: fetcher should have some jitter in it to avoid stampeding issues. 
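// One possible shape for that jitter (hypothetical sketch using math/rand,
// not part of this change): stretch each cooldown by up to ~10% so that
// multiple producers don't fetch in lockstep, e.g.:
//
//	cooldown := p.config.FetchCooldown
//	cooldown += time.Duration(rand.Int63n(int64(cooldown/10) + 1))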
fetchLimiter := chanutil.NewDebouncedChan(fetchCtx, p.config.FetchCooldown) @@ -161,7 +158,7 @@ func (p *producer) Run(fetchCtx, workCtx context.Context, statusFunc producerSta p.Logger.ErrorContext(workCtx, p.Name+": Failed to unmarshal job control notification payload", slog.String("err", err.Error())) return } - if string(decoded.Action) == string(jobControlActionCancel) && decoded.Queue == p.config.QueueName && decoded.JobID > 0 { + if string(decoded.Action) == string(jobControlActionCancel) && decoded.Queue == p.config.Queue && decoded.JobID > 0 { select { case p.cancelCh <- decoded.JobID: default: @@ -179,9 +176,9 @@ func (p *producer) Run(fetchCtx, workCtx context.Context, statusFunc producerSta defer sub.Unlisten() p.fetchAndRunLoop(fetchCtx, workCtx, fetchLimiter, statusFunc) - statusFunc(p.config.QueueName, componentstatus.ShuttingDown) + statusFunc(p.config.Queue, componentstatus.ShuttingDown) p.executorShutdownLoop() - statusFunc(p.config.QueueName, componentstatus.Stopped) + statusFunc(p.config.Queue, componentstatus.Stopped) } type jobControlAction string @@ -214,7 +211,7 @@ func (p *producer) fetchAndRunLoop(fetchCtx, workCtx context.Context, fetchLimit p.Logger.ErrorContext(workCtx, p.Name+": Failed to unmarshal insert notification payload", slog.String("err", err.Error())) return } - if decoded.Queue != p.config.QueueName { + if decoded.Queue != p.config.Queue { return } p.Logger.DebugContext(workCtx, p.Name+": Received insert notification", slog.String("queue", decoded.Queue)) @@ -240,7 +237,7 @@ func (p *producer) fetchAndRunLoop(fetchCtx, workCtx context.Context, fetchLimit } }() - statusFunc(p.config.QueueName, componentstatus.Healthy) + statusFunc(p.config.Queue, componentstatus.Healthy) fetchResultCh := make(chan producerFetchResult) for { @@ -263,8 +260,8 @@ func (p *producer) fetchAndRunLoop(fetchCtx, workCtx context.Context, fetchLimit } func (p *producer) innerFetchLoop(workCtx context.Context, fetchResultCh chan producerFetchResult) { - count := p.maxJobsToFetch() - go p.dispatchWork(count, fetchResultCh) //nolint:contextcheck + limit := p.maxJobsToFetch() + go p.dispatchWork(limit, fetchResultCh) //nolint:contextcheck for { select { @@ -314,19 +311,22 @@ func (p *producer) maybeCancelJob(id int64) { executor.Cancel() } -func (p *producer) dispatchWork(count int32, jobsFetchedCh chan<- producerFetchResult) { +func (p *producer) dispatchWork(count int, jobsFetchedCh chan<- producerFetchResult) { // This intentionally uses a background context because we don't want it to // get cancelled if the producer is asked to shut down. In that situation, we // want to finish fetching any jobs we are in the midst of fetching, work // them, and then stop. Otherwise we'd have a risk of shutting down when we // had already fetched jobs in the database, leaving those jobs stranded. We'd // then potentially have to release them back to the queue. 
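// Note that the fetch below now also records which client locked each job:
// AttemptedBy is set from the producer's configured ClientID (see the hunk
// that follows), replacing the worker name previously plumbed through the
// adapter.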
- internalJobs, err := p.adapter.JobGetAvailable(context.Background(), p.config.QueueName, count) + jobs, err := p.exec.JobGetAvailable(context.Background(), &riverdriver.JobGetAvailableParams{ + AttemptedBy: p.config.ClientID, + Max: count, + Queue: p.config.Queue, + }) if err != nil { jobsFetchedCh <- producerFetchResult{err: err} return } - jobs := sliceutil.Map(internalJobs, dbsqlc.JobRowFromInternal) jobsFetchedCh <- producerFetchResult{jobs: jobs} } @@ -343,7 +343,7 @@ func (p *producer) heartbeatLogLoop(ctx context.Context) { p.Logger.InfoContext(ctx, p.Name+": Heartbeat", slog.Uint64("num_completed_jobs", p.numJobsRan.Load()), slog.Int("num_jobs_running", int(p.numJobsActive.Load())), - slog.String("queue", p.config.QueueName), + slog.String("queue", p.config.Queue), ) } } @@ -361,7 +361,6 @@ func (p *producer) startNewExecutors(workCtx context.Context, jobs []*rivertype. jobCtx, jobCancel := context.WithCancelCause(workCtx) executor := baseservice.Init(&p.Archetype, &jobExecutor{ - Adapter: p.adapter, CancelFunc: jobCancel, ClientJobTimeout: p.jobTimeout, ClientRetryPolicy: p.retryPolicy, @@ -385,8 +384,8 @@ func (p *producer) startNewExecutors(workCtx context.Context, jobs []*rivertype. } } -func (p *producer) maxJobsToFetch() int32 { - return int32(p.config.MaxWorkerCount) - p.numJobsActive.Load() +func (p *producer) maxJobsToFetch() int { + return int(p.config.MaxWorkerCount) - int(p.numJobsActive.Load()) } func (p *producer) handleWorkerDone(job *rivertype.JobRow) { diff --git a/producer_test.go b/producer_test.go index 6473706c..99e4ecf2 100644 --- a/producer_test.go +++ b/producer_test.go @@ -12,13 +12,14 @@ import ( "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/componentstatus" - "github.com/riverqueue/river/internal/dbadapter" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/jobcompleter" "github.com/riverqueue/river/internal/maintenance" "github.com/riverqueue/river/internal/notifier" "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivertype" ) func Test_Producer_CanSafelyCompleteJobsWhileFetchingNewOnes(t *testing.T) { @@ -43,12 +44,11 @@ func Test_Producer_CanSafelyCompleteJobsWhileFetchingNewOnes(t *testing.T) { archetype := riverinternaltest.BaseServiceArchetype(t) - adapter := dbadapter.NewStandardAdapter(archetype, &dbadapter.StandardAdapterConfig{ - Executor: dbPool, - WorkerName: "producer_test_worker", - }) + dbDriver := riverpgxv5.New(dbPool) + exec := dbDriver.GetExecutor() + listener := dbDriver.GetListener() - completer := jobcompleter.NewInlineCompleter(archetype, adapter) + completer := jobcompleter.NewInlineCompleter(archetype, exec) t.Cleanup(completer.Wait) type WithJobNumArgs struct { @@ -71,7 +71,7 @@ func Test_Producer_CanSafelyCompleteJobsWhileFetchingNewOnes(t *testing.T) { })) ignoreNotifierStatusUpdates := func(componentstatus.Status) {} - notifier := notifier.New(archetype, dbPool.Config().ConnConfig, ignoreNotifierStatusUpdates, riverinternaltest.Logger(t)) + notifier := notifier.New(archetype, listener, ignoreNotifierStatusUpdates, riverinternaltest.Logger(t)) config := &producerConfig{ ErrorHandler: newTestErrorHandler(), @@ -81,18 +81,18 @@ func Test_Producer_CanSafelyCompleteJobsWhileFetchingNewOnes(t *testing.T) { JobTimeout: JobTimeoutDefault, MaxWorkerCount: 1000, Notifier: notifier, - 
QueueName: rivercommon.QueueDefault, + Queue: rivercommon.QueueDefault, RetryPolicy: &DefaultClientRetryPolicy{}, SchedulerInterval: maintenance.SchedulerIntervalDefault, - WorkerName: "fakeWorkerNameTODO", + ClientID: "fakeWorkerNameTODO", Workers: workers, } - producer, err := newProducer(archetype, adapter, completer, config) + producer, err := newProducer(archetype, exec, completer, config) require.NoError(err) - params := make([]*dbadapter.JobInsertParams, maxJobCount) + params := make([]*riverdriver.JobInsertParams, maxJobCount) for i := range params { - insertParams, err := insertParamsFromArgsAndOptions(WithJobNumArgs{JobNum: i}, nil) + insertParams, _, err := insertParamsFromArgsAndOptions(WithJobNumArgs{JobNum: i}, nil) require.NoError(err) params[i] = insertParams @@ -117,7 +117,7 @@ func Test_Producer_CanSafelyCompleteJobsWhileFetchingNewOnes(t *testing.T) { } }() - _, err = adapter.JobInsertMany(ctx, params) + _, err = exec.JobInsertMany(ctx, params) require.NoError(err) ignoreStatusUpdates := func(queue string, status componentstatus.Status) {} @@ -145,8 +145,8 @@ func Test_Producer_Run(t *testing.T) { ctx := context.Background() type testBundle struct { - adapter *dbadapter.StandardAdapter completer jobcompleter.JobCompleter + exec riverdriver.Executor jobUpdates chan jobcompleter.CompleterJobUpdated workers *Workers } @@ -155,15 +155,13 @@ func Test_Producer_Run(t *testing.T) { t.Helper() dbPool := riverinternaltest.TestDB(ctx, t) + driver := riverpgxv5.New(dbPool) + exec := driver.GetExecutor() + listener := driver.GetListener() archetype := riverinternaltest.BaseServiceArchetype(t) - adapter := dbadapter.NewStandardAdapter(archetype, &dbadapter.StandardAdapterConfig{ - Executor: dbPool, - WorkerName: "producer_test_worker", - }) - - completer := jobcompleter.NewInlineCompleter(archetype, adapter) + completer := jobcompleter.NewInlineCompleter(archetype, exec) jobUpdates := make(chan jobcompleter.CompleterJobUpdated, 10) completer.Subscribe(func(update jobcompleter.CompleterJobUpdated) { @@ -172,7 +170,7 @@ func Test_Producer_Run(t *testing.T) { workers := NewWorkers() - notifier := notifier.New(archetype, dbPool.Config().ConnConfig, func(componentstatus.Status) {}, riverinternaltest.Logger(t)) + notifier := notifier.New(archetype, listener, func(componentstatus.Status) {}, riverinternaltest.Logger(t)) config := &producerConfig{ ErrorHandler: newTestErrorHandler(), @@ -181,30 +179,30 @@ func Test_Producer_Run(t *testing.T) { JobTimeout: JobTimeoutDefault, MaxWorkerCount: 1000, Notifier: notifier, - QueueName: rivercommon.QueueDefault, + Queue: rivercommon.QueueDefault, RetryPolicy: &DefaultClientRetryPolicy{}, SchedulerInterval: riverinternaltest.SchedulerShortInterval, - WorkerName: "fakeWorkerNameTODO", + ClientID: "fakeWorkerNameTODO", Workers: workers, } - producer, err := newProducer(archetype, adapter, completer, config) + producer, err := newProducer(archetype, exec, completer, config) require.NoError(t, err) return producer, &testBundle{ - adapter: adapter, completer: completer, + exec: exec, jobUpdates: jobUpdates, workers: workers, } } - mustInsert := func(ctx context.Context, t *testing.T, adapter dbadapter.Adapter, args JobArgs) { + mustInsert := func(ctx context.Context, t *testing.T, exec riverdriver.Executor, args JobArgs) { t.Helper() - insertParams, err := insertParamsFromArgsAndOptions(args, nil) + insertParams, _, err := insertParamsFromArgsAndOptions(args, nil) require.NoError(t, err) - _, err = adapter.JobInsert(ctx, insertParams) + _, err = 
exec.JobInsert(ctx, insertParams) require.NoError(t, err) } @@ -247,10 +245,10 @@ func Test_Producer_Run(t *testing.T) { t.Cleanup(wg.Wait) t.Cleanup(fetchCtxDone) - mustInsert(ctx, t, bundle.adapter, &noOpArgs{}) + mustInsert(ctx, t, bundle.exec, &noOpArgs{}) update := riverinternaltest.WaitOrTimeout(t, bundle.jobUpdates) - require.Equal(t, dbsqlc.JobStateCompleted, update.Job.State) + require.Equal(t, rivertype.JobStateCompleted, update.Job.State) }) t.Run("UnknownJobKind", func(t *testing.T) { @@ -274,8 +272,8 @@ func Test_Producer_Run(t *testing.T) { t.Cleanup(wg.Wait) t.Cleanup(fetchCtxDone) - mustInsert(ctx, t, bundle.adapter, &noOpArgs{}) - mustInsert(ctx, t, bundle.adapter, &callbackArgs{}) // not registered + mustInsert(ctx, t, bundle.exec, &noOpArgs{}) + mustInsert(ctx, t, bundle.exec, &callbackArgs{}) // not registered updates := riverinternaltest.WaitOrTimeoutN(t, bundle.jobUpdates, 2) @@ -286,7 +284,7 @@ func Test_Producer_Run(t *testing.T) { // Order jobs come back in is not guaranteed, which is why this is // written somewhat strangely. - findJob := func(kind string) *dbsqlc.RiverJob { + findJob := func(kind string) *rivertype.JobRow { index := slices.IndexFunc(updates, func(u jobcompleter.CompleterJobUpdated) bool { return u.Job.Kind == kind }) require.NotEqualf(t, -1, index, "Job update not found", "Job update not found for kind: %s", kind) return updates[index].Job @@ -294,12 +292,12 @@ func Test_Producer_Run(t *testing.T) { { job := findJob((&callbackArgs{}).Kind()) - require.Equal(t, dbsqlc.JobStateRetryable, job.State) + require.Equal(t, rivertype.JobStateRetryable, job.State) require.Equal(t, (&UnknownJobKindError{Kind: (&callbackArgs{}).Kind()}).Error(), job.Errors[0].Error) } { job := findJob((&noOpArgs{}).Kind()) - require.Equal(t, dbsqlc.JobStateCompleted, job.State) + require.Equal(t, rivertype.JobStateCompleted, job.State) } }) } diff --git a/riverdriver/go.mod b/riverdriver/go.mod index 82417c83..c4d8fcb2 100644 --- a/riverdriver/go.mod +++ b/riverdriver/go.mod @@ -1,14 +1,7 @@ module github.com/riverqueue/river/riverdriver -go 1.21 +go 1.21.4 -require github.com/jackc/pgx/v5 v5.5.0 +replace github.com/riverqueue/river/rivertype => ../rivertype -require ( - github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/puddle/v2 v2.2.1 // indirect - golang.org/x/crypto v0.15.0 // indirect - golang.org/x/sync v0.5.0 // indirect - golang.org/x/text v0.14.0 // indirect -) +require github.com/riverqueue/river/rivertype v0.0.20 diff --git a/riverdriver/go.sum b/riverdriver/go.sum index b9c08498..e69de29b 100644 --- a/riverdriver/go.sum +++ b/riverdriver/go.sum @@ -1,28 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw= -github.com/jackc/pgx/v5 v5.5.0/go.mod 
h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= -github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= -github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/riverdriver/river_driver_interface.go b/riverdriver/river_driver_interface.go index b944ba6b..57a02d59 100644 --- a/riverdriver/river_driver_interface.go +++ b/riverdriver/river_driver_interface.go @@ -17,13 +17,11 @@ import ( "errors" "time" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" + "github.com/riverqueue/river/rivertype" ) var ( ErrNotImplemented = errors.New("driver does not implement this functionality") - ErrNoRows = errors.New("no rows found") ErrSubTxNotSupported = errors.New("subtransactions not supported for this driver") ) @@ -39,70 +37,88 @@ var ( // require it to change substantially, and therefore it should not be // implemented or invoked by user code. Changes to this interface WILL NOT be // considered breaking changes for purposes of River's semantic versioning. +// +// API is not stable. DO NOT IMPLEMENT. type Driver[TTx any] interface { - // GetDBPool returns a database pool.This doesn't make sense in a world - // where multiple drivers are supported and is subject to change. + // GetExecutor gets an executor for the driver. // // API is not stable. DO NOT USE. - GetDBPool() *pgxpool.Pool + GetExecutor() Executor - // GetExecutor gets an executor for the driver. + // GetListener gets a listener for purposes of receiving notifications. // // API is not stable. DO NOT USE. - GetExecutor() Executor + GetListener() Listener - // UnwrapExecutor gets unwraps executor from a driver transaction. + // HasPool returns true if the driver is configured with a database pool. // // API is not stable. DO NOT USE. - UnwrapExecutor(tx TTx) Executor + HasPool() bool - // UnwrapTx turns a generically typed transaction into a pgx.Tx for use with - // internal infrastructure. This doesn't make sense in a world where - // multiple drivers are supported and is subject to change. 
+ // UnwrapExecutor unwraps an executor from a driver transaction. // // API is not stable. DO NOT USE. - UnwrapTx(tx TTx) pgx.Tx + UnwrapExecutor(tx TTx) ExecutorTx } // Executor provides River operations against a database. It may be a database // pool or transaction. +// +// API is not stable. DO NOT IMPLEMENT. type Executor interface { // Begin begins a new subtransaction. ErrSubTxNotSupported may be returned // if the executor is a transaction and the driver doesn't support // subtransactions (like riverdriver/riverdatabasesql for database/sql). - // - // API is not stable. DO NOT USE. Begin(ctx context.Context) (ExecutorTx, error) // Exec executes raw SQL. Used for migrations. - // - // API is not stable. DO NOT USE. Exec(ctx context.Context, sql string) (struct{}, error) + JobCancel(ctx context.Context, params *JobCancelParams) (*rivertype.JobRow, error) + JobDeleteBefore(ctx context.Context, params *JobDeleteBeforeParams) (int, error) + JobGetAvailable(ctx context.Context, params *JobGetAvailableParams) ([]*rivertype.JobRow, error) + JobGetByID(ctx context.Context, id int64) (*rivertype.JobRow, error) + JobGetByIDMany(ctx context.Context, id []int64) ([]*rivertype.JobRow, error) + JobGetByKindAndUniqueProperties(ctx context.Context, params *JobGetByKindAndUniquePropertiesParams) (*rivertype.JobRow, error) + JobGetByKindMany(ctx context.Context, kind []string) ([]*rivertype.JobRow, error) + JobGetStuck(ctx context.Context, params *JobGetStuckParams) ([]*rivertype.JobRow, error) + JobInsert(ctx context.Context, params *JobInsertParams) (*rivertype.JobRow, error) + JobInsertMany(ctx context.Context, params []*JobInsertParams) (int64, error) + JobList(ctx context.Context, sql string, namedArgs map[string]any) ([]*rivertype.JobRow, error) + JobListFields() string + JobRescueMany(ctx context.Context, params *JobRescueManyParams) (*struct{}, error) + JobRetry(ctx context.Context, id int64) (*rivertype.JobRow, error) + JobSchedule(ctx context.Context, params *JobScheduleParams) (int, error) + JobSetStateIfRunning(ctx context.Context, params *JobSetStateIfRunningParams) (*rivertype.JobRow, error) + JobUpdate(ctx context.Context, params *JobUpdateParams) (*rivertype.JobRow, error) + LeaderAttemptElect(ctx context.Context, params *LeaderElectParams) (bool, error) + LeaderAttemptReelect(ctx context.Context, params *LeaderElectParams) (bool, error) + LeaderDeleteExpired(ctx context.Context, name string) (int, error) + LeaderGetElectedLeader(ctx context.Context, name string) (*Leader, error) + LeaderInsert(ctx context.Context, params *LeaderInsertParams) (*Leader, error) + LeaderResign(ctx context.Context, params *LeaderResignParams) (bool, error) + // MigrationDeleteByVersionMany deletes many migration versions. - // - // API is not stable. DO NOT USE. MigrationDeleteByVersionMany(ctx context.Context, versions []int) ([]*Migration, error) // MigrationGetAll gets all currently applied migrations. - // - // API is not stable. DO NOT USE. MigrationGetAll(ctx context.Context) ([]*Migration, error) // MigrationInsertMany inserts many migration versions. - // - // API is not stable. DO NOT USE. MigrationInsertMany(ctx context.Context, versions []int) ([]*Migration, error) + Notify(ctx context.Context, topic string, payload string) error + PGAdvisoryXactLock(ctx context.Context, key int64) (*struct{}, error) + // TableExists checks whether a table exists for the schema in the current // search schema. - // - // API is not stable. DO NOT USE. 
TableExists(ctx context.Context, tableName string) (bool, error) } // ExecutorTx is an executor which is a transaction. In addition to standard // Executor operations, it may be committed or rolled back. +// +// API is not stable. DO NOT IMPLEMENT. type ExecutorTx interface { Executor @@ -117,7 +133,178 @@ type ExecutorTx interface { Rollback(ctx context.Context) error } +// Listener listens for notifications. In Postgres, this is a database connection +// where `LISTEN` has been run. +// +// API is not stable. DO NOT IMPLEMENT. +type Listener interface { + Close(ctx context.Context) error + Connect(ctx context.Context) error + Listen(ctx context.Context, topic string) error + Ping(ctx context.Context) error + Unlisten(ctx context.Context, topic string) error + WaitForNotification(ctx context.Context) (*Notification, error) +} + +type Notification struct { + Payload string + Topic string +} + +type JobCancelParams struct { + ID int64 + CancelAttemptedAt time.Time + JobControlTopic string +} + +type JobDeleteBeforeParams struct { + CancelledFinalizedAtHorizon time.Time + CompletedFinalizedAtHorizon time.Time + DiscardedFinalizedAtHorizon time.Time + Max int +} + +type JobGetAvailableParams struct { + AttemptedBy string + Max int + Queue string +} + +type JobGetByKindAndUniquePropertiesParams struct { + Kind string + ByArgs bool + Args []byte + ByCreatedAt bool + CreatedAtStart time.Time + CreatedAtEnd time.Time + ByQueue bool + Queue string + ByState bool + State []string +} + +type JobGetStuckParams struct { + Max int + StuckHorizon time.Time +} + +type JobInsertParams struct { + Attempt int + AttemptedAt *time.Time + EncodedArgs []byte + Errors [][]byte + FinalizedAt *time.Time + Kind string + MaxAttempts int + Metadata []byte + Priority int + Queue string + ScheduledAt *time.Time + State rivertype.JobState + Tags []string +} + +type JobRescueManyParams struct { + ID []int64 + Error [][]byte + FinalizedAt []time.Time + ScheduledAt []time.Time + State []string +} + +type JobScheduleParams struct { + InsertTopic string + Max int + Now time.Time +} + +// JobSetStateIfRunningParams are parameters to update the state of a currently running +// job. Use one of the constructors below to ensure a correct combination of +// parameters. 
+type JobSetStateIfRunningParams struct { + ID int64 + ErrData []byte + FinalizedAt *time.Time + MaxAttempts *int + ScheduledAt *time.Time + State rivertype.JobState +} + +func JobSetStateCancelled(id int64, finalizedAt time.Time, errData []byte) *JobSetStateIfRunningParams { + return &JobSetStateIfRunningParams{ID: id, ErrData: errData, FinalizedAt: &finalizedAt, State: rivertype.JobStateCancelled} +} + +func JobSetStateCompleted(id int64, finalizedAt time.Time) *JobSetStateIfRunningParams { + return &JobSetStateIfRunningParams{ID: id, FinalizedAt: &finalizedAt, State: rivertype.JobStateCompleted} +} + +func JobSetStateDiscarded(id int64, finalizedAt time.Time, errData []byte) *JobSetStateIfRunningParams { + return &JobSetStateIfRunningParams{ID: id, ErrData: errData, FinalizedAt: &finalizedAt, State: rivertype.JobStateDiscarded} +} + +func JobSetStateErrorAvailable(id int64, scheduledAt time.Time, errData []byte) *JobSetStateIfRunningParams { + return &JobSetStateIfRunningParams{ID: id, ErrData: errData, ScheduledAt: &scheduledAt, State: rivertype.JobStateAvailable} +} + +func JobSetStateErrorRetryable(id int64, scheduledAt time.Time, errData []byte) *JobSetStateIfRunningParams { + return &JobSetStateIfRunningParams{ID: id, ErrData: errData, ScheduledAt: &scheduledAt, State: rivertype.JobStateRetryable} +} + +func JobSetStateSnoozed(id int64, scheduledAt time.Time, maxAttempts int) *JobSetStateIfRunningParams { + return &JobSetStateIfRunningParams{ID: id, MaxAttempts: &maxAttempts, ScheduledAt: &scheduledAt, State: rivertype.JobStateScheduled} +} + +func JobSetStateSnoozedAvailable(id int64, scheduledAt time.Time, maxAttempts int) *JobSetStateIfRunningParams { + return &JobSetStateIfRunningParams{ID: id, MaxAttempts: &maxAttempts, ScheduledAt: &scheduledAt, State: rivertype.JobStateAvailable} +} + +type JobUpdateParams struct { + ID int64 + AttemptDoUpdate bool + Attempt int + AttemptedAtDoUpdate bool + AttemptedAt *time.Time + ErrorsDoUpdate bool + Errors [][]byte + FinalizedAtDoUpdate bool + FinalizedAt *time.Time + StateDoUpdate bool + State rivertype.JobState +} + +// Leader represents a River leader. +// +// API is not stable. DO NOT USE. +type Leader struct { + ElectedAt time.Time + ExpiresAt time.Time + Name string + LeaderID string +} + +type LeaderInsertParams struct { + ElectedAt *time.Time + ExpiresAt *time.Time + Name string + LeaderID string + TTL time.Duration +} + +type LeaderElectParams struct { + Name string + LeaderID string + TTL time.Duration +} + +type LeaderResignParams struct { + LeaderID string + LeadershipTopic string + Name string +} + // Migration represents a River migration. +// +// API is not stable. DO NOT USE. type Migration struct { // ID is an automatically generated primary key for the migration. 
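To make the new shape concrete, here is a minimal sketch of driving these interfaces by hand (illustrative only, not part of the diff: the DSN, client ID, and queue name are placeholders, error handling is abbreviated, and a real River client wires all of this up internally):

package main

import (
	"context"
	"log"
	"time"

	"github.com/jackc/pgx/v5/pgxpool"

	"github.com/riverqueue/river/riverdriver"
	"github.com/riverqueue/river/riverdriver/riverpgxv5"
)

func main() {
	ctx := context.Background()

	// Placeholder connection string; any pgx pool works here.
	dbPool, err := pgxpool.New(ctx, "postgres://localhost/river_dev")
	if err != nil {
		log.Fatal(err)
	}
	defer dbPool.Close()

	// The driver wraps the pool; all database operations flow through
	// the Executor it hands back.
	exec := riverpgxv5.New(dbPool).GetExecutor()

	// Fetch up to 10 available jobs on the default queue; the underlying
	// query locks them and marks them running in a single statement.
	jobs, err := exec.JobGetAvailable(ctx, &riverdriver.JobGetAvailableParams{
		AttemptedBy: "example-client-id",
		Max:         10,
		Queue:       "default",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Finalize each job with a state constructor, which guarantees a
	// valid combination of JobSetStateIfRunningParams fields.
	for _, job := range jobs {
		if _, err := exec.JobSetStateIfRunning(ctx, riverdriver.JobSetStateCompleted(job.ID, time.Now())); err != nil {
			log.Fatal(err)
		}
	}
}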
// diff --git a/riverdriver/riverdatabasesql/go.mod b/riverdriver/riverdatabasesql/go.mod index fd967e9d..40ec3662 100644 --- a/riverdriver/riverdatabasesql/go.mod +++ b/riverdriver/riverdatabasesql/go.mod @@ -1,26 +1,23 @@ module github.com/riverqueue/river/riverdriver/riverdatabasesql -go 1.21 +go 1.21.4 replace github.com/riverqueue/river/riverdriver => ../ +replace github.com/riverqueue/river/rivertype => ../../rivertype + require ( - github.com/jackc/pgx/v5 v5.5.0 github.com/lib/pq v1.10.9 github.com/riverqueue/river/riverdriver v0.0.20 + github.com/riverqueue/river/rivertype v0.0.20 github.com/stretchr/testify v1.8.1 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/puddle/v2 v2.2.1 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/kr/pretty v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect - golang.org/x/crypto v0.15.0 // indirect - golang.org/x/sync v0.5.0 // indirect - golang.org/x/text v0.14.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/riverdriver/riverdatabasesql/go.sum b/riverdriver/riverdatabasesql/go.sum index 2ff6c7fe..8226d072 100644 --- a/riverdriver/riverdatabasesql/go.sum +++ b/riverdriver/riverdatabasesql/go.sum @@ -2,42 +2,33 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw= -github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= -github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= -github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/riverdriver/riverdatabasesql/internal/dbsqlc/db.go b/riverdriver/riverdatabasesql/internal/dbsqlc/db.go index 8f705105..785808a0 100644 --- a/riverdriver/riverdatabasesql/internal/dbsqlc/db.go +++ b/riverdriver/riverdatabasesql/internal/dbsqlc/db.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. // versions: -// sqlc v1.24.0 +// sqlc v1.25.0 package dbsqlc diff --git a/internal/dbsqlc/error.go b/riverdriver/riverdatabasesql/internal/dbsqlc/error.go similarity index 100% rename from internal/dbsqlc/error.go rename to riverdriver/riverdatabasesql/internal/dbsqlc/error.go diff --git a/riverdriver/riverdatabasesql/internal/dbsqlc/models.go b/riverdriver/riverdatabasesql/internal/dbsqlc/models.go index c5b6578c..1252d9d8 100644 --- a/riverdriver/riverdatabasesql/internal/dbsqlc/models.go +++ b/riverdriver/riverdatabasesql/internal/dbsqlc/models.go @@ -1,13 +1,89 @@ // Code generated by sqlc. DO NOT EDIT. 
// versions: -// sqlc v1.24.0 +// sqlc v1.25.0 package dbsqlc import ( + "database/sql/driver" + "encoding/json" + "fmt" "time" ) +type JobState string + +const ( + JobStateAvailable JobState = "available" + JobStateCancelled JobState = "cancelled" + JobStateCompleted JobState = "completed" + JobStateDiscarded JobState = "discarded" + JobStateRetryable JobState = "retryable" + JobStateRunning JobState = "running" + JobStateScheduled JobState = "scheduled" +) + +func (e *JobState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = JobState(s) + case string: + *e = JobState(s) + default: + return fmt.Errorf("unsupported scan type for JobState: %T", src) + } + return nil +} + +type NullJobState struct { + JobState JobState + Valid bool // Valid is true if JobState is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullJobState) Scan(value interface{}) error { + if value == nil { + ns.JobState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.JobState.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullJobState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.JobState), nil +} + +type RiverJob struct { + ID int64 + Args []byte + Attempt int16 + AttemptedAt *time.Time + AttemptedBy []string + CreatedAt time.Time + Errors []AttemptError + FinalizedAt *time.Time + Kind string + MaxAttempts int16 + Metadata json.RawMessage + Priority int16 + Queue string + State JobState + ScheduledAt time.Time + Tags []string +} + +type RiverLeader struct { + ElectedAt time.Time + ExpiresAt time.Time + LeaderID string + Name string +} + type RiverMigration struct { ID int64 CreatedAt time.Time diff --git a/riverdriver/riverdatabasesql/internal/dbsqlc/pg_misc.sql.go b/riverdriver/riverdatabasesql/internal/dbsqlc/pg_misc.sql.go new file mode 100644 index 00000000..472f8fdf --- /dev/null +++ b/riverdriver/riverdatabasesql/internal/dbsqlc/pg_misc.sql.go @@ -0,0 +1,33 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.25.0 +// source: pg_misc.sql + +package dbsqlc + +import ( + "context" +) + +const pGAdvisoryXactLock = `-- name: PGAdvisoryXactLock :exec +SELECT pg_advisory_xact_lock($1) +` + +func (q *Queries) PGAdvisoryXactLock(ctx context.Context, db DBTX, key int64) error { + _, err := db.ExecContext(ctx, pGAdvisoryXactLock, key) + return err +} + +const pGNotify = `-- name: PGNotify :exec +SELECT pg_notify($1, $2) +` + +type PGNotifyParams struct { + Topic string + Payload string +} + +func (q *Queries) PGNotify(ctx context.Context, db DBTX, arg PGNotifyParams) error { + _, err := db.ExecContext(ctx, pGNotify, arg.Topic, arg.Payload) + return err +} diff --git a/riverdriver/riverdatabasesql/internal/dbsqlc/river_job.sql.go b/riverdriver/riverdatabasesql/internal/dbsqlc/river_job.sql.go new file mode 100644 index 00000000..0da5f823 --- /dev/null +++ b/riverdriver/riverdatabasesql/internal/dbsqlc/river_job.sql.go @@ -0,0 +1,813 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.25.0 +// source: river_job.sql + +package dbsqlc + +import ( + "context" + "encoding/json" + "time" + + "github.com/lib/pq" +) + +const jobCancel = `-- name: JobCancel :one +WITH locked_job AS ( + SELECT + id, queue, state, finalized_at + FROM river_job + WHERE river_job.id = $1 + FOR UPDATE +), +notification AS ( + SELECT + id, + pg_notify($2, json_build_object('action', 'cancel', 'job_id', id, 'queue', queue)::text) + FROM + locked_job + WHERE + state NOT IN ('cancelled', 'completed', 'discarded') + AND finalized_at IS NULL +), +updated_job AS ( + UPDATE river_job + SET + -- If the job is actively running, we want to let its current client and + -- producer handle the cancellation. Otherwise, immediately cancel it. + state = CASE WHEN state = 'running'::river_job_state THEN state ELSE 'cancelled'::river_job_state END, + finalized_at = CASE WHEN state = 'running'::river_job_state THEN finalized_at ELSE now() END, + -- Mark the job as cancelled by query so that the rescuer knows not to + -- rescue it, even if it gets stuck in the running state: + metadata = jsonb_set(metadata, '{cancel_attempted_at}'::text[], $3::jsonb, true) + FROM notification + WHERE river_job.id = notification.id + RETURNING river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags +) +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM river_job +WHERE id = $1::bigint + AND id NOT IN (SELECT id FROM updated_job) +UNION +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM updated_job +` + +type JobCancelParams struct { + ID int64 + JobControlTopic string + CancelAttemptedAt json.RawMessage +} + +func (q *Queries) JobCancel(ctx context.Context, db DBTX, arg JobCancelParams) (*RiverJob, error) { + row := db.QueryRowContext(ctx, jobCancel, arg.ID, arg.JobControlTopic, arg.CancelAttemptedAt) + var i RiverJob + err := row.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ) + return &i, err +} + +const jobDeleteBefore = `-- name: JobDeleteBefore :one +WITH deleted_jobs AS ( + DELETE FROM river_job + WHERE id IN ( + SELECT id + FROM river_job + WHERE + (state = 'cancelled' AND finalized_at < $1::timestamptz) OR + (state = 'completed' AND finalized_at < $2::timestamptz) OR + (state = 'discarded' AND finalized_at < $3::timestamptz) + ORDER BY id + LIMIT $4::bigint + ) + RETURNING id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +) +SELECT count(*) +FROM deleted_jobs +` + +type JobDeleteBeforeParams struct { + CancelledFinalizedAtHorizon time.Time + CompletedFinalizedAtHorizon time.Time + DiscardedFinalizedAtHorizon time.Time + Max int64 +} + +func (q *Queries) JobDeleteBefore(ctx context.Context, db DBTX, arg JobDeleteBeforeParams) (int64, error) { + row := db.QueryRowContext(ctx, jobDeleteBefore, + 
arg.CancelledFinalizedAtHorizon, + arg.CompletedFinalizedAtHorizon, + arg.DiscardedFinalizedAtHorizon, + arg.Max, + ) + var count int64 + err := row.Scan(&count) + return count, err +} + +const jobGetAvailable = `-- name: JobGetAvailable :many +WITH locked_jobs AS ( + SELECT + id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags + FROM + river_job + WHERE + state = 'available'::river_job_state + AND queue = $2::text + AND scheduled_at <= now() + ORDER BY + priority ASC, + scheduled_at ASC, + id ASC + LIMIT $3::integer + FOR UPDATE + SKIP LOCKED +) +UPDATE + river_job +SET + state = 'running'::river_job_state, + attempt = river_job.attempt + 1, + attempted_at = now(), + attempted_by = array_append(river_job.attempted_by, $1::text) +FROM + locked_jobs +WHERE + river_job.id = locked_jobs.id +RETURNING + river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags +` + +type JobGetAvailableParams struct { + AttemptedBy string + Queue string + Max int32 +} + +func (q *Queries) JobGetAvailable(ctx context.Context, db DBTX, arg JobGetAvailableParams) ([]*RiverJob, error) { + rows, err := db.QueryContext(ctx, jobGetAvailable, arg.AttemptedBy, arg.Queue, arg.Max) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*RiverJob + for rows.Next() { + var i RiverJob + if err := rows.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const jobGetByID = `-- name: JobGetByID :one +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM river_job +WHERE id = $1 +LIMIT 1 +` + +func (q *Queries) JobGetByID(ctx context.Context, db DBTX, id int64) (*RiverJob, error) { + row := db.QueryRowContext(ctx, jobGetByID, id) + var i RiverJob + err := row.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ) + return &i, err +} + +const jobGetByIDMany = `-- name: JobGetByIDMany :many +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM river_job +WHERE id = any($1::bigint[]) +ORDER BY id +` + +func (q *Queries) JobGetByIDMany(ctx context.Context, db DBTX, id []int64) ([]*RiverJob, error) { + rows, err := db.QueryContext(ctx, jobGetByIDMany, pq.Array(id)) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*RiverJob + for rows.Next() { + var i RiverJob + if err := rows.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + 
pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const jobGetByKindAndUniqueProperties = `-- name: JobGetByKindAndUniqueProperties :one +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM river_job +WHERE kind = $1 + AND CASE WHEN $2::boolean THEN args = $3 ELSE true END + AND CASE WHEN $4::boolean THEN tstzrange($5::timestamptz, $6::timestamptz, '[)') @> created_at ELSE true END + AND CASE WHEN $7::boolean THEN queue = $8 ELSE true END + AND CASE WHEN $9::boolean THEN state::text = any($10::text[]) ELSE true END +` + +type JobGetByKindAndUniquePropertiesParams struct { + Kind string + ByArgs bool + Args []byte + ByCreatedAt bool + CreatedAtStart time.Time + CreatedAtEnd time.Time + ByQueue bool + Queue string + ByState bool + State []string +} + +func (q *Queries) JobGetByKindAndUniqueProperties(ctx context.Context, db DBTX, arg JobGetByKindAndUniquePropertiesParams) (*RiverJob, error) { + row := db.QueryRowContext(ctx, jobGetByKindAndUniqueProperties, + arg.Kind, + arg.ByArgs, + arg.Args, + arg.ByCreatedAt, + arg.CreatedAtStart, + arg.CreatedAtEnd, + arg.ByQueue, + arg.Queue, + arg.ByState, + pq.Array(arg.State), + ) + var i RiverJob + err := row.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ) + return &i, err +} + +const jobGetByKindMany = `-- name: JobGetByKindMany :many +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM river_job +WHERE kind = any($1::text[]) +ORDER BY id +` + +func (q *Queries) JobGetByKindMany(ctx context.Context, db DBTX, kind []string) ([]*RiverJob, error) { + rows, err := db.QueryContext(ctx, jobGetByKindMany, pq.Array(kind)) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*RiverJob + for rows.Next() { + var i RiverJob + if err := rows.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const jobGetStuck = `-- name: JobGetStuck :many +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM river_job +WHERE state = 'running'::river_job_state + AND attempted_at < $1::timestamptz +LIMIT $2 +` + +type JobGetStuckParams struct { + StuckHorizon time.Time + Max int32 +} + +func (q *Queries) JobGetStuck(ctx context.Context, db DBTX, arg JobGetStuckParams) ([]*RiverJob, error) { + rows, err := db.QueryContext(ctx, jobGetStuck, arg.StuckHorizon, 
arg.Max) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*RiverJob + for rows.Next() { + var i RiverJob + if err := rows.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const jobInsert = `-- name: JobInsert :one +INSERT INTO river_job( + args, + attempt, + attempted_at, + created_at, + errors, + finalized_at, + kind, + max_attempts, + metadata, + priority, + queue, + scheduled_at, + state, + tags +) VALUES ( + $1::jsonb, + coalesce($2::smallint, 0), + $3, + coalesce($4::timestamptz, now()), + $5::jsonb[], + $6, + $7::text, + $8::smallint, + coalesce($9::jsonb, '{}'), + $10::smallint, + $11::text, + coalesce($12::timestamptz, now()), + $13::river_job_state, + coalesce($14::varchar(255)[], '{}') +) RETURNING id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +` + +type JobInsertParams struct { + Args json.RawMessage + Attempt int16 + AttemptedAt *time.Time + CreatedAt *time.Time + Errors []json.RawMessage + FinalizedAt *time.Time + Kind string + MaxAttempts int16 + Metadata json.RawMessage + Priority int16 + Queue string + ScheduledAt *time.Time + State JobState + Tags []string +} + +func (q *Queries) JobInsert(ctx context.Context, db DBTX, arg JobInsertParams) (*RiverJob, error) { + row := db.QueryRowContext(ctx, jobInsert, + arg.Args, + arg.Attempt, + arg.AttemptedAt, + arg.CreatedAt, + pq.Array(arg.Errors), + arg.FinalizedAt, + arg.Kind, + arg.MaxAttempts, + arg.Metadata, + arg.Priority, + arg.Queue, + arg.ScheduledAt, + arg.State, + pq.Array(arg.Tags), + ) + var i RiverJob + err := row.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ) + return &i, err +} + +const jobRescueMany = `-- name: JobRescueMany :exec +UPDATE river_job +SET + errors = array_append(errors, updated_job.error), + finalized_at = updated_job.finalized_at, + scheduled_at = updated_job.scheduled_at, + state = updated_job.state +FROM ( + SELECT + unnest($1::bigint[]) AS id, + unnest($2::jsonb[]) AS error, + nullif(unnest($3::timestamptz[]), '0001-01-01 00:00:00 +0000') AS finalized_at, + unnest($4::timestamptz[]) AS scheduled_at, + unnest($5::text[])::river_job_state AS state +) AS updated_job +WHERE river_job.id = updated_job.id +` + +type JobRescueManyParams struct { + ID []int64 + Error []json.RawMessage + FinalizedAt []time.Time + ScheduledAt []time.Time + State []string +} + +// Run by the rescuer to queue for retry or discard depending on job state. 
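The JobRescueMany method just below takes its batch as parallel arrays that the unnest calls in the query above zip back into per-row updates; a zero time in the FinalizedAt slice arrives as the '0001-01-01' sentinel that nullif converts back to NULL. A sketch of how a caller might line the slices up (the IDs and error payloads are invented; index i of every slice describes the job at ID[i]):

// Hypothetical helper for illustration only: rescues two stuck jobs, one
// discarded and one queued for retry. Written as if it lived alongside the
// generated code, so the dbsqlc types are unqualified.
func rescueTwoJobs(ctx context.Context, q *Queries, db DBTX) error {
	now := time.Now()
	return q.JobRescueMany(ctx, db, JobRescueManyParams{
		ID:          []int64{101, 102},
		Error:       []json.RawMessage{[]byte(`{"error":"rescued"}`), []byte(`{"error":"rescued"}`)},
		FinalizedAt: []time.Time{now, {}}, // zero time becomes NULL via the nullif sentinel
		ScheduledAt: []time.Time{now, now.Add(30 * time.Second)},
		State:       []string{"discarded", "retryable"},
	})
}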
+func (q *Queries) JobRescueMany(ctx context.Context, db DBTX, arg JobRescueManyParams) error { + _, err := db.ExecContext(ctx, jobRescueMany, + pq.Array(arg.ID), + pq.Array(arg.Error), + pq.Array(arg.FinalizedAt), + pq.Array(arg.ScheduledAt), + pq.Array(arg.State), + ) + return err +} + +const jobRetry = `-- name: JobRetry :one +WITH job_to_update AS ( + SELECT id + FROM river_job + WHERE river_job.id = $1 + FOR UPDATE +), +updated_job AS ( + UPDATE river_job + SET + state = 'available'::river_job_state, + scheduled_at = now(), + max_attempts = CASE WHEN attempt = max_attempts THEN max_attempts + 1 ELSE max_attempts END, + finalized_at = NULL + FROM job_to_update + WHERE river_job.id = job_to_update.id + -- Do not touch running jobs: + AND river_job.state != 'running'::river_job_state + -- If the job is already available with a prior scheduled_at, leave it alone. + AND NOT (river_job.state = 'available'::river_job_state AND river_job.scheduled_at < now()) + RETURNING river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags +) +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM river_job +WHERE id = $1::bigint + AND id NOT IN (SELECT id FROM updated_job) +UNION +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM updated_job +` + +func (q *Queries) JobRetry(ctx context.Context, db DBTX, id int64) (*RiverJob, error) { + row := db.QueryRowContext(ctx, jobRetry, id) + var i RiverJob + err := row.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ) + return &i, err +} + +const jobSchedule = `-- name: JobSchedule :one +WITH jobs_to_schedule AS ( + SELECT id + FROM river_job + WHERE + state IN ('scheduled', 'retryable') + AND queue IS NOT NULL + AND priority >= 0 + AND scheduled_at <= $2::timestamptz + ORDER BY + priority, + scheduled_at, + id + LIMIT $3::bigint + FOR UPDATE +), +river_job_scheduled AS ( + UPDATE river_job + SET state = 'available'::river_job_state + FROM jobs_to_schedule + WHERE river_job.id = jobs_to_schedule.id + RETURNING jobs_to_schedule.id, river_job.id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +) +SELECT count(*) +FROM ( + SELECT pg_notify($1, json_build_object('queue', queue)::text) + FROM river_job_scheduled +) AS notifications_sent +` + +type JobScheduleParams struct { + InsertTopic string + Now time.Time + Max int64 +} + +func (q *Queries) JobSchedule(ctx context.Context, db DBTX, arg JobScheduleParams) (int64, error) { + row := db.QueryRowContext(ctx, jobSchedule, arg.InsertTopic, arg.Now, arg.Max) + var count int64 + err := row.Scan(&count) + return count, err +} + +const jobSetStateIfRunning = `-- name: JobSetStateIfRunning :one +WITH job_to_update AS ( + SELECT + id, + $1::river_job_state IN ('retryable'::river_job_state, 'scheduled'::river_job_state) AND metadata ? 
'cancel_attempted_at' AS should_cancel + FROM river_job + WHERE id = $2::bigint + FOR UPDATE +), +updated_job AS ( + UPDATE river_job + SET + state = CASE WHEN should_cancel THEN 'cancelled'::river_job_state + ELSE $1::river_job_state END, + finalized_at = CASE WHEN should_cancel THEN now() + WHEN $3::boolean THEN $4 + ELSE finalized_at END, + errors = CASE WHEN $5::boolean THEN array_append(errors, $6::jsonb) + ELSE errors END, + max_attempts = CASE WHEN NOT should_cancel AND $7::boolean THEN $8 + ELSE max_attempts END, + scheduled_at = CASE WHEN NOT should_cancel AND $9::boolean THEN $10::timestamptz + ELSE scheduled_at END + FROM job_to_update + WHERE river_job.id = job_to_update.id + AND river_job.state = 'running'::river_job_state + RETURNING river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags +) +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM river_job +WHERE id = $2::bigint + AND id NOT IN (SELECT id FROM updated_job) +UNION +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM updated_job +` + +type JobSetStateIfRunningParams struct { + State JobState + ID int64 + FinalizedAtDoUpdate bool + FinalizedAt *time.Time + ErrorDoUpdate bool + Error json.RawMessage + MaxAttemptsUpdate bool + MaxAttempts int16 + ScheduledAtDoUpdate bool + ScheduledAt *time.Time +} + +func (q *Queries) JobSetStateIfRunning(ctx context.Context, db DBTX, arg JobSetStateIfRunningParams) (*RiverJob, error) { + row := db.QueryRowContext(ctx, jobSetStateIfRunning, + arg.State, + arg.ID, + arg.FinalizedAtDoUpdate, + arg.FinalizedAt, + arg.ErrorDoUpdate, + arg.Error, + arg.MaxAttemptsUpdate, + arg.MaxAttempts, + arg.ScheduledAtDoUpdate, + arg.ScheduledAt, + ) + var i RiverJob + err := row.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ) + return &i, err +} + +const jobUpdate = `-- name: JobUpdate :one +UPDATE river_job +SET + attempt = CASE WHEN $1::boolean THEN $2 ELSE attempt END, + attempted_at = CASE WHEN $3::boolean THEN $4 ELSE attempted_at END, + errors = CASE WHEN $5::boolean THEN $6::jsonb[] ELSE errors END, + finalized_at = CASE WHEN $7::boolean THEN $8 ELSE finalized_at END, + state = CASE WHEN $9::boolean THEN $10 ELSE state END +WHERE id = $11 +RETURNING id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +` + +type JobUpdateParams struct { + AttemptDoUpdate bool + Attempt int16 + AttemptedAtDoUpdate bool + AttemptedAt *time.Time + ErrorsDoUpdate bool + Errors []json.RawMessage + FinalizedAtDoUpdate bool + FinalizedAt *time.Time + StateDoUpdate bool + State JobState + ID int64 +} + +// A generalized update for any property on a job. This brings in a large number +// of parameters and therefore may be more suitable for testing than production. 
+func (q *Queries) JobUpdate(ctx context.Context, db DBTX, arg JobUpdateParams) (*RiverJob, error) { + row := db.QueryRowContext(ctx, jobUpdate, + arg.AttemptDoUpdate, + arg.Attempt, + arg.AttemptedAtDoUpdate, + arg.AttemptedAt, + arg.ErrorsDoUpdate, + pq.Array(arg.Errors), + arg.FinalizedAtDoUpdate, + arg.FinalizedAt, + arg.StateDoUpdate, + arg.State, + arg.ID, + ) + var i RiverJob + err := row.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + pq.Array(&i.AttemptedBy), + &i.CreatedAt, + pq.Array(&i.Errors), + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + pq.Array(&i.Tags), + ) + return &i, err +} diff --git a/riverdriver/riverdatabasesql/internal/dbsqlc/river_leader.sql.go b/riverdriver/riverdatabasesql/internal/dbsqlc/river_leader.sql.go new file mode 100644 index 00000000..75097a29 --- /dev/null +++ b/riverdriver/riverdatabasesql/internal/dbsqlc/river_leader.sql.go @@ -0,0 +1,161 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.25.0 +// source: river_leader.sql + +package dbsqlc + +import ( + "context" + "time" +) + +const leaderAttemptElect = `-- name: LeaderAttemptElect :execrows +INSERT INTO river_leader(name, leader_id, elected_at, expires_at) + VALUES ($1::text, $2::text, now(), now() + $3::interval) +ON CONFLICT (name) + DO NOTHING +` + +type LeaderAttemptElectParams struct { + Name string + LeaderID string + TTL time.Duration +} + +func (q *Queries) LeaderAttemptElect(ctx context.Context, db DBTX, arg LeaderAttemptElectParams) (int64, error) { + result, err := db.ExecContext(ctx, leaderAttemptElect, arg.Name, arg.LeaderID, arg.TTL) + if err != nil { + return 0, err + } + return result.RowsAffected() +} + +const leaderAttemptReelect = `-- name: LeaderAttemptReelect :execrows +INSERT INTO river_leader(name, leader_id, elected_at, expires_at) + VALUES ($1::text, $2::text, now(), now() + $3::interval) +ON CONFLICT (name) + DO UPDATE SET + expires_at = now() + $3::interval + WHERE + river_leader.leader_id = $2::text +` + +type LeaderAttemptReelectParams struct { + Name string + LeaderID string + TTL time.Duration +} + +func (q *Queries) LeaderAttemptReelect(ctx context.Context, db DBTX, arg LeaderAttemptReelectParams) (int64, error) { + result, err := db.ExecContext(ctx, leaderAttemptReelect, arg.Name, arg.LeaderID, arg.TTL) + if err != nil { + return 0, err + } + return result.RowsAffected() +} + +const leaderDeleteExpired = `-- name: LeaderDeleteExpired :execrows +DELETE FROM river_leader +WHERE name = $1::text + AND expires_at < now() +` + +func (q *Queries) LeaderDeleteExpired(ctx context.Context, db DBTX, name string) (int64, error) { + result, err := db.ExecContext(ctx, leaderDeleteExpired, name) + if err != nil { + return 0, err + } + return result.RowsAffected() +} + +const leaderGetElectedLeader = `-- name: LeaderGetElectedLeader :one +SELECT elected_at, expires_at, leader_id, name +FROM river_leader +WHERE name = $1 +` + +func (q *Queries) LeaderGetElectedLeader(ctx context.Context, db DBTX, name string) (*RiverLeader, error) { + row := db.QueryRowContext(ctx, leaderGetElectedLeader, name) + var i RiverLeader + err := row.Scan( + &i.ElectedAt, + &i.ExpiresAt, + &i.LeaderID, + &i.Name, + ) + return &i, err +} + +const leaderInsert = `-- name: LeaderInsert :one +INSERT INTO river_leader( + elected_at, + expires_at, + leader_id, + name +) VALUES ( + coalesce($1::timestamptz, now()), + coalesce($2::timestamptz, now() + $3::interval), + $4, + $5 +) RETURNING elected_at, 
expires_at, leader_id, name +` + +type LeaderInsertParams struct { + ElectedAt *time.Time + ExpiresAt *time.Time + TTL time.Duration + LeaderID string + Name string +} + +func (q *Queries) LeaderInsert(ctx context.Context, db DBTX, arg LeaderInsertParams) (*RiverLeader, error) { + row := db.QueryRowContext(ctx, leaderInsert, + arg.ElectedAt, + arg.ExpiresAt, + arg.TTL, + arg.LeaderID, + arg.Name, + ) + var i RiverLeader + err := row.Scan( + &i.ElectedAt, + &i.ExpiresAt, + &i.LeaderID, + &i.Name, + ) + return &i, err +} + +const leaderResign = `-- name: LeaderResign :execrows +WITH currently_held_leaders AS ( + SELECT elected_at, expires_at, leader_id, name + FROM river_leader + WHERE + name = $1::text + AND leader_id = $2::text + FOR UPDATE +), +notified_resignations AS ( + SELECT + pg_notify($3, json_build_object('name', name, 'leader_id', leader_id, 'action', 'resigned')::text), + currently_held_leaders.name + FROM currently_held_leaders +) +DELETE FROM river_leader USING notified_resignations +WHERE river_leader.name = notified_resignations.name +` + +type LeaderResignParams struct { + Name string + LeaderID string + LeadershipTopic string +} + +func (q *Queries) LeaderResign(ctx context.Context, db DBTX, arg LeaderResignParams) (int64, error) { + result, err := db.ExecContext(ctx, leaderResign, arg.Name, arg.LeaderID, arg.LeadershipTopic) + if err != nil { + return 0, err + } + return result.RowsAffected() +} diff --git a/riverdriver/riverdatabasesql/internal/dbsqlc/river_migration.sql.go b/riverdriver/riverdatabasesql/internal/dbsqlc/river_migration.sql.go index 9829149b..d267930c 100644 --- a/riverdriver/riverdatabasesql/internal/dbsqlc/river_migration.sql.go +++ b/riverdriver/riverdatabasesql/internal/dbsqlc/river_migration.sql.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. 
// versions: -// sqlc v1.24.0 +// sqlc v1.25.0 // source: river_migration.sql package dbsqlc @@ -71,9 +71,9 @@ func (q *Queries) RiverMigrationGetAll(ctx context.Context, db DBTX) ([]*RiverMi const riverMigrationInsert = `-- name: RiverMigrationInsert :one INSERT INTO river_migration ( - version + version ) VALUES ( - $1 + $1 ) RETURNING id, created_at, version ` @@ -86,10 +86,10 @@ func (q *Queries) RiverMigrationInsert(ctx context.Context, db DBTX, version int const riverMigrationInsertMany = `-- name: RiverMigrationInsertMany :many INSERT INTO river_migration ( - version + version ) SELECT - unnest($1::bigint[]) + unnest($1::bigint[]) RETURNING id, created_at, version ` diff --git a/riverdriver/riverdatabasesql/internal/dbsqlc/sqlc.yaml b/riverdriver/riverdatabasesql/internal/dbsqlc/sqlc.yaml index ec379388..af61876c 100644 --- a/riverdriver/riverdatabasesql/internal/dbsqlc/sqlc.yaml +++ b/riverdriver/riverdatabasesql/internal/dbsqlc/sqlc.yaml @@ -2,8 +2,14 @@ version: "2" sql: - engine: "postgresql" queries: + - ../../../riverpgxv5/internal/dbsqlc/pg_misc.sql + - ../../../riverpgxv5/internal/dbsqlc/river_job.sql + - ../../../riverpgxv5/internal/dbsqlc/river_leader.sql - ../../../riverpgxv5/internal/dbsqlc/river_migration.sql schema: + - ../../../riverpgxv5/internal/dbsqlc/pg_misc.sql + - ../../../riverpgxv5/internal/dbsqlc/river_job.sql + - ../../../riverpgxv5/internal/dbsqlc/river_leader.sql - ../../../riverpgxv5/internal/dbsqlc/river_migration.sql gen: go: @@ -14,11 +20,40 @@ sql: emit_methods_with_db_argument: true emit_result_struct_pointers: true + rename: + river_job_state: "JobState" + river_job_state_available: "JobStateAvailable" + river_job_state_cancelled: "JobStateCancelled" + river_job_state_completed: "JobStateCompleted" + river_job_state_discarded: "JobStateDiscarded" + river_job_state_retryable: "JobStateRetryable" + river_job_state_running: "JobStateRunning" + river_job_state_scheduled: "JobStateScheduled" + ttl: "TTL" + overrides: + - db_type: "pg_catalog.interval" + go_type: "time.Duration" + - db_type: "timestamptz" + go_type: "time.Time" + - db_type: "timestamptz" + go_type: + type: "time.Time" + pointer: true + nullable: true + + # specific columns + + # This one is necessary because `args` is nullable (this seems to have + # been an oversight, but one we've determined isn't worth correcting + # for now), and the `database/sql` variant of sqlc will give it a + # crazy type by default, so here we give it something more reasonable. + - column: "river_job.args" + go_type: + type: "[]byte" + + - column: "river_job.errors" + go_type: + type: "[]AttemptError" diff --git a/riverdriver/riverdatabasesql/river_database_sql.go b/riverdriver/riverdatabasesql/river_database_sql.go index 8b1136f6..8c294ecd 100644 --- a/riverdriver/riverdatabasesql/river_database_sql.go +++ b/riverdriver/riverdatabasesql/river_database_sql.go @@ -10,11 +10,10 @@ import ( "database/sql" "errors" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" - "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverdatabasesql/internal/dbsqlc" + "github.com/riverqueue/river/riverdriver/riverdriverutil" + "github.com/riverqueue/river/rivertype" ) // Driver is an implementation of riverdriver.Driver for database/sql. 
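Worth noting before the method stubs that follow: for now this driver wires up only the migration queries (plus Exec and TableExists), and every job and leader method returns riverdriver.ErrNotImplemented. A sketch of the one workflow it does support today (the DSN is a placeholder; lib/pq supplies the "postgres" database/sql driver, per the go.mod above):

package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // registers the "postgres" database/sql driver

	"github.com/riverqueue/river/riverdriver/riverdatabasesql"
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/river_dev?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	exec := riverdatabasesql.New(db).GetExecutor()

	// Migration queries are implemented; job and leader queries are not yet.
	migrations, err := exec.MigrationGetAll(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, migration := range migrations {
		fmt.Printf("version %d applied at %s\n", migration.Version, migration.CreatedAt)
	}
}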
@@ -36,15 +35,17 @@ func New(dbPool *sql.DB) *Driver { return &Driver{dbPool: dbPool, queries: dbsqlc.New()} } -func (d *Driver) GetDBPool() *pgxpool.Pool { panic(riverdriver.ErrNotImplemented) } func (d *Driver) GetExecutor() riverdriver.Executor { return &Executor{d.dbPool, d.dbPool, dbsqlc.New()} } -func (d *Driver) UnwrapExecutor(tx *sql.Tx) riverdriver.Executor { - return &Executor{nil, tx, dbsqlc.New()} +func (d *Driver) GetListener() riverdriver.Listener { panic(riverdriver.ErrNotImplemented) } + +func (d *Driver) HasPool() bool { return d.dbPool != nil } + +func (d *Driver) UnwrapExecutor(tx *sql.Tx) riverdriver.ExecutorTx { + return &ExecutorTx{Executor: Executor{nil, tx, dbsqlc.New()}, tx: tx} } -func (d *Driver) UnwrapTx(tx *sql.Tx) pgx.Tx { panic(riverdriver.ErrNotImplemented) } type Executor struct { dbPool *sql.DB @@ -69,21 +70,130 @@ func (e *Executor) Exec(ctx context.Context, sql string) (struct{}, error) { return struct{}{}, interpretError(err) } +func (e *Executor) JobCancel(ctx context.Context, params *riverdriver.JobCancelParams) (*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobDeleteBefore(ctx context.Context, params *riverdriver.JobDeleteBeforeParams) (int, error) { + return 0, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobGetAvailable(ctx context.Context, params *riverdriver.JobGetAvailableParams) ([]*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobGetByID(ctx context.Context, id int64) (*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobGetByIDMany(ctx context.Context, id []int64) ([]*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobGetByKindAndUniqueProperties(ctx context.Context, params *riverdriver.JobGetByKindAndUniquePropertiesParams) (*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobGetByKindMany(ctx context.Context, kind []string) ([]*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobGetStuck(ctx context.Context, params *riverdriver.JobGetStuckParams) ([]*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobInsert(ctx context.Context, params *riverdriver.JobInsertParams) (*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobInsertMany(ctx context.Context, params []*riverdriver.JobInsertParams) (int64, error) { + return 0, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobList(ctx context.Context, sql string, namedArgs map[string]any) ([]*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobListFields() string { + panic(riverdriver.ErrNotImplemented) +} + +func (e *Executor) JobRescueMany(ctx context.Context, params *riverdriver.JobRescueManyParams) (*struct{}, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobRetry(ctx context.Context, id int64) (*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobSchedule(ctx context.Context, params *riverdriver.JobScheduleParams) (int, error) { + return 0, riverdriver.ErrNotImplemented +} + +func (e *Executor) JobSetStateIfRunning(ctx context.Context, params *riverdriver.JobSetStateIfRunningParams) (*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e 
*Executor) JobUpdate(ctx context.Context, params *riverdriver.JobUpdateParams) (*rivertype.JobRow, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) LeaderAttemptElect(ctx context.Context, params *riverdriver.LeaderElectParams) (bool, error) { + return false, riverdriver.ErrNotImplemented +} + +func (e *Executor) LeaderAttemptReelect(ctx context.Context, params *riverdriver.LeaderElectParams) (bool, error) { + return false, riverdriver.ErrNotImplemented +} + +func (e *Executor) LeaderDeleteExpired(ctx context.Context, name string) (int, error) { + return 0, riverdriver.ErrNotImplemented +} + +func (e *Executor) LeaderGetElectedLeader(ctx context.Context, name string) (*riverdriver.Leader, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) LeaderInsert(ctx context.Context, params *riverdriver.LeaderInsertParams) (*riverdriver.Leader, error) { + return nil, riverdriver.ErrNotImplemented +} + +func (e *Executor) LeaderResign(ctx context.Context, params *riverdriver.LeaderResignParams) (bool, error) { + return false, riverdriver.ErrNotImplemented +} + func (e *Executor) MigrationDeleteByVersionMany(ctx context.Context, versions []int) ([]*riverdriver.Migration, error) { migrations, err := e.queries.RiverMigrationDeleteByVersionMany(ctx, e.dbtx, - mapSlice(versions, func(v int) int64 { return int64(v) })) - return mapMigrations(migrations), interpretError(err) + riverdriverutil.Map(versions, func(v int) int64 { return int64(v) })) + if err != nil { + return nil, interpretError(err) + } + return riverdriverutil.Map(migrations, migrationFromInternal), nil } func (e *Executor) MigrationGetAll(ctx context.Context) ([]*riverdriver.Migration, error) { migrations, err := e.queries.RiverMigrationGetAll(ctx, e.dbtx) - return mapMigrations(migrations), interpretError(err) + if err != nil { + return nil, interpretError(err) + } + return riverdriverutil.Map(migrations, migrationFromInternal), nil } func (e *Executor) MigrationInsertMany(ctx context.Context, versions []int) ([]*riverdriver.Migration, error) { migrations, err := e.queries.RiverMigrationInsertMany(ctx, e.dbtx, - mapSlice(versions, func(v int) int64 { return int64(v) })) - return mapMigrations(migrations), interpretError(err) + riverdriverutil.Map(versions, func(v int) int64 { return int64(v) })) + if err != nil { + return nil, interpretError(err) + } + return riverdriverutil.Map(migrations, migrationFromInternal), nil +} + +func (e *Executor) Notify(ctx context.Context, topic string, payload string) error { + return riverdriver.ErrNotImplemented +} + +func (e *Executor) PGAdvisoryXactLock(ctx context.Context, key int64) (*struct{}, error) { + return nil, riverdriver.ErrNotImplemented } func (e *Executor) TableExists(ctx context.Context, tableName string) (bool, error) { @@ -108,32 +218,15 @@ func (t *ExecutorTx) Rollback(ctx context.Context) error { func interpretError(err error) error { if errors.Is(err, sql.ErrNoRows) { - return riverdriver.ErrNoRows + return rivertype.ErrNotFound } return err } -func mapMigrations(migrations []*dbsqlc.RiverMigration) []*riverdriver.Migration { - if migrations == nil { - return nil - } - - return mapSlice(migrations, func(m *dbsqlc.RiverMigration) *riverdriver.Migration { - return &riverdriver.Migration{ - ID: int(m.ID), - CreatedAt: m.CreatedAt, - Version: int(m.Version), - } - }) -} - -// mapSlice manipulates a slice and transforms it to a slice of another type. 
-func mapSlice[T any, R any](collection []T, mapFunc func(T) R) []R { - result := make([]R, len(collection)) - - for i, item := range collection { - result[i] = mapFunc(item) +func migrationFromInternal(internal *dbsqlc.RiverMigration) *riverdriver.Migration { + return &riverdriver.Migration{ + ID: int(internal.ID), + CreatedAt: internal.CreatedAt, + Version: int(internal.Version), } - - return result } diff --git a/riverdriver/riverdatabasesql/river_database_sql_test.go b/riverdriver/riverdatabasesql/river_database_sql_test.go index f97be426..49d69a38 100644 --- a/riverdriver/riverdatabasesql/river_database_sql_test.go +++ b/riverdriver/riverdatabasesql/river_database_sql_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/rivertype" ) // Verify interface compliance. @@ -36,6 +37,6 @@ func TestInterpretError(t *testing.T) { t.Parallel() require.EqualError(t, interpretError(errors.New("an error")), "an error") - require.ErrorIs(t, interpretError(sql.ErrNoRows), riverdriver.ErrNoRows) + require.ErrorIs(t, interpretError(sql.ErrNoRows), rivertype.ErrNotFound) require.NoError(t, interpretError(nil)) } diff --git a/riverdriver/riverdriverutil/river_driver_util.go b/riverdriver/riverdriverutil/river_driver_util.go new file mode 100644 index 00000000..0afec672 --- /dev/null +++ b/riverdriver/riverdriverutil/river_driver_util.go @@ -0,0 +1,16 @@ +package riverdriverutil + +// Map manipulates a slice and transforms it to a slice of another type. +func Map[T any, R any](collection []T, mapFunc func(T) R) []R { + if collection == nil { + return nil + } + + result := make([]R, len(collection)) + + for i, item := range collection { + result[i] = mapFunc(item) + } + + return result +} diff --git a/riverdriver/riverpgxv5/go.mod b/riverdriver/riverpgxv5/go.mod index cdbd7d06..eb0705d6 100644 --- a/riverdriver/riverpgxv5/go.mod +++ b/riverdriver/riverpgxv5/go.mod @@ -1,12 +1,15 @@ module github.com/riverqueue/river/riverdriver/riverpgxv5 -go 1.21 +go 1.21.4 replace github.com/riverqueue/river/riverdriver => ../ +replace github.com/riverqueue/river/rivertype => ../../rivertype + require ( github.com/jackc/pgx/v5 v5.5.0 github.com/riverqueue/river/riverdriver v0.0.20 + github.com/riverqueue/river/rivertype v0.0.20 github.com/stretchr/testify v1.8.1 ) diff --git a/internal/dbsqlc/copyfrom.go b/riverdriver/riverpgxv5/internal/dbsqlc/copyfrom.go similarity index 79% rename from internal/dbsqlc/copyfrom.go rename to riverdriver/riverpgxv5/internal/dbsqlc/copyfrom.go index 3c8ea698..c1c9c3b9 100644 --- a/internal/dbsqlc/copyfrom.go +++ b/riverdriver/riverpgxv5/internal/dbsqlc/copyfrom.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. 
// versions: -// sqlc v1.24.0 +// sqlc v1.25.0 // source: copyfrom.go package dbsqlc @@ -30,7 +30,10 @@ func (r *iteratorForJobInsertMany) Next() bool { func (r iteratorForJobInsertMany) Values() ([]interface{}, error) { return []interface{}{ r.rows[0].Args, + r.rows[0].Attempt, + r.rows[0].AttemptedAt, r.rows[0].Errors, + r.rows[0].FinalizedAt, r.rows[0].Kind, r.rows[0].MaxAttempts, r.rows[0].Metadata, @@ -47,5 +50,5 @@ func (r iteratorForJobInsertMany) Err() error { } func (q *Queries) JobInsertMany(ctx context.Context, db DBTX, arg []JobInsertManyParams) (int64, error) { - return db.CopyFrom(ctx, []string{"river_job"}, []string{"args", "errors", "kind", "max_attempts", "metadata", "priority", "queue", "scheduled_at", "state", "tags"}, &iteratorForJobInsertMany{rows: arg}) + return db.CopyFrom(ctx, []string{"river_job"}, []string{"args", "attempt", "attempted_at", "errors", "finalized_at", "kind", "max_attempts", "metadata", "priority", "queue", "scheduled_at", "state", "tags"}, &iteratorForJobInsertMany{rows: arg}) } diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/db.go b/riverdriver/riverpgxv5/internal/dbsqlc/db.go index 533f98ed..f403cafe 100644 --- a/riverdriver/riverpgxv5/internal/dbsqlc/db.go +++ b/riverdriver/riverpgxv5/internal/dbsqlc/db.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. // versions: -// sqlc v1.24.0 +// sqlc v1.25.0 package dbsqlc @@ -15,6 +15,7 @@ type DBTX interface { Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) Query(context.Context, string, ...interface{}) (pgx.Rows, error) QueryRow(context.Context, string, ...interface{}) pgx.Row + CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) } func New() *Queries { diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/error.go b/riverdriver/riverpgxv5/internal/dbsqlc/error.go new file mode 100644 index 00000000..5a9ad974 --- /dev/null +++ b/riverdriver/riverpgxv5/internal/dbsqlc/error.go @@ -0,0 +1,10 @@ +package dbsqlc + +import "time" + +type AttemptError struct { + At time.Time `json:"at"` + Attempt uint16 `json:"attempt"` + Error string `json:"error"` + Trace string `json:"trace"` +} diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/models.go b/riverdriver/riverpgxv5/internal/dbsqlc/models.go index c5b6578c..c0fd187a 100644 --- a/riverdriver/riverpgxv5/internal/dbsqlc/models.go +++ b/riverdriver/riverpgxv5/internal/dbsqlc/models.go @@ -1,13 +1,88 @@ // Code generated by sqlc. DO NOT EDIT. // versions: -// sqlc v1.24.0 +// sqlc v1.25.0 package dbsqlc import ( + "database/sql/driver" + "fmt" "time" ) +type JobState string + +const ( + JobStateAvailable JobState = "available" + JobStateCancelled JobState = "cancelled" + JobStateCompleted JobState = "completed" + JobStateDiscarded JobState = "discarded" + JobStateRetryable JobState = "retryable" + JobStateRunning JobState = "running" + JobStateScheduled JobState = "scheduled" +) + +func (e *JobState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = JobState(s) + case string: + *e = JobState(s) + default: + return fmt.Errorf("unsupported scan type for JobState: %T", src) + } + return nil +} + +type NullJobState struct { + JobState JobState + Valid bool // Valid is true if JobState is not NULL +} + +// Scan implements the Scanner interface. 
+func (ns *NullJobState) Scan(value interface{}) error { + if value == nil { + ns.JobState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.JobState.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullJobState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.JobState), nil +} + +type RiverJob struct { + ID int64 + Args []byte + Attempt int16 + AttemptedAt *time.Time + AttemptedBy []string + CreatedAt time.Time + Errors []AttemptError + FinalizedAt *time.Time + Kind string + MaxAttempts int16 + Metadata []byte + Priority int16 + Queue string + State JobState + ScheduledAt time.Time + Tags []string +} + +type RiverLeader struct { + ElectedAt time.Time + ExpiresAt time.Time + LeaderID string + Name string +} + type RiverMigration struct { ID int64 CreatedAt time.Time diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/pg_misc.sql b/riverdriver/riverpgxv5/internal/dbsqlc/pg_misc.sql new file mode 100644 index 00000000..9c6e4606 --- /dev/null +++ b/riverdriver/riverpgxv5/internal/dbsqlc/pg_misc.sql @@ -0,0 +1,5 @@ +-- name: PGAdvisoryXactLock :exec +SELECT pg_advisory_xact_lock(@key); + +-- name: PGNotify :exec +SELECT pg_notify(@topic, @payload); \ No newline at end of file diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/pg_misc.sql.go b/riverdriver/riverpgxv5/internal/dbsqlc/pg_misc.sql.go new file mode 100644 index 00000000..44cf5c44 --- /dev/null +++ b/riverdriver/riverpgxv5/internal/dbsqlc/pg_misc.sql.go @@ -0,0 +1,33 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.25.0 +// source: pg_misc.sql + +package dbsqlc + +import ( + "context" +) + +const pGAdvisoryXactLock = `-- name: PGAdvisoryXactLock :exec +SELECT pg_advisory_xact_lock($1) +` + +func (q *Queries) PGAdvisoryXactLock(ctx context.Context, db DBTX, key int64) error { + _, err := db.Exec(ctx, pGAdvisoryXactLock, key) + return err +} + +const pGNotify = `-- name: PGNotify :exec +SELECT pg_notify($1, $2) +` + +type PGNotifyParams struct { + Topic string + Payload string +} + +func (q *Queries) PGNotify(ctx context.Context, db DBTX, arg PGNotifyParams) error { + _, err := db.Exec(ctx, pGNotify, arg.Topic, arg.Payload) + return err +} diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql b/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql new file mode 100644 index 00000000..c33c440d --- /dev/null +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql @@ -0,0 +1,315 @@ +CREATE TYPE river_job_state AS ENUM( + 'available', + 'cancelled', + 'completed', + 'discarded', + 'retryable', + 'running', + 'scheduled' +); + +CREATE TABLE river_job( + id bigserial PRIMARY KEY, + args jsonb, + attempt smallint NOT NULL DEFAULT 0, + attempted_at timestamptz, + attempted_by text[], + created_at timestamptz NOT NULL DEFAULT NOW(), + errors jsonb[], + finalized_at timestamptz, + kind text NOT NULL, + max_attempts smallint NOT NULL, + metadata jsonb NOT NULL DEFAULT '{}' ::jsonb, + priority smallint NOT NULL DEFAULT 1, + queue text NOT NULL DEFAULT 'default' ::text, + state river_job_state NOT NULL DEFAULT 'available' ::river_job_state, + scheduled_at timestamptz NOT NULL DEFAULT NOW(), + tags varchar(255)[] NOT NULL DEFAULT '{}' ::varchar(255)[], + CONSTRAINT finalized_or_finalized_at_null CHECK ((state IN ('cancelled', 'completed', 'discarded') AND finalized_at IS NOT NULL) OR finalized_at IS NULL), + CONSTRAINT priority_in_range CHECK (priority >= 1 AND priority <= 4), + CONSTRAINT queue_length CHECK 
(char_length(queue) > 0 AND char_length(queue) < 128), + CONSTRAINT kind_length CHECK (char_length(kind) > 0 AND char_length(kind) < 128) +); + +-- name: JobCancel :one +WITH locked_job AS ( + SELECT + id, queue, state, finalized_at + FROM river_job + WHERE river_job.id = @id + FOR UPDATE +), +notification AS ( + SELECT + id, + pg_notify(@job_control_topic, json_build_object('action', 'cancel', 'job_id', id, 'queue', queue)::text) + FROM + locked_job + WHERE + state NOT IN ('cancelled', 'completed', 'discarded') + AND finalized_at IS NULL +), +updated_job AS ( + UPDATE river_job + SET + -- If the job is actively running, we want to let its current client and + -- producer handle the cancellation. Otherwise, immediately cancel it. + state = CASE WHEN state = 'running'::river_job_state THEN state ELSE 'cancelled'::river_job_state END, + finalized_at = CASE WHEN state = 'running'::river_job_state THEN finalized_at ELSE now() END, + -- Mark the job as cancelled by query so that the rescuer knows not to + -- rescue it, even if it gets stuck in the running state: + metadata = jsonb_set(metadata, '{cancel_attempted_at}'::text[], @cancel_attempted_at::jsonb, true) + FROM notification + WHERE river_job.id = notification.id + RETURNING river_job.* +) +SELECT * +FROM river_job +WHERE id = @id::bigint + AND id NOT IN (SELECT id FROM updated_job) +UNION +SELECT * +FROM updated_job; + +-- name: JobDeleteBefore :one +WITH deleted_jobs AS ( + DELETE FROM river_job + WHERE id IN ( + SELECT id + FROM river_job + WHERE + (state = 'cancelled' AND finalized_at < @cancelled_finalized_at_horizon::timestamptz) OR + (state = 'completed' AND finalized_at < @completed_finalized_at_horizon::timestamptz) OR + (state = 'discarded' AND finalized_at < @discarded_finalized_at_horizon::timestamptz) + ORDER BY id + LIMIT @max::bigint + ) + RETURNING * +) +SELECT count(*) +FROM deleted_jobs; + +-- name: JobGetAvailable :many +WITH locked_jobs AS ( + SELECT + * + FROM + river_job + WHERE + state = 'available'::river_job_state + AND queue = @queue::text + AND scheduled_at <= now() + ORDER BY + priority ASC, + scheduled_at ASC, + id ASC + LIMIT @max::integer + FOR UPDATE + SKIP LOCKED +) +UPDATE + river_job +SET + state = 'running'::river_job_state, + attempt = river_job.attempt + 1, + attempted_at = now(), + attempted_by = array_append(river_job.attempted_by, @attempted_by::text) +FROM + locked_jobs +WHERE + river_job.id = locked_jobs.id +RETURNING + river_job.*; + +-- name: JobGetByKindMany :many +SELECT * +FROM river_job +WHERE kind = any(@kind::text[]) +ORDER BY id; + +-- name: JobGetByKindAndUniqueProperties :one +SELECT * +FROM river_job +WHERE kind = @kind + AND CASE WHEN @by_args::boolean THEN args = @args ELSE true END + AND CASE WHEN @by_created_at::boolean THEN tstzrange(@created_at_start::timestamptz, @created_at_end::timestamptz, '[)') @> created_at ELSE true END + AND CASE WHEN @by_queue::boolean THEN queue = @queue ELSE true END + AND CASE WHEN @by_state::boolean THEN state::text = any(@state::text[]) ELSE true END; + +-- name: JobGetByID :one +SELECT * +FROM river_job +WHERE id = @id +LIMIT 1; + +-- name: JobGetByIDMany :many +SELECT * +FROM river_job +WHERE id = any(@id::bigint[]) +ORDER BY id; + +-- name: JobGetStuck :many +SELECT * +FROM river_job +WHERE state = 'running'::river_job_state + AND attempted_at < @stuck_horizon::timestamptz +LIMIT @max; + +-- name: JobInsert :one +INSERT INTO river_job( + args, + attempt, + attempted_at, + created_at, + errors, + finalized_at, + kind, + max_attempts, + metadata, 
+ priority, + queue, + scheduled_at, + state, + tags +) VALUES ( + @args::jsonb, + coalesce(@attempt::smallint, 0), + @attempted_at, + coalesce(sqlc.narg('created_at')::timestamptz, now()), + @errors::jsonb[], + @finalized_at, + @kind::text, + @max_attempts::smallint, + coalesce(@metadata::jsonb, '{}'), + @priority::smallint, + @queue::text, + coalesce(sqlc.narg('scheduled_at')::timestamptz, now()), + @state::river_job_state, + coalesce(@tags::varchar(255)[], '{}') +) RETURNING *; + +-- Run by the rescuer to queue for retry or discard depending on job state. +-- name: JobRescueMany :exec +UPDATE river_job +SET + errors = array_append(errors, updated_job.error), + finalized_at = updated_job.finalized_at, + scheduled_at = updated_job.scheduled_at, + state = updated_job.state +FROM ( + SELECT + unnest(@id::bigint[]) AS id, + unnest(@error::jsonb[]) AS error, + nullif(unnest(@finalized_at::timestamptz[]), '0001-01-01 00:00:00 +0000') AS finalized_at, + unnest(@scheduled_at::timestamptz[]) AS scheduled_at, + unnest(@state::text[])::river_job_state AS state +) AS updated_job +WHERE river_job.id = updated_job.id; + +-- name: JobRetry :one +WITH job_to_update AS ( + SELECT id + FROM river_job + WHERE river_job.id = @id + FOR UPDATE +), +updated_job AS ( + UPDATE river_job + SET + state = 'available'::river_job_state, + scheduled_at = now(), + max_attempts = CASE WHEN attempt = max_attempts THEN max_attempts + 1 ELSE max_attempts END, + finalized_at = NULL + FROM job_to_update + WHERE river_job.id = job_to_update.id + -- Do not touch running jobs: + AND river_job.state != 'running'::river_job_state + -- If the job is already available with a prior scheduled_at, leave it alone. + AND NOT (river_job.state = 'available'::river_job_state AND river_job.scheduled_at < now()) + RETURNING river_job.* +) +SELECT * +FROM river_job +WHERE id = @id::bigint + AND id NOT IN (SELECT id FROM updated_job) +UNION +SELECT * +FROM updated_job; + +-- name: JobSchedule :one +WITH jobs_to_schedule AS ( + SELECT id + FROM river_job + WHERE + state IN ('scheduled', 'retryable') + AND queue IS NOT NULL + AND priority >= 0 + AND scheduled_at <= @now::timestamptz + ORDER BY + priority, + scheduled_at, + id + LIMIT @max::bigint + FOR UPDATE +), +river_job_scheduled AS ( + UPDATE river_job + SET state = 'available'::river_job_state + FROM jobs_to_schedule + WHERE river_job.id = jobs_to_schedule.id + RETURNING * +) +SELECT count(*) +FROM ( + SELECT pg_notify(@insert_topic, json_build_object('queue', queue)::text) + FROM river_job_scheduled +) AS notifications_sent; + +-- name: JobSetStateIfRunning :one +WITH job_to_update AS ( + SELECT + id, + @state::river_job_state IN ('retryable'::river_job_state, 'scheduled'::river_job_state) AND metadata ? 
'cancel_attempted_at' AS should_cancel + FROM river_job + WHERE id = @id::bigint + FOR UPDATE +), +updated_job AS ( + UPDATE river_job + SET + state = CASE WHEN should_cancel THEN 'cancelled'::river_job_state + ELSE @state::river_job_state END, + finalized_at = CASE WHEN should_cancel THEN now() + WHEN @finalized_at_do_update::boolean THEN @finalized_at + ELSE finalized_at END, + errors = CASE WHEN @error_do_update::boolean THEN array_append(errors, @error::jsonb) + ELSE errors END, + max_attempts = CASE WHEN NOT should_cancel AND @max_attempts_update::boolean THEN @max_attempts + ELSE max_attempts END, + scheduled_at = CASE WHEN NOT should_cancel AND @scheduled_at_do_update::boolean THEN sqlc.narg('scheduled_at')::timestamptz + ELSE scheduled_at END + FROM job_to_update + WHERE river_job.id = job_to_update.id + AND river_job.state = 'running'::river_job_state + RETURNING river_job.* +) +SELECT * +FROM river_job +WHERE id = @id::bigint + AND id NOT IN (SELECT id FROM updated_job) +UNION +SELECT * +FROM updated_job; + +-- A generalized update for any property on a job. This brings in a large number +-- of parameters and therefore may be more suitable for testing than production. +-- name: JobUpdate :one +UPDATE river_job +SET + attempt = CASE WHEN @attempt_do_update::boolean THEN @attempt ELSE attempt END, + attempted_at = CASE WHEN @attempted_at_do_update::boolean THEN @attempted_at ELSE attempted_at END, + errors = CASE WHEN @errors_do_update::boolean THEN @errors::jsonb[] ELSE errors END, + finalized_at = CASE WHEN @finalized_at_do_update::boolean THEN @finalized_at ELSE finalized_at END, + state = CASE WHEN @state_do_update::boolean THEN @state ELSE state END +WHERE id = @id +RETURNING *; \ No newline at end of file diff --git a/internal/dbsqlc/river_job.sql.go b/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql.go similarity index 58% rename from internal/dbsqlc/river_job.sql.go rename to riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql.go index 91886bfa..9e41b929 100644 --- a/internal/dbsqlc/river_job.sql.go +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. // versions: -// sqlc v1.24.0 +// sqlc v1.25.0 // source: river_job.sql package dbsqlc @@ -12,41 +12,36 @@ import ( const jobCancel = `-- name: JobCancel :one WITH locked_job AS ( - SELECT - id, queue, state, finalized_at - FROM river_job - WHERE - river_job.id = $1 - FOR UPDATE + SELECT + id, queue, state, finalized_at + FROM river_job + WHERE river_job.id = $1 + FOR UPDATE ), - notification AS ( - SELECT - id, - pg_notify($2, json_build_object('action', 'cancel', 'job_id', id, 'queue', queue)::text) - FROM - locked_job - WHERE - state NOT IN ('cancelled', 'completed', 'discarded') - AND finalized_at IS NULL + SELECT + id, + pg_notify($2, json_build_object('action', 'cancel', 'job_id', id, 'queue', queue)::text) + FROM + locked_job + WHERE + state NOT IN ('cancelled', 'completed', 'discarded') + AND finalized_at IS NULL ), - updated_job AS ( - UPDATE river_job - SET - -- If the job is actively running, we want to let its current client and - -- producer handle the cancellation. Otherwise, immediately cancel it. 
- state = CASE WHEN state = 'running'::river_job_state THEN state ELSE 'cancelled'::river_job_state END, - finalized_at = CASE WHEN state = 'running'::river_job_state THEN finalized_at ELSE now() END, - -- Mark the job as cancelled by query so that the rescuer knows not to - -- rescue it, even if it gets stuck in the running state: - metadata = jsonb_set(metadata, '{cancel_attempted_at}'::text[], $3::jsonb, true) - FROM notification - WHERE - river_job.id = notification.id - RETURNING river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags + UPDATE river_job + SET + -- If the job is actively running, we want to let its current client and + -- producer handle the cancellation. Otherwise, immediately cancel it. + state = CASE WHEN state = 'running'::river_job_state THEN state ELSE 'cancelled'::river_job_state END, + finalized_at = CASE WHEN state = 'running'::river_job_state THEN finalized_at ELSE now() END, + -- Mark the job as cancelled by query so that the rescuer knows not to + -- rescue it, even if it gets stuck in the running state: + metadata = jsonb_set(metadata, '{cancel_attempted_at}'::text[], $3::jsonb, true) + FROM notification + WHERE river_job.id = notification.id + RETURNING river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags ) - SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags FROM river_job WHERE id = $1::bigint @@ -86,45 +81,23 @@ func (q *Queries) JobCancel(ctx context.Context, db DBTX, arg JobCancelParams) ( return &i, err } -const jobCountRunning = `-- name: JobCountRunning :one -SELECT - count(*) -FROM - river_job -WHERE - state = 'running' -` - -func (q *Queries) JobCountRunning(ctx context.Context, db DBTX) (int64, error) { - row := db.QueryRow(ctx, jobCountRunning) - var count int64 - err := row.Scan(&count) - return count, err -} - const jobDeleteBefore = `-- name: JobDeleteBefore :one WITH deleted_jobs AS ( - DELETE FROM - river_job - WHERE - id IN ( - SELECT - id - FROM - river_job - WHERE - (state = 'cancelled' AND finalized_at < $1::timestamptz) OR - (state = 'completed' AND finalized_at < $2::timestamptz) OR - (state = 'discarded' AND finalized_at < $3::timestamptz) - ORDER BY id - LIMIT $4::bigint + DELETE FROM river_job + WHERE id IN ( + SELECT id + FROM river_job + WHERE + (state = 'cancelled' AND finalized_at < $1::timestamptz) OR + (state = 'completed' AND finalized_at < $2::timestamptz) OR + (state = 'discarded' AND finalized_at < $3::timestamptz) + ORDER BY id + LIMIT $4::bigint ) - RETURNING id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags + RETURNING id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags ) -SELECT - count(*) -FROM - deleted_jobs +SELECT count(*) +FROM deleted_jobs ` type JobDeleteBeforeParams struct { @@ -148,44 
+121,45 @@ func (q *Queries) JobDeleteBefore(ctx context.Context, db DBTX, arg JobDeleteBef const jobGetAvailable = `-- name: JobGetAvailable :many WITH locked_jobs AS ( - SELECT - id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags - FROM - river_job - WHERE - state = 'available'::river_job_state - AND queue = $2::text - AND scheduled_at <= now() - ORDER BY - priority ASC, - scheduled_at ASC, - id ASC - LIMIT $3::integer - FOR UPDATE - SKIP LOCKED) + SELECT + id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags + FROM + river_job + WHERE + state = 'available'::river_job_state + AND queue = $2::text + AND scheduled_at <= now() + ORDER BY + priority ASC, + scheduled_at ASC, + id ASC + LIMIT $3::integer + FOR UPDATE + SKIP LOCKED +) UPDATE - river_job + river_job SET - state = 'running'::river_job_state, - attempt = river_job.attempt + 1, - attempted_at = now(), - attempted_by = array_append(river_job.attempted_by, $1::text) + state = 'running'::river_job_state, + attempt = river_job.attempt + 1, + attempted_at = now(), + attempted_by = array_append(river_job.attempted_by, $1::text) FROM - locked_jobs + locked_jobs WHERE - river_job.id = locked_jobs.id + river_job.id = locked_jobs.id RETURNING - river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags + river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags ` type JobGetAvailableParams struct { - Worker string - Queue string - LimitCount int32 + AttemptedBy string + Queue string + Max int32 } func (q *Queries) JobGetAvailable(ctx context.Context, db DBTX, arg JobGetAvailableParams) ([]*RiverJob, error) { - rows, err := db.Query(ctx, jobGetAvailable, arg.Worker, arg.Queue, arg.LimitCount) + rows, err := db.Query(ctx, jobGetAvailable, arg.AttemptedBy, arg.Queue, arg.Max) if err != nil { return nil, err } @@ -222,12 +196,9 @@ func (q *Queries) JobGetAvailable(ctx context.Context, db DBTX, arg JobGetAvaila } const jobGetByID = `-- name: JobGetByID :one -SELECT - id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags -FROM - river_job -WHERE - id = $1 +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM river_job +WHERE id = $1 LIMIT 1 ` @@ -256,60 +227,14 @@ func (q *Queries) JobGetByID(ctx context.Context, db DBTX, id int64) (*RiverJob, } const jobGetByIDMany = `-- name: JobGetByIDMany :many -SELECT - id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags -FROM - river_job -WHERE - id = any($1::bigint[]) -` - -func (q *Queries) JobGetByIDMany(ctx context.Context, db DBTX, id []int64) ([]*RiverJob, error) { - rows, err := 
db.Query(ctx, jobGetByIDMany, id) - if err != nil { - return nil, err - } - defer rows.Close() - var items []*RiverJob - for rows.Next() { - var i RiverJob - if err := rows.Scan( - &i.ID, - &i.Args, - &i.Attempt, - &i.AttemptedAt, - &i.AttemptedBy, - &i.CreatedAt, - &i.Errors, - &i.FinalizedAt, - &i.Kind, - &i.MaxAttempts, - &i.Metadata, - &i.Priority, - &i.Queue, - &i.State, - &i.ScheduledAt, - &i.Tags, - ); err != nil { - return nil, err - } - items = append(items, &i) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - -const jobGetByKind = `-- name: JobGetByKind :many SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags FROM river_job -WHERE kind = $1 +WHERE id = any($1::bigint[]) ORDER BY id ` -func (q *Queries) JobGetByKind(ctx context.Context, db DBTX, kind string) ([]*RiverJob, error) { - rows, err := db.Query(ctx, jobGetByKind, kind) +func (q *Queries) JobGetByIDMany(ctx context.Context, db DBTX, id []int64) ([]*RiverJob, error) { + rows, err := db.Query(ctx, jobGetByIDMany, id) if err != nil { return nil, err } @@ -349,10 +274,10 @@ const jobGetByKindAndUniqueProperties = `-- name: JobGetByKindAndUniquePropertie SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags FROM river_job WHERE kind = $1 - AND CASE WHEN $2::boolean THEN args = $3 ELSE true END - AND CASE WHEN $4::boolean THEN tstzrange($5::timestamptz, $6::timestamptz, '[)') @> created_at ELSE true END - AND CASE WHEN $7::boolean THEN queue = $8 ELSE true END - AND CASE WHEN $9::boolean THEN state::text = any($10::text[]) ELSE true END + AND CASE WHEN $2::boolean THEN args = $3 ELSE true END + AND CASE WHEN $4::boolean THEN tstzrange($5::timestamptz, $6::timestamptz, '[)') @> created_at ELSE true END + AND CASE WHEN $7::boolean THEN queue = $8 ELSE true END + AND CASE WHEN $9::boolean THEN state::text = any($10::text[]) ELSE true END ` type JobGetByKindAndUniquePropertiesParams struct { @@ -448,23 +373,20 @@ func (q *Queries) JobGetByKindMany(ctx context.Context, db DBTX, kind []string) } const jobGetStuck = `-- name: JobGetStuck :many -SELECT - id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags -FROM - river_job -WHERE - state = 'running'::river_job_state - AND attempted_at < $1::timestamptz -LIMIT $2::integer +SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags +FROM river_job +WHERE state = 'running'::river_job_state + AND attempted_at < $1::timestamptz +LIMIT $2 ` type JobGetStuckParams struct { StuckHorizon time.Time - LimitCount int32 + Max int32 } func (q *Queries) JobGetStuck(ctx context.Context, db DBTX, arg JobGetStuckParams) ([]*RiverJob, error) { - rows, err := db.Query(ctx, jobGetStuck, arg.StuckHorizon, arg.LimitCount) + rows, err := db.Query(ctx, jobGetStuck, arg.StuckHorizon, arg.Max) if err != nil { return nil, err } @@ -502,35 +424,35 @@ func (q *Queries) JobGetStuck(ctx context.Context, db DBTX, arg JobGetStuckParam const jobInsert = `-- name: JobInsert :one INSERT INTO river_job( - args, - attempt, - attempted_at, - created_at, - errors, - finalized_at, - kind, - max_attempts, - metadata, - priority, - queue, - scheduled_at, - state, - tags + args, 
+ attempt, + attempted_at, + created_at, + errors, + finalized_at, + kind, + max_attempts, + metadata, + priority, + queue, + scheduled_at, + state, + tags ) VALUES ( - $1::jsonb, - coalesce($2::smallint, 0), - $3, - coalesce($4::timestamptz, now()), - $5::jsonb[], - $6, - $7::text, - $8::smallint, - coalesce($9::jsonb, '{}'), - $10::smallint, - $11::text, - coalesce($12::timestamptz, now()), - $13::river_job_state, - coalesce($14::varchar(255)[], '{}') + $1::jsonb, + coalesce($2::smallint, 0), + $3, + coalesce($4::timestamptz, now()), + $5::jsonb[], + $6, + $7::text, + $8::smallint, + coalesce($9::jsonb, '{}'), + $10::smallint, + $11::text, + coalesce($12::timestamptz, now()), + $13::river_job_state, + coalesce($14::varchar(255)[], '{}') ) RETURNING id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags ` @@ -590,33 +512,20 @@ func (q *Queries) JobInsert(ctx context.Context, db DBTX, arg JobInsertParams) ( return &i, err } -type JobInsertManyParams struct { - Args []byte - Errors []AttemptError - Kind string - MaxAttempts int16 - Metadata []byte - Priority int16 - Queue string - ScheduledAt time.Time - State JobState - Tags []string -} - const jobRescueMany = `-- name: JobRescueMany :exec UPDATE river_job SET - errors = array_append(errors, updated_job.error), - finalized_at = updated_job.finalized_at, - scheduled_at = updated_job.scheduled_at, - state = updated_job.state + errors = array_append(errors, updated_job.error), + finalized_at = updated_job.finalized_at, + scheduled_at = updated_job.scheduled_at, + state = updated_job.state FROM ( - SELECT - unnest($1::bigint[]) AS id, - unnest($2::jsonb[]) AS error, - nullif(unnest($3::timestamptz[]), '0001-01-01 00:00:00 +0000') AS finalized_at, - unnest($4::timestamptz[]) AS scheduled_at, - unnest($5::text[])::river_job_state AS state + SELECT + unnest($1::bigint[]) AS id, + unnest($2::jsonb[]) AS error, + nullif(unnest($3::timestamptz[]), '0001-01-01 00:00:00 +0000') AS finalized_at, + unnest($4::timestamptz[]) AS scheduled_at, + unnest($5::text[])::river_job_state AS state ) AS updated_job WHERE river_job.id = updated_job.id ` @@ -641,33 +550,28 @@ func (q *Queries) JobRescueMany(ctx context.Context, db DBTX, arg JobRescueManyP return err } -const jobRetryImmediately = `-- name: JobRetryImmediately :one +const jobRetry = `-- name: JobRetry :one WITH job_to_update AS ( - SELECT - id - FROM - river_job - WHERE - river_job.id = $1 - FOR UPDATE + SELECT id + FROM river_job + WHERE river_job.id = $1 + FOR UPDATE ), - updated_job AS ( - UPDATE river_job - SET - state = 'available'::river_job_state, - scheduled_at = now(), - max_attempts = CASE WHEN attempt = max_attempts THEN max_attempts + 1 ELSE max_attempts END, - finalized_at = NULL - FROM job_to_update - WHERE river_job.id = job_to_update.id - -- Do not touch running jobs: - AND river_job.state != 'running'::river_job_state - -- If the job is already available with a prior scheduled_at, leave it alone. 
- AND NOT (river_job.state = 'available'::river_job_state AND river_job.scheduled_at < now()) - RETURNING river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags + UPDATE river_job + SET + state = 'available'::river_job_state, + scheduled_at = now(), + max_attempts = CASE WHEN attempt = max_attempts THEN max_attempts + 1 ELSE max_attempts END, + finalized_at = NULL + FROM job_to_update + WHERE river_job.id = job_to_update.id + -- Do not touch running jobs: + AND river_job.state != 'running'::river_job_state + -- If the job is already available with a prior scheduled_at, leave it alone. + AND NOT (river_job.state = 'available'::river_job_state AND river_job.scheduled_at < now()) + RETURNING river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags ) - SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags FROM river_job WHERE id = $1::bigint @@ -677,8 +581,8 @@ SELECT id, args, attempt, attempted_at, attempted_by, created_at, errors, finali FROM updated_job ` -func (q *Queries) JobRetryImmediately(ctx context.Context, db DBTX, id int64) (*RiverJob, error) { - row := db.QueryRow(ctx, jobRetryImmediately, id) +func (q *Queries) JobRetry(ctx context.Context, db DBTX, id int64) (*RiverJob, error) { + row := db.QueryRow(ctx, jobRetry, id) var i RiverJob err := row.Scan( &i.ID, @@ -703,31 +607,32 @@ func (q *Queries) JobRetryImmediately(ctx context.Context, db DBTX, id int64) (* const jobSchedule = `-- name: JobSchedule :one WITH jobs_to_schedule AS ( - SELECT id - FROM river_job - WHERE - state IN ('scheduled', 'retryable') - AND queue IS NOT NULL - AND priority >= 0 - AND scheduled_at <= $2::timestamptz - ORDER BY - priority, - scheduled_at, - id - LIMIT $3::bigint - FOR UPDATE + SELECT id + FROM river_job + WHERE + state IN ('scheduled', 'retryable') + AND queue IS NOT NULL + AND priority >= 0 + AND scheduled_at <= $2::timestamptz + ORDER BY + priority, + scheduled_at, + id + LIMIT $3::bigint + FOR UPDATE ), river_job_scheduled AS ( - UPDATE river_job - SET state = 'available'::river_job_state - FROM jobs_to_schedule - WHERE river_job.id = jobs_to_schedule.id - RETURNING jobs_to_schedule.id, river_job.id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags + UPDATE river_job + SET state = 'available'::river_job_state + FROM jobs_to_schedule + WHERE river_job.id = jobs_to_schedule.id + RETURNING jobs_to_schedule.id, river_job.id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags ) SELECT count(*) FROM ( -SELECT pg_notify($1, json_build_object('queue', queue)::text) -FROM river_job_scheduled) AS notifications_sent + SELECT pg_notify($1, json_build_object('queue', queue)::text) + FROM river_job_scheduled +) AS notifications_sent ` type JobScheduleParams struct { @@ -743,65 +648,6 @@ func (q 
*Queries) JobSchedule(ctx context.Context, db DBTX, arg JobScheduleParam return count, err } -const jobSetState = `-- name: JobSetState :one -UPDATE river_job -SET errors = CASE WHEN $1::boolean THEN array_append(errors, $2::jsonb) ELSE errors END, - finalized_at = CASE WHEN $3::boolean THEN $4 ELSE finalized_at END, - max_attempts = CASE WHEN $5::boolean THEN $6 ELSE max_attempts END, - scheduled_at = CASE WHEN $7::boolean THEN $8 ELSE scheduled_at END, - state = $9 -WHERE id = $10 -RETURNING id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags -` - -type JobSetStateParams struct { - ErrorDoUpdate bool - Error []byte - FinalizedAtDoUpdate bool - FinalizedAt *time.Time - MaxAttemptsUpdate bool - MaxAttempts int16 - ScheduledAtDoUpdate bool - ScheduledAt time.Time - State JobState - ID int64 -} - -func (q *Queries) JobSetState(ctx context.Context, db DBTX, arg JobSetStateParams) (*RiverJob, error) { - row := db.QueryRow(ctx, jobSetState, - arg.ErrorDoUpdate, - arg.Error, - arg.FinalizedAtDoUpdate, - arg.FinalizedAt, - arg.MaxAttemptsUpdate, - arg.MaxAttempts, - arg.ScheduledAtDoUpdate, - arg.ScheduledAt, - arg.State, - arg.ID, - ) - var i RiverJob - err := row.Scan( - &i.ID, - &i.Args, - &i.Attempt, - &i.AttemptedAt, - &i.AttemptedBy, - &i.CreatedAt, - &i.Errors, - &i.FinalizedAt, - &i.Kind, - &i.MaxAttempts, - &i.Metadata, - &i.Priority, - &i.Queue, - &i.State, - &i.ScheduledAt, - &i.Tags, - ) - return &i, err -} - const jobSetStateIfRunning = `-- name: JobSetStateIfRunning :one WITH job_to_update AS ( SELECT @@ -823,7 +669,7 @@ updated_job AS ( ELSE errors END, max_attempts = CASE WHEN NOT should_cancel AND $7::boolean THEN $8 ELSE max_attempts END, - scheduled_at = CASE WHEN NOT should_cancel AND $9::boolean THEN $10 + scheduled_at = CASE WHEN NOT should_cancel AND $9::boolean THEN $10::timestamptz ELSE scheduled_at END FROM job_to_update WHERE river_job.id = job_to_update.id @@ -849,7 +695,7 @@ type JobSetStateIfRunningParams struct { MaxAttemptsUpdate bool MaxAttempts int16 ScheduledAtDoUpdate bool - ScheduledAt time.Time + ScheduledAt *time.Time } func (q *Queries) JobSetStateIfRunning(ctx context.Context, db DBTX, arg JobSetStateIfRunningParams) (*RiverJob, error) { @@ -890,11 +736,12 @@ func (q *Queries) JobSetStateIfRunning(ctx context.Context, db DBTX, arg JobSetS const jobUpdate = `-- name: JobUpdate :one UPDATE river_job SET - attempt = CASE WHEN $1::boolean THEN $2 ELSE attempt END, - attempted_at = CASE WHEN $3::boolean THEN $4 ELSE attempted_at END, - finalized_at = CASE WHEN $5::boolean THEN $6 ELSE finalized_at END, - state = CASE WHEN $7::boolean THEN $8 ELSE state END -WHERE id = $9 + attempt = CASE WHEN $1::boolean THEN $2 ELSE attempt END, + attempted_at = CASE WHEN $3::boolean THEN $4 ELSE attempted_at END, + errors = CASE WHEN $5::boolean THEN $6::jsonb[] ELSE errors END, + finalized_at = CASE WHEN $7::boolean THEN $8 ELSE finalized_at END, + state = CASE WHEN $9::boolean THEN $10 ELSE state END +WHERE id = $11 RETURNING id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags ` @@ -903,6 +750,8 @@ type JobUpdateParams struct { Attempt int16 AttemptedAtDoUpdate bool AttemptedAt *time.Time + ErrorsDoUpdate bool + Errors [][]byte FinalizedAtDoUpdate bool FinalizedAt *time.Time StateDoUpdate bool @@ -918,6 +767,8 @@ func (q *Queries) JobUpdate(ctx context.Context, db DBTX, arg 
JobUpdateParams) ( arg.Attempt, arg.AttemptedAtDoUpdate, arg.AttemptedAt, + arg.ErrorsDoUpdate, + arg.Errors, arg.FinalizedAtDoUpdate, arg.FinalizedAt, arg.StateDoUpdate, @@ -945,12 +796,3 @@ func (q *Queries) JobUpdate(ctx context.Context, db DBTX, arg JobUpdateParams) ( ) return &i, err } - -const pGAdvisoryXactLock = `-- name: PGAdvisoryXactLock :exec -SELECT pg_advisory_xact_lock($1) -` - -func (q *Queries) PGAdvisoryXactLock(ctx context.Context, db DBTX, key int64) error { - _, err := db.Exec(ctx, pGAdvisoryXactLock, key) - return err -} diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/river_job_copyfrom.sql b/riverdriver/riverpgxv5/internal/dbsqlc/river_job_copyfrom.sql new file mode 100644 index 00000000..48fc6dd7 --- /dev/null +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_job_copyfrom.sql @@ -0,0 +1,30 @@ +-- name: JobInsertMany :copyfrom +INSERT INTO river_job( + args, + attempt, + attempted_at, + errors, + finalized_at, + kind, + max_attempts, + metadata, + priority, + queue, + scheduled_at, + state, + tags +) VALUES ( + @args, + @attempt, + @attempted_at, + @errors::jsonb[], + @finalized_at, + @kind, + @max_attempts, + @metadata, + @priority, + @queue, + @scheduled_at, + @state, + @tags +); \ No newline at end of file diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/river_job_copyfrom.sql.go b/riverdriver/riverpgxv5/internal/dbsqlc/river_job_copyfrom.sql.go new file mode 100644 index 00000000..ea0a77ee --- /dev/null +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_job_copyfrom.sql.go @@ -0,0 +1,26 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.25.0 +// source: river_job_copyfrom.sql + +package dbsqlc + +import ( + "time" +) + +type JobInsertManyParams struct { + Args []byte + Attempt int16 + AttemptedAt *time.Time + Errors [][]byte + FinalizedAt *time.Time + Kind string + MaxAttempts int16 + Metadata []byte + Priority int16 + Queue string + ScheduledAt time.Time + State JobState + Tags []string +} diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/river_leader.sql b/riverdriver/riverpgxv5/internal/dbsqlc/river_leader.sql new file mode 100644 index 00000000..c45168b9 --- /dev/null +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_leader.sql @@ -0,0 +1,64 @@ +CREATE UNLOGGED TABLE river_leader( + elected_at timestamptz NOT NULL, + expires_at timestamptz NOT NULL, + leader_id text NOT NULL, + name text PRIMARY KEY, + CONSTRAINT name_length CHECK (char_length(name) > 0 AND char_length(name) < 128), + CONSTRAINT leader_id_length CHECK (char_length(leader_id) > 0 AND char_length(leader_id) < 128) +); + +-- name: LeaderAttemptElect :execrows +INSERT INTO river_leader(name, leader_id, elected_at, expires_at) + VALUES (@name::text, @leader_id::text, now(), now() + @ttl::interval) +ON CONFLICT (name) + DO NOTHING; + +-- name: LeaderAttemptReelect :execrows +INSERT INTO river_leader(name, leader_id, elected_at, expires_at) + VALUES (@name::text, @leader_id::text, now(), now() + @ttl::interval) +ON CONFLICT (name) + DO UPDATE SET + expires_at = now() + @ttl::interval + WHERE + river_leader.leader_id = @leader_id::text; + +-- name: LeaderDeleteExpired :execrows +DELETE FROM river_leader +WHERE name = @name::text + AND expires_at < now(); + +-- name: LeaderGetElectedLeader :one +SELECT * +FROM river_leader +WHERE name = @name; + +-- name: LeaderInsert :one +INSERT INTO river_leader( + elected_at, + expires_at, + leader_id, + name +) VALUES ( + coalesce(sqlc.narg('elected_at')::timestamptz, now()), + coalesce(sqlc.narg('expires_at')::timestamptz, now() + 
@ttl::interval), + @leader_id, + @name +) RETURNING *; + +-- name: LeaderResign :execrows +WITH currently_held_leaders AS ( + SELECT * + FROM river_leader + WHERE + name = @name::text + AND leader_id = @leader_id::text + FOR UPDATE +), +notified_resignations AS ( + SELECT + pg_notify(@leadership_topic, json_build_object('name', name, 'leader_id', leader_id, 'action', 'resigned')::text), + currently_held_leaders.name + FROM currently_held_leaders +) +DELETE FROM river_leader USING notified_resignations +WHERE river_leader.name = notified_resignations.name; \ No newline at end of file diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/river_leader.sql.go b/riverdriver/riverpgxv5/internal/dbsqlc/river_leader.sql.go new file mode 100644 index 00000000..d8d95a81 --- /dev/null +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_leader.sql.go @@ -0,0 +1,161 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.25.0 +// source: river_leader.sql + +package dbsqlc + +import ( + "context" + "time" +) + +const leaderAttemptElect = `-- name: LeaderAttemptElect :execrows +INSERT INTO river_leader(name, leader_id, elected_at, expires_at) + VALUES ($1::text, $2::text, now(), now() + $3::interval) +ON CONFLICT (name) + DO NOTHING +` + +type LeaderAttemptElectParams struct { + Name string + LeaderID string + TTL time.Duration +} + +func (q *Queries) LeaderAttemptElect(ctx context.Context, db DBTX, arg LeaderAttemptElectParams) (int64, error) { + result, err := db.Exec(ctx, leaderAttemptElect, arg.Name, arg.LeaderID, arg.TTL) + if err != nil { + return 0, err + } + return result.RowsAffected(), nil +} + +const leaderAttemptReelect = `-- name: LeaderAttemptReelect :execrows +INSERT INTO river_leader(name, leader_id, elected_at, expires_at) + VALUES ($1::text, $2::text, now(), now() + $3::interval) +ON CONFLICT (name) + DO UPDATE SET + expires_at = now() + $3::interval + WHERE + river_leader.leader_id = $2::text +` + +type LeaderAttemptReelectParams struct { + Name string + LeaderID string + TTL time.Duration +} + +func (q *Queries) LeaderAttemptReelect(ctx context.Context, db DBTX, arg LeaderAttemptReelectParams) (int64, error) { + result, err := db.Exec(ctx, leaderAttemptReelect, arg.Name, arg.LeaderID, arg.TTL) + if err != nil { + return 0, err + } + return result.RowsAffected(), nil +} + +const leaderDeleteExpired = `-- name: LeaderDeleteExpired :execrows +DELETE FROM river_leader +WHERE name = $1::text + AND expires_at < now() +` + +func (q *Queries) LeaderDeleteExpired(ctx context.Context, db DBTX, name string) (int64, error) { + result, err := db.Exec(ctx, leaderDeleteExpired, name) + if err != nil { + return 0, err + } + return result.RowsAffected(), nil +} + +const leaderGetElectedLeader = `-- name: LeaderGetElectedLeader :one +SELECT elected_at, expires_at, leader_id, name +FROM river_leader +WHERE name = $1 +` + +func (q *Queries) LeaderGetElectedLeader(ctx context.Context, db DBTX, name string) (*RiverLeader, error) { + row := db.QueryRow(ctx, leaderGetElectedLeader, name) + var i RiverLeader + err := row.Scan( + &i.ElectedAt, + &i.ExpiresAt, + &i.LeaderID, + &i.Name, + ) + return &i, err +} + +const leaderInsert = `-- name: LeaderInsert :one +INSERT INTO river_leader( + elected_at, + expires_at, + leader_id, + name +) VALUES ( + coalesce($1::timestamptz, now()), + coalesce($2::timestamptz, now() + $3::interval), + $4, + $5 +) RETURNING elected_at, expires_at, leader_id, name +` + +type LeaderInsertParams struct { + ElectedAt *time.Time + ExpiresAt *time.Time + TTL time.Duration + 
LeaderID string + Name string +} + +func (q *Queries) LeaderInsert(ctx context.Context, db DBTX, arg LeaderInsertParams) (*RiverLeader, error) { + row := db.QueryRow(ctx, leaderInsert, + arg.ElectedAt, + arg.ExpiresAt, + arg.TTL, + arg.LeaderID, + arg.Name, + ) + var i RiverLeader + err := row.Scan( + &i.ElectedAt, + &i.ExpiresAt, + &i.LeaderID, + &i.Name, + ) + return &i, err +} + +const leaderResign = `-- name: LeaderResign :execrows +WITH currently_held_leaders AS ( + SELECT elected_at, expires_at, leader_id, name + FROM river_leader + WHERE + name = $1::text + AND leader_id = $2::text + FOR UPDATE +), +notified_resignations AS ( + SELECT + pg_notify($3, json_build_object('name', name, 'leader_id', leader_id, 'action', 'resigned')::text), + currently_held_leaders.name + FROM currently_held_leaders +) +DELETE FROM river_leader USING notified_resignations +WHERE river_leader.name = notified_resignations.name +` + +type LeaderResignParams struct { + Name string + LeaderID string + LeadershipTopic string +} + +func (q *Queries) LeaderResign(ctx context.Context, db DBTX, arg LeaderResignParams) (int64, error) { + result, err := db.Exec(ctx, leaderResign, arg.Name, arg.LeaderID, arg.LeadershipTopic) + if err != nil { + return 0, err + } + return result.RowsAffected(), nil +} diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/river_migration.sql b/riverdriver/riverpgxv5/internal/dbsqlc/river_migration.sql index fa7a65ba..c2701835 100644 --- a/riverdriver/riverpgxv5/internal/dbsqlc/river_migration.sql +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_migration.sql @@ -1,8 +1,8 @@ CREATE TABLE river_migration( - id bigserial PRIMARY KEY, - created_at timestamptz NOT NULL DEFAULT NOW(), - version bigint NOT NULL, - CONSTRAINT version CHECK (version >= 1) + id bigserial PRIMARY KEY, + created_at timestamptz NOT NULL DEFAULT NOW(), + version bigint NOT NULL, + CONSTRAINT version CHECK (version >= 1) ); -- name: RiverMigrationDeleteByVersionMany :many @@ -17,17 +17,17 @@ ORDER BY version; -- name: RiverMigrationInsert :one INSERT INTO river_migration ( - version + version ) VALUES ( - @version + @version ) RETURNING *; -- name: RiverMigrationInsertMany :many INSERT INTO river_migration ( - version + version ) SELECT - unnest(@version::bigint[]) + unnest(@version::bigint[]) RETURNING *; -- name: TableExists :one diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/river_migration.sql.go b/riverdriver/riverpgxv5/internal/dbsqlc/river_migration.sql.go index b9424df3..b5c89cdb 100644 --- a/riverdriver/riverpgxv5/internal/dbsqlc/river_migration.sql.go +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_migration.sql.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. 
// versions: -// sqlc v1.24.0 +// sqlc v1.25.0 // source: river_migration.sql package dbsqlc @@ -63,9 +63,9 @@ func (q *Queries) RiverMigrationGetAll(ctx context.Context, db DBTX) ([]*RiverMi const riverMigrationInsert = `-- name: RiverMigrationInsert :one INSERT INTO river_migration ( - version + version ) VALUES ( - $1 + $1 ) RETURNING id, created_at, version ` @@ -78,10 +78,10 @@ func (q *Queries) RiverMigrationInsert(ctx context.Context, db DBTX, version int const riverMigrationInsertMany = `-- name: RiverMigrationInsertMany :many INSERT INTO river_migration ( - version + version ) SELECT - unnest($1::bigint[]) + unnest($1::bigint[]) RETURNING id, created_at, version ` diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/sqlc.yaml b/riverdriver/riverpgxv5/internal/dbsqlc/sqlc.yaml index ace0bb62..42233fc7 100644 --- a/riverdriver/riverpgxv5/internal/dbsqlc/sqlc.yaml +++ b/riverdriver/riverpgxv5/internal/dbsqlc/sqlc.yaml @@ -2,8 +2,15 @@ version: "2" sql: - engine: "postgresql" queries: + - pg_misc.sql + - river_job.sql + - river_job_copyfrom.sql + - river_leader.sql - river_migration.sql schema: + - pg_misc.sql + - river_job.sql + - river_leader.sql - river_migration.sql gen: go: @@ -14,11 +21,31 @@ sql: emit_methods_with_db_argument: true emit_result_struct_pointers: true + rename: + river_job_state: "JobState" + river_job_state_available: "JobStateAvailable" + river_job_state_cancelled: "JobStateCancelled" + river_job_state_completed: "JobStateCompleted" + river_job_state_discarded: "JobStateDiscarded" + river_job_state_retryable: "JobStateRetryable" + river_job_state_running: "JobStateRunning" + river_job_state_scheduled: "JobStateScheduled" + ttl: "TTL" + overrides: + - db_type: "pg_catalog.interval" + go_type: "time.Duration" + - db_type: "timestamptz" go_type: "time.Time" + - db_type: "timestamptz" go_type: type: "time.Time" pointer: true nullable: true + + # specific columns + - column: "river_job.errors" + go_type: + type: "[]AttemptError" diff --git a/riverdriver/riverpgxv5/river_pgx_v5_driver.go b/riverdriver/riverpgxv5/river_pgx_v5_driver.go index 0dc8cc3a..fa889708 100644 --- a/riverdriver/riverpgxv5/river_pgx_v5_driver.go +++ b/riverdriver/riverpgxv5/river_pgx_v5_driver.go @@ -8,12 +8,18 @@ package riverpgxv5 import ( "context" "errors" + "fmt" + "math" + "sync" + "time" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdriver/riverdriverutil" "github.com/riverqueue/river/riverdriver/riverpgxv5/internal/dbsqlc" + "github.com/riverqueue/river/rivertype" ) // Driver is an implementation of riverdriver.Driver for Pgx v5. 
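
The driver hunks below mirror the riverdatabasesql changes, but with working implementations: `HasPool`, a real `GetListener`, an `UnwrapExecutor` returning `ExecutorTx`, and executor methods backed by the generated queries above. A hedged sketch of a lookup through this surface follows; the patch doesn't show the pgx variant of `interpretError`, so mapping missing rows to `rivertype.ErrNotFound` is assumed here by analogy with the database/sql driver:

package example

import (
	"context"
	"errors"

	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/riverqueue/river/riverdriver/riverpgxv5"
	"github.com/riverqueue/river/rivertype"
)

// jobExists is a hypothetical helper reporting whether a job row with the
// given ID exists. It assumes the driver surfaces rivertype.ErrNotFound for
// missing rows, as the database/sql driver does.
func jobExists(ctx context.Context, dbPool *pgxpool.Pool, id int64) (bool, error) {
	driver := riverpgxv5.New(dbPool)
	job, err := driver.GetExecutor().JobGetByID(ctx, id)
	switch {
	case errors.Is(err, rivertype.ErrNotFound):
		return false, nil
	case err != nil:
		return false, err
	}
	return job != nil, nil
}

Because both drivers share the `rivertype.ErrNotFound` sentinel, calling code needs no driver-specific error handling.
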
@@ -38,10 +44,13 @@ func New(dbPool *pgxpool.Pool) *Driver { return &Driver{dbPool: dbPool, queries: dbsqlc.New()} } -func (d *Driver) GetDBPool() *pgxpool.Pool { return d.dbPool } -func (d *Driver) GetExecutor() riverdriver.Executor { return &Executor{d.dbPool, dbsqlc.New()} } -func (d *Driver) UnwrapExecutor(tx pgx.Tx) riverdriver.Executor { return &Executor{tx, dbsqlc.New()} } -func (d *Driver) UnwrapTx(tx pgx.Tx) pgx.Tx { return tx } +func (d *Driver) GetExecutor() riverdriver.Executor { return &Executor{d.dbPool, dbsqlc.New()} } +func (d *Driver) GetListener() riverdriver.Listener { return &Listener{dbPool: d.dbPool} } +func (d *Driver) HasPool() bool { return d.dbPool != nil } + +func (d *Driver) UnwrapExecutor(tx pgx.Tx) riverdriver.ExecutorTx { + return &ExecutorTx{Executor: Executor{tx, dbsqlc.New()}, tx: tx} +} type Executor struct { dbtx interface { @@ -64,21 +73,360 @@ func (e *Executor) Exec(ctx context.Context, sql string) (struct{}, error) { return struct{}{}, interpretError(err) } +func (e *Executor) JobCancel(ctx context.Context, params *riverdriver.JobCancelParams) (*rivertype.JobRow, error) { + cancelledAt, err := params.CancelAttemptedAt.MarshalJSON() + if err != nil { + return nil, err + } + + job, err := e.queries.JobCancel(ctx, e.dbtx, dbsqlc.JobCancelParams{ + ID: params.ID, + CancelAttemptedAt: cancelledAt, + JobControlTopic: params.JobControlTopic, + }) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job), nil +} + +func (e *Executor) JobDeleteBefore(ctx context.Context, params *riverdriver.JobDeleteBeforeParams) (int, error) { + numDeleted, err := e.queries.JobDeleteBefore(ctx, e.dbtx, dbsqlc.JobDeleteBeforeParams{ + CancelledFinalizedAtHorizon: params.CancelledFinalizedAtHorizon, + CompletedFinalizedAtHorizon: params.CompletedFinalizedAtHorizon, + DiscardedFinalizedAtHorizon: params.DiscardedFinalizedAtHorizon, + Max: int64(params.Max), + }) + return int(numDeleted), interpretError(err) +} + +func (e *Executor) JobGetAvailable(ctx context.Context, params *riverdriver.JobGetAvailableParams) ([]*rivertype.JobRow, error) { + jobs, err := e.queries.JobGetAvailable(ctx, e.dbtx, dbsqlc.JobGetAvailableParams{ + AttemptedBy: params.AttemptedBy, + Max: int32(params.Max), + Queue: params.Queue, + }) + return riverdriverutil.Map(jobs, jobRowFromInternal), interpretError(err) +} + +func (e *Executor) JobGetByID(ctx context.Context, id int64) (*rivertype.JobRow, error) { + job, err := e.queries.JobGetByID(ctx, e.dbtx, id) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job), nil +} + +func (e *Executor) JobGetByIDMany(ctx context.Context, id []int64) ([]*rivertype.JobRow, error) { + jobs, err := e.queries.JobGetByIDMany(ctx, e.dbtx, id) + if err != nil { + return nil, interpretError(err) + } + return riverdriverutil.Map(jobs, jobRowFromInternal), nil +} + +func (e *Executor) JobGetByKindAndUniqueProperties(ctx context.Context, params *riverdriver.JobGetByKindAndUniquePropertiesParams) (*rivertype.JobRow, error) { + job, err := e.queries.JobGetByKindAndUniqueProperties(ctx, e.dbtx, dbsqlc.JobGetByKindAndUniquePropertiesParams(*params)) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job), nil +} + +func (e *Executor) JobGetByKindMany(ctx context.Context, kind []string) ([]*rivertype.JobRow, error) { + jobs, err := e.queries.JobGetByKindMany(ctx, e.dbtx, kind) + if err != nil { + return nil, interpretError(err) + } + return riverdriverutil.Map(jobs, jobRowFromInternal), 
nil +} + +func (e *Executor) JobGetStuck(ctx context.Context, params *riverdriver.JobGetStuckParams) ([]*rivertype.JobRow, error) { + jobs, err := e.queries.JobGetStuck(ctx, e.dbtx, dbsqlc.JobGetStuckParams{Max: int32(params.Max), StuckHorizon: params.StuckHorizon}) + return riverdriverutil.Map(jobs, jobRowFromInternal), interpretError(err) +} + +func (e *Executor) JobInsert(ctx context.Context, params *riverdriver.JobInsertParams) (*rivertype.JobRow, error) { + job, err := e.queries.JobInsert(ctx, e.dbtx, dbsqlc.JobInsertParams{ + Attempt: int16(params.Attempt), + AttemptedAt: params.AttemptedAt, + Args: params.EncodedArgs, + Errors: params.Errors, + FinalizedAt: params.FinalizedAt, + Kind: params.Kind, + MaxAttempts: int16(min(params.MaxAttempts, math.MaxInt16)), + Metadata: params.Metadata, + Priority: int16(min(params.Priority, math.MaxInt16)), + Queue: params.Queue, + ScheduledAt: params.ScheduledAt, + State: dbsqlc.JobState(params.State), + Tags: params.Tags, + }) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job), nil +} + +func (e *Executor) JobInsertMany(ctx context.Context, params []*riverdriver.JobInsertParams) (int64, error) { + insertJobsParams := make([]dbsqlc.JobInsertManyParams, len(params)) + now := time.Now() + + for i := 0; i < len(params); i++ { + params := params[i] + + metadata := params.Metadata + if metadata == nil { + metadata = []byte("{}") + } + + scheduledAt := now + if params.ScheduledAt != nil { + scheduledAt = *params.ScheduledAt + } + + tags := params.Tags + if tags == nil { + tags = []string{} + } + + insertJobsParams[i] = dbsqlc.JobInsertManyParams{ + Attempt: int16(params.Attempt), + AttemptedAt: params.AttemptedAt, + Args: params.EncodedArgs, + Errors: params.Errors, + FinalizedAt: params.FinalizedAt, + Kind: params.Kind, + MaxAttempts: int16(min(params.MaxAttempts, math.MaxInt16)), + Metadata: metadata, + Priority: int16(min(params.Priority, math.MaxInt16)), + Queue: params.Queue, + ScheduledAt: scheduledAt, + State: dbsqlc.JobState(params.State), + Tags: tags, + } + } + + numInserted, err := e.queries.JobInsertMany(ctx, e.dbtx, insertJobsParams) + if err != nil { + return 0, fmt.Errorf("error inserting many jobs: %w", err) + } + + return numInserted, nil +} + +func (e *Executor) JobList(ctx context.Context, sql string, namedArgs map[string]any) ([]*rivertype.JobRow, error) { + rows, err := e.dbtx.Query(ctx, sql, pgx.NamedArgs(namedArgs)) + if err != nil { + return nil, err + } + defer rows.Close() + + var items []*dbsqlc.RiverJob + for rows.Next() { + var i dbsqlc.RiverJob + if err := rows.Scan( + &i.ID, + &i.Args, + &i.Attempt, + &i.AttemptedAt, + &i.AttemptedBy, + &i.CreatedAt, + &i.Errors, + &i.FinalizedAt, + &i.Kind, + &i.MaxAttempts, + &i.Metadata, + &i.Priority, + &i.Queue, + &i.State, + &i.ScheduledAt, + &i.Tags, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, interpretError(err) + } + + return riverdriverutil.Map(items, jobRowFromInternal), nil +} + +func (e *Executor) JobListFields() string { + return "id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags" +} + +func (e *Executor) JobRetry(ctx context.Context, id int64) (*rivertype.JobRow, error) { + job, err := e.queries.JobRetry(ctx, e.dbtx, id) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job), nil +} + +func (e *Executor) JobRescueMany(ctx 
context.Context, params *riverdriver.JobRescueManyParams) (*struct{}, error) { + err := e.queries.JobRescueMany(ctx, e.dbtx, dbsqlc.JobRescueManyParams(*params)) + return &struct{}{}, interpretError(err) +} + +func (e *Executor) JobSchedule(ctx context.Context, params *riverdriver.JobScheduleParams) (int, error) { + numScheduled, err := e.queries.JobSchedule(ctx, e.dbtx, dbsqlc.JobScheduleParams{ + InsertTopic: params.InsertTopic, + Max: int64(params.Max), + Now: params.Now, + }) + return int(numScheduled), interpretError(err) +} + +func (e *Executor) JobSetStateIfRunning(ctx context.Context, params *riverdriver.JobSetStateIfRunningParams) (*rivertype.JobRow, error) { + var maxAttempts int16 + if params.MaxAttempts != nil { + maxAttempts = int16(*params.MaxAttempts) + } + + job, err := e.queries.JobSetStateIfRunning(ctx, e.dbtx, dbsqlc.JobSetStateIfRunningParams{ + ID: params.ID, + ErrorDoUpdate: params.ErrData != nil, + Error: params.ErrData, + FinalizedAtDoUpdate: params.FinalizedAt != nil, + FinalizedAt: params.FinalizedAt, + MaxAttemptsUpdate: params.MaxAttempts != nil, + MaxAttempts: maxAttempts, + ScheduledAtDoUpdate: params.ScheduledAt != nil, + ScheduledAt: params.ScheduledAt, + State: dbsqlc.JobState(params.State), + }) + if err != nil { + return nil, interpretError(err) + } + return jobRowFromInternal(job), nil +} + +func (e *Executor) JobUpdate(ctx context.Context, params *riverdriver.JobUpdateParams) (*rivertype.JobRow, error) { + job, err := e.queries.JobUpdate(ctx, e.dbtx, dbsqlc.JobUpdateParams{ + ID: params.ID, + AttemptedAtDoUpdate: params.AttemptedAtDoUpdate, + AttemptedAt: params.AttemptedAt, + AttemptDoUpdate: params.AttemptDoUpdate, + Attempt: int16(params.Attempt), + ErrorsDoUpdate: params.ErrorsDoUpdate, + Errors: params.Errors, + FinalizedAtDoUpdate: params.FinalizedAtDoUpdate, + FinalizedAt: params.FinalizedAt, + StateDoUpdate: params.StateDoUpdate, + State: dbsqlc.JobState(params.State), + }) + if err != nil { + return nil, interpretError(err) + } + + return jobRowFromInternal(job), nil +} + +func (e *Executor) LeaderAttemptElect(ctx context.Context, params *riverdriver.LeaderElectParams) (bool, error) { + numElectionsWon, err := e.queries.LeaderAttemptElect(ctx, e.dbtx, dbsqlc.LeaderAttemptElectParams{ + Name: params.Name, + LeaderID: params.LeaderID, + TTL: params.TTL, + }) + if err != nil { + return false, interpretError(err) + } + return numElectionsWon > 0, nil +} + +func (e *Executor) LeaderAttemptReelect(ctx context.Context, params *riverdriver.LeaderElectParams) (bool, error) { + numElectionsWon, err := e.queries.LeaderAttemptReelect(ctx, e.dbtx, dbsqlc.LeaderAttemptReelectParams{ + Name: params.Name, + LeaderID: params.LeaderID, + TTL: params.TTL, + }) + if err != nil { + return false, interpretError(err) + } + return numElectionsWon > 0, nil +} + +func (e *Executor) LeaderDeleteExpired(ctx context.Context, name string) (int, error) { + numDeleted, err := e.queries.LeaderDeleteExpired(ctx, e.dbtx, name) + if err != nil { + return 0, interpretError(err) + } + return int(numDeleted), nil +} + +func (e *Executor) LeaderGetElectedLeader(ctx context.Context, name string) (*riverdriver.Leader, error) { + leader, err := e.queries.LeaderGetElectedLeader(ctx, e.dbtx, name) + if err != nil { + return nil, interpretError(err) + } + return leaderFromInternal(leader), nil +} + +func (e *Executor) LeaderInsert(ctx context.Context, params *riverdriver.LeaderInsertParams) (*riverdriver.Leader, error) { + leader, err := e.queries.LeaderInsert(ctx, e.dbtx, 
dbsqlc.LeaderInsertParams{ + ElectedAt: params.ElectedAt, + ExpiresAt: params.ExpiresAt, + LeaderID: params.LeaderID, + Name: params.Name, + TTL: params.TTL, + }) + if err != nil { + return nil, interpretError(err) + } + return leaderFromInternal(leader), nil +} + +func (e *Executor) LeaderResign(ctx context.Context, params *riverdriver.LeaderResignParams) (bool, error) { + numResigned, err := e.queries.LeaderResign(ctx, e.dbtx, dbsqlc.LeaderResignParams{ + LeaderID: params.LeaderID, + LeadershipTopic: params.LeadershipTopic, + Name: params.Name, + }) + if err != nil { + return false, interpretError(err) + } + return numResigned > 0, nil +} + func (e *Executor) MigrationDeleteByVersionMany(ctx context.Context, versions []int) ([]*riverdriver.Migration, error) { migrations, err := e.queries.RiverMigrationDeleteByVersionMany(ctx, e.dbtx, - mapSlice(versions, func(v int) int64 { return int64(v) })) - return mapMigrations(migrations), interpretError(err) + riverdriverutil.Map(versions, func(v int) int64 { return int64(v) })) + if err != nil { + return nil, interpretError(err) + } + return riverdriverutil.Map(migrations, migrationFromInternal), nil } func (e *Executor) MigrationGetAll(ctx context.Context) ([]*riverdriver.Migration, error) { migrations, err := e.queries.RiverMigrationGetAll(ctx, e.dbtx) - return mapMigrations(migrations), interpretError(err) + if err != nil { + return nil, interpretError(err) + } + return riverdriverutil.Map(migrations, migrationFromInternal), nil } func (e *Executor) MigrationInsertMany(ctx context.Context, versions []int) ([]*riverdriver.Migration, error) { migrations, err := e.queries.RiverMigrationInsertMany(ctx, e.dbtx, - mapSlice(versions, func(v int) int64 { return int64(v) })) - return mapMigrations(migrations), interpretError(err) + riverdriverutil.Map(versions, func(v int) int64 { return int64(v) })) + if err != nil { + return nil, interpretError(err) + } + return riverdriverutil.Map(migrations, migrationFromInternal), nil +} + +func (e *Executor) Notify(ctx context.Context, topic string, payload string) error { + return e.queries.PGNotify(ctx, e.dbtx, dbsqlc.PGNotifyParams{ + Payload: payload, + Topic: topic, + }) +} + +func (e *Executor) PGAdvisoryXactLock(ctx context.Context, key int64) (*struct{}, error) { + err := e.queries.PGAdvisoryXactLock(ctx, e.dbtx, key) + return &struct{}{}, interpretError(err) } func (e *Executor) TableExists(ctx context.Context, tableName string) (bool, error) { @@ -99,34 +447,133 @@ func (t *ExecutorTx) Rollback(ctx context.Context) error { return t.tx.Rollback(ctx) } -func interpretError(err error) error { - if errors.Is(err, pgx.ErrNoRows) { - return riverdriver.ErrNoRows +type Listener struct { + conn *pgxpool.Conn + dbPool *pgxpool.Pool + mu sync.RWMutex +} + +func (l *Listener) Close(ctx context.Context) error { + l.mu.Lock() + defer l.mu.Unlock() + + if l.conn == nil { + return nil + } + + if err := l.conn.Conn().Close(ctx); err != nil { + return err + } + l.conn.Release() + l.conn = nil + return nil +} + +func (l *Listener) Connect(ctx context.Context) error { + l.mu.Lock() + defer l.mu.Unlock() + + if l.conn != nil { + return errors.New("connection already established") + } + + conn, err := l.dbPool.Acquire(ctx) + if err != nil { + return err } + + l.conn = conn + return nil +} + +func (l *Listener) Listen(ctx context.Context, topic string) error { + l.mu.RLock() + defer l.mu.RUnlock() + + _, err := l.conn.Exec(ctx, "LISTEN "+topic) return err } -func mapMigrations(migrations []*dbsqlc.RiverMigration) 
[]*riverdriver.Migration { - if migrations == nil { - return nil +func (l *Listener) Ping(ctx context.Context) error { + l.mu.RLock() + defer l.mu.RUnlock() + + return l.conn.Ping(ctx) +} + +func (l *Listener) Unlisten(ctx context.Context, topic string) error { + l.mu.RLock() + defer l.mu.RUnlock() + + _, err := l.conn.Exec(ctx, "UNLISTEN "+topic) + return err +} + +func (l *Listener) WaitForNotification(ctx context.Context) (*riverdriver.Notification, error) { + l.mu.RLock() + defer l.mu.RUnlock() + + notification, err := l.conn.Conn().WaitForNotification(ctx) + if err != nil { + return nil, err } - return mapSlice(migrations, func(m *dbsqlc.RiverMigration) *riverdriver.Migration { - return &riverdriver.Migration{ - ID: int(m.ID), - CreatedAt: m.CreatedAt, - Version: int(m.Version), - } - }) + return &riverdriver.Notification{ + Topic: notification.Channel, + Payload: notification.Payload, + }, nil } -// mapSlice manipulates a slice and transforms it to a slice of another type. -func mapSlice[T any, R any](collection []T, mapFunc func(T) R) []R { - result := make([]R, len(collection)) +func attemptErrorFromInternal(e *dbsqlc.AttemptError) rivertype.AttemptError { + return rivertype.AttemptError{ + At: e.At, + Attempt: int(e.Attempt), + Error: e.Error, + Trace: e.Trace, + } +} - for i, item := range collection { - result[i] = mapFunc(item) +func interpretError(err error) error { + if errors.Is(err, pgx.ErrNoRows) { + return rivertype.ErrNotFound + } + return err +} + +func jobRowFromInternal(internal *dbsqlc.RiverJob) *rivertype.JobRow { + return &rivertype.JobRow{ + ID: internal.ID, + Attempt: max(int(internal.Attempt), 0), + AttemptedAt: internal.AttemptedAt, + AttemptedBy: internal.AttemptedBy, + CreatedAt: internal.CreatedAt, + EncodedArgs: internal.Args, + Errors: riverdriverutil.Map(internal.Errors, func(e dbsqlc.AttemptError) rivertype.AttemptError { return attemptErrorFromInternal(&e) }), + FinalizedAt: internal.FinalizedAt, + Kind: internal.Kind, + MaxAttempts: max(int(internal.MaxAttempts), 0), + Metadata: internal.Metadata, + Priority: max(int(internal.Priority), 0), + Queue: internal.Queue, + ScheduledAt: internal.ScheduledAt.UTC(), + State: rivertype.JobState(internal.State), + Tags: internal.Tags, } +} - return result +func leaderFromInternal(internal *dbsqlc.RiverLeader) *riverdriver.Leader { + return &riverdriver.Leader{ + ElectedAt: internal.ElectedAt, + ExpiresAt: internal.ExpiresAt, + LeaderID: internal.LeaderID, + Name: internal.Name, + } +} + +func migrationFromInternal(internal *dbsqlc.RiverMigration) *riverdriver.Migration { + return &riverdriver.Migration{ + ID: int(internal.ID), + CreatedAt: internal.CreatedAt, + Version: int(internal.Version), + } } diff --git a/riverdriver/riverpgxv5/river_pgx_v5_driver_test.go b/riverdriver/riverpgxv5/river_pgx_v5_driver_test.go index 229b735c..7c2f799a 100644 --- a/riverdriver/riverpgxv5/river_pgx_v5_driver_test.go +++ b/riverdriver/riverpgxv5/river_pgx_v5_driver_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/rivertype" ) // Verify interface compliance. 
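The `TestInterpretError` hunk that follows pins down the error-translation contract: `interpretError` maps pgx's `ErrNoRows` onto the shared `rivertype.ErrNotFound`, so callers test one sentinel without importing pgx. A sketch of the caller side, assuming an `Executor` already obtained via `GetExecutor`; the wrapper function and its message are illustrative only:

```go
package riverexample

import (
	"context"
	"errors"
	"fmt"

	"github.com/riverqueue/river/riverdriver"
	"github.com/riverqueue/river/rivertype"
)

// getJob is a hypothetical helper showing the contract: a missing row
// surfaces as rivertype.ErrNotFound regardless of the driver in use.
func getJob(ctx context.Context, exec riverdriver.Executor, id int64) (*rivertype.JobRow, error) {
	job, err := exec.JobGetByID(ctx, id)
	if errors.Is(err, rivertype.ErrNotFound) {
		return nil, fmt.Errorf("job %d does not exist: %w", id, err)
	}
	if err != nil {
		return nil, err // a real database error, not a miss
	}
	return job, nil
}
```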
@@ -37,6 +38,6 @@ func TestInterpretError(t *testing.T) { t.Parallel() require.EqualError(t, interpretError(errors.New("an error")), "an error") - require.ErrorIs(t, interpretError(pgx.ErrNoRows), riverdriver.ErrNoRows) + require.ErrorIs(t, interpretError(pgx.ErrNoRows), rivertype.ErrNotFound) require.NoError(t, interpretError(nil)) } diff --git a/rivermigrate/river_migrate.go b/rivermigrate/river_migrate.go index 6e9581d3..6d3a8ae6 100644 --- a/rivermigrate/river_migrate.go +++ b/rivermigrate/river_migrate.go @@ -17,7 +17,6 @@ import ( "time" "github.com/riverqueue/river/internal/baseservice" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/util/dbutil" "github.com/riverqueue/river/internal/util/maputil" "github.com/riverqueue/river/internal/util/sliceutil" @@ -56,7 +55,6 @@ type Migrator[TTx any] struct { driver riverdriver.Driver[TTx] migrations map[int]*migrationBundle // allows us to inject test migrations - queries *dbsqlc.Queries } // New returns a new migrator with the given database driver and configuration. @@ -102,7 +100,6 @@ func New[TTx any](driver riverdriver.Driver[TTx], config *Config) *Migrator[TTx] return baseservice.Init(archetype, &Migrator[TTx]{ driver: driver, migrations: riverMigrationsMap, - queries: dbsqlc.New(), }) } @@ -172,7 +169,7 @@ const ( // // handle error // } func (m *Migrator[TTx]) Migrate(ctx context.Context, direction Direction, opts *MigrateOpts) (*MigrateResult, error) { - return dbutil.WithExecutorTxV(ctx, m.driver.GetExecutor(), func(ctx context.Context, tx riverdriver.ExecutorTx) (*MigrateResult, error) { + return dbutil.WithTxV(ctx, m.driver.GetExecutor(), func(ctx context.Context, tx riverdriver.ExecutorTx) (*MigrateResult, error) { switch direction { case DirectionDown: return m.migrateDown(ctx, tx, direction, opts) @@ -227,7 +224,7 @@ type ValidateResult struct { // validation and usable message in case there are migrations that haven't yet // been applied. 
func (m *Migrator[TTx]) Validate(ctx context.Context) (*ValidateResult, error) { - return dbutil.WithExecutorTxV(ctx, m.driver.GetExecutor(), func(ctx context.Context, tx riverdriver.ExecutorTx) (*ValidateResult, error) { + return dbutil.WithTxV(ctx, m.driver.GetExecutor(), func(ctx context.Context, tx riverdriver.ExecutorTx) (*ValidateResult, error) { return m.validate(ctx, tx) }) } diff --git a/rivermigrate/river_migrate_test.go b/rivermigrate/river_migrate_test.go index ac22b8e9..ef99434f 100644 --- a/rivermigrate/river_migrate_test.go +++ b/rivermigrate/river_migrate_test.go @@ -120,7 +120,7 @@ func TestMigrator(t *testing.T) { require.Equal(t, DirectionDown, res.Direction) require.Equal(t, []int{3}, sliceutil.Map(res.Versions, migrateVersionToInt)) - err = dbExecError(ctx, bundle.tx, "SELECT * FROM river_job") + err = dbExecError(ctx, bundle.driver.UnwrapExecutor(bundle.tx), "SELECT * FROM river_job") require.NoError(t, err) } @@ -131,7 +131,7 @@ func TestMigrator(t *testing.T) { require.Equal(t, DirectionDown, res.Direction) require.Equal(t, []int{2}, sliceutil.Map(res.Versions, migrateVersionToInt)) - err = dbExecError(ctx, bundle.tx, "SELECT * FROM river_job") + err = dbExecError(ctx, bundle.driver.UnwrapExecutor(bundle.tx), "SELECT * FROM river_job") require.Error(t, err) } }) @@ -167,7 +167,7 @@ func TestMigrator(t *testing.T) { require.Equal(t, seqOneTo(riverMigrationsWithTestVersionsMaxVersion-2), sliceutil.Map(migrations, migrationToInt)) - err = dbExecError(ctx, bundle.tx, "SELECT name FROM test_table") + err = dbExecError(ctx, bundle.driver.UnwrapExecutor(bundle.tx), "SELECT name FROM test_table") require.Error(t, err) }) @@ -223,7 +223,7 @@ func TestMigrator(t *testing.T) { require.Equal(t, seqOneTo(3), sliceutil.Map(migrations, migrationToInt)) - err = dbExecError(ctx, bundle.tx, "SELECT name FROM test_table") + err = dbExecError(ctx, bundle.driver.UnwrapExecutor(bundle.tx), "SELECT name FROM test_table") require.Error(t, err) }) @@ -240,7 +240,7 @@ func TestMigrator(t *testing.T) { require.Equal(t, seqToOne(5), sliceutil.Map(res.Versions, migrateVersionToInt)) - err = dbExecError(ctx, bundle.tx, "SELECT name FROM river_migrate") + err = dbExecError(ctx, bundle.driver.UnwrapExecutor(bundle.tx), "SELECT name FROM river_migrate") require.Error(t, err) }) @@ -327,7 +327,7 @@ func TestMigrator(t *testing.T) { sliceutil.Map(migrations, migrationToInt)) // Column `name` is only added in the second test version. - err = dbExecError(ctx, bundle.tx, "SELECT name FROM test_table") + err = dbExecError(ctx, bundle.driver.UnwrapExecutor(bundle.tx), "SELECT name FROM test_table") require.Error(t, err) var pgErr *pgconn.PgError @@ -432,9 +432,9 @@ func TestMigrator(t *testing.T) { // A command returning an error aborts the transaction. This is a shortcut to // execute a command in a subtransaction so that we can verify an error, but // continue to use the original transaction. 
-func dbExecError(ctx context.Context, executor dbutil.Executor, sql string) error { - return dbutil.WithTx(ctx, executor, func(ctx context.Context, tx pgx.Tx) error { - _, err := tx.Exec(ctx, sql) +func dbExecError(ctx context.Context, exec riverdriver.Executor, sql string) error { + return dbutil.WithTx(ctx, exec, func(ctx context.Context, exec riverdriver.ExecutorTx) error { + _, err := exec.Exec(ctx, sql) return err }) } diff --git a/rivertest/rivertest.go b/rivertest/rivertest.go index 31b797b3..1281b27e 100644 --- a/rivertest/rivertest.go +++ b/rivertest/rivertest.go @@ -10,26 +10,12 @@ import ( "testing" "time" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgconn" - "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/dbsqlc" "github.com/riverqueue/river/internal/util/sliceutil" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/rivertype" ) -// dbtx is a database-like executor which is implemented by all of pgxpool.Pool, -// pgx.Conn, and pgx.Tx. It's used to let package functions share code with a -// common implementation that takes one of these. -type dbtx interface { - CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) - Exec(ctx context.Context, query string, args ...interface{}) (pgconn.CommandTag, error) - Query(ctx context.Context, query string, args ...interface{}) (pgx.Rows, error) - QueryRow(ctx context.Context, query string, args ...interface{}) pgx.Row -} - // testingT is an interface wrapper around *testing.T that's implemented by all // of *testing.T, *testing.F, and *testing.B. // @@ -104,7 +90,7 @@ func RequireInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobAr func requireInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, driver TDriver, expectedJob TArgs, opts *RequireInsertedOpts) *river.Job[TArgs] { t.Helper() - actualArgs, err := requireInsertedErr[TDriver](ctx, t, driver.GetDBPool(), expectedJob, opts) + actualArgs, err := requireInsertedErr[TDriver](ctx, t, driver.GetExecutor(), expectedJob, opts) if err != nil { failure(t, "Internal failure: %s", err) } @@ -137,34 +123,33 @@ func RequireInsertedTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.Job func requireInsertedTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, tx TTx, expectedJob TArgs, opts *RequireInsertedOpts) *river.Job[TArgs] { t.Helper() var driver TDriver - actualArgs, err := requireInsertedErr[TDriver](ctx, t, driver.UnwrapTx(tx), expectedJob, opts) + actualArgs, err := requireInsertedErr[TDriver](ctx, t, driver.UnwrapExecutor(tx), expectedJob, opts) if err != nil { failure(t, "Internal failure: %s", err) } return actualArgs } -func requireInsertedErr[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, db dbtx, expectedJob TArgs, opts *RequireInsertedOpts) (*river.Job[TArgs], error) { +func requireInsertedErr[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, exec riverdriver.Executor, expectedJob TArgs, opts *RequireInsertedOpts) (*river.Job[TArgs], error) { t.Helper() - queries := dbsqlc.New() // Returned ordered by ID. 
- dbJobs, err := queries.JobGetByKind(ctx, db, expectedJob.Kind()) + jobRows, err := exec.JobGetByKindMany(ctx, []string{expectedJob.Kind()}) if err != nil { return nil, fmt.Errorf("error querying jobs: %w", err) } - if len(dbJobs) < 1 { + if len(jobRows) < 1 { failure(t, "No jobs found with kind: %s", expectedJob.Kind()) return nil, nil //nolint:nilnil } - if len(dbJobs) > 1 { + if len(jobRows) > 1 { failure(t, "More than one job found with kind: %s (you might want RequireManyInserted instead)", expectedJob.Kind()) return nil, nil //nolint:nilnil } - jobRow := dbsqlc.JobRowFromInternal(dbJobs[0]) + jobRow := jobRows[0] var actualArgs TArgs if err := json.Unmarshal(jobRow.EncodedArgs, &actualArgs); err != nil { @@ -218,7 +203,7 @@ func RequireManyInserted[TDriver riverdriver.Driver[TTx], TTx any](ctx context.C func requireManyInserted[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, t testingT, driver TDriver, expectedJobs []ExpectedJob) []*rivertype.JobRow { t.Helper() - actualArgs, err := requireManyInsertedErr[TDriver](ctx, t, driver.GetDBPool(), expectedJobs) + actualArgs, err := requireManyInsertedErr[TDriver](ctx, t, driver.GetExecutor(), expectedJobs) if err != nil { failure(t, "Internal failure: %s", err) } @@ -255,26 +240,25 @@ func RequireManyInsertedTx[TDriver riverdriver.Driver[TTx], TTx any](ctx context func requireManyInsertedTx[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, t testingT, tx TTx, expectedJobs []ExpectedJob) []*rivertype.JobRow { t.Helper() var driver TDriver - actualArgs, err := requireManyInsertedErr[TDriver](ctx, t, driver.UnwrapTx(tx), expectedJobs) + actualArgs, err := requireManyInsertedErr[TDriver](ctx, t, driver.UnwrapExecutor(tx), expectedJobs) if err != nil { failure(t, "Internal failure: %s", err) } return actualArgs } -func requireManyInsertedErr[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, t testingT, db dbtx, expectedJobs []ExpectedJob) ([]*rivertype.JobRow, error) { +func requireManyInsertedErr[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, t testingT, exec riverdriver.Executor, expectedJobs []ExpectedJob) ([]*rivertype.JobRow, error) { t.Helper() - queries := dbsqlc.New() expectedArgsKinds := sliceutil.Map(expectedJobs, func(j ExpectedJob) string { return j.Args.Kind() }) // Returned ordered by ID. 
- dbJobs, err := queries.JobGetByKindMany(ctx, db, expectedArgsKinds) + jobRows, err := exec.JobGetByKindMany(ctx, expectedArgsKinds) if err != nil { return nil, fmt.Errorf("error querying jobs: %w", err) } - actualArgsKinds := sliceutil.Map(dbJobs, func(j *dbsqlc.RiverJob) string { return j.Kind }) + actualArgsKinds := sliceutil.Map(jobRows, func(j *rivertype.JobRow) string { return j.Kind }) if !slices.Equal(expectedArgsKinds, actualArgsKinds) { failure(t, "Inserted jobs didn't match expectation; expected: %+v, actual: %+v", @@ -282,8 +266,6 @@ func requireManyInsertedErr[TDriver riverdriver.Driver[TTx], TTx any](ctx contex return nil, nil } - jobRows := sliceutil.Map(dbJobs, dbsqlc.JobRowFromInternal) - for i, jobRow := range jobRows { if expectedJobs[i].Opts != nil { if !compareJobToInsertOpts(t, jobRow, *expectedJobs[i].Opts, i) { diff --git a/rivertype/go.mod b/rivertype/go.mod new file mode 100644 index 00000000..d4db74f5 --- /dev/null +++ b/rivertype/go.mod @@ -0,0 +1,3 @@ +module github.com/riverqueue/river/rivertype + +go 1.21.4 diff --git a/rivertype/job_row.go b/rivertype/job_row.go index c4f49fb7..1efb9406 100644 --- a/rivertype/job_row.go +++ b/rivertype/job_row.go @@ -4,9 +4,15 @@ package rivertype import ( + "errors" "time" ) +// ErrNotFound is returned when a query by ID does not match any existing +// rows. For example, attempting to cancel a job that doesn't exist will +// return this error. +var ErrNotFound = errors.New("not found") + // JobRow contains the properties of a job that are persisted to the database. // Use of `Job[T]` will generally be preferred in user-facing code like worker // interfaces. diff --git a/worker_test.go b/worker_test.go index bb08f8e7..4d1bea68 100644 --- a/worker_test.go +++ b/worker_test.go @@ -86,7 +86,9 @@ func TestWorkFunc(t *testing.T) { setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - client := newTestClient(ctx, t, newTestConfig(t, nil)) + dbPool := riverinternaltest.TestDB(ctx, t) + + client := newTestClient(t, dbPool, newTestConfig(t, nil)) startClient(ctx, t, client) return client, &testBundle{}
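With `rivertype` split out as its own module above, `ErrNotFound` now lives beside the row types it describes, letting the drivers and the top-level package share one sentinel without an import cycle. A sketch of the application-level check named in the `ErrNotFound` doc comment, under the assumption of an already-started `*river.Client[pgx.Tx]` whose `JobCancel` takes a job ID; the helper is illustrative:

```go
package riverexample

import (
	"context"
	"errors"

	"github.com/jackc/pgx/v5"
	"github.com/riverqueue/river"
	"github.com/riverqueue/river/rivertype"
)

// cancelIfExists is a hypothetical helper: cancelling a job that was never
// inserted yields rivertype.ErrNotFound, which callers can treat as a no-op
// rather than a database failure.
func cancelIfExists(ctx context.Context, client *river.Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error) {
	row, err := client.JobCancel(ctx, jobID)
	if errors.Is(err, rivertype.ErrNotFound) {
		return nil, nil // no such job; nothing to cancel
	}
	return row, err
}
```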