
Commit 91f5b0b

📝 Add docstrings to feat/bucket-deletion
Docstrings generation was requested by @gfyrag.

* #1106 (comment)

The following files were modified:

* `cmd/worker.go`
* `internal/api/v2/controllers_buckets_delete.go`
* `internal/api/v2/controllers_buckets_restore.go`
* `internal/api/v2/controllers_ledgers_list.go`
* `internal/api/v2/routes.go`
* `internal/controller/system/controller.go`
* `internal/storage/system/migrations.go`
* `internal/storage/worker_bucket_cleanup.go`
* `internal/worker/module.go`
* `pkg/testserver/worker.go`
1 parent fcd6974 commit 91f5b0b

10 files changed: +58 -10 lines changed

cmd/worker.go

Lines changed: 8 additions & 1 deletion
```diff
@@ -58,6 +58,8 @@ type WorkerCommandConfiguration struct {
 	WorkerGRPCConfig `mapstructure:",squash"`
 }
 
+// addWorkerFlags adds command-line flags to cmd to configure worker runtime behavior.
+// The flags control async block hashing, pipeline pull/push/sync behavior and pagination, and bucket cleanup retention and schedule.
 func addWorkerFlags(cmd *cobra.Command) {
 	cmd.Flags().Int(WorkerAsyncBlockHasherMaxBlockSizeFlag, 1000, "Max block size")
 	cmd.Flags().String(WorkerAsyncBlockHasherScheduleFlag, "0 * * * * *", "Schedule")
@@ -69,6 +71,9 @@ func addWorkerFlags(cmd *cobra.Command) {
 	cmd.Flags().String(WorkerBucketCleanupScheduleFlag, "0 0 * * * *", "Schedule for bucket cleanup (cron format)")
 }
 
+// NewWorkerCommand constructs the "worker" Cobra command which initializes and runs the worker service using loaded configuration and composed FX modules.
+// The command registers worker-specific flags via addWorkerFlags and common service, bunconnect, and OTLP flags, and exposes the --worker-grpc-address flag (default ":8081").
+// When executed it loads configuration and starts the service with the configured modules and a gRPC server.
 func NewWorkerCommand() *cobra.Command {
 	cmd := &cobra.Command{
 		Use: "worker",
@@ -113,6 +118,8 @@ func NewWorkerCommand() *cobra.Command {
 	return cmd
 }
 
+// newWorkerModule creates an fx.Option that configures the worker module using the provided WorkerConfiguration.
+// It maps the configuration into AsyncBlockRunnerConfig, ReplicationConfig, and BucketCleanupRunnerConfig for the worker.
 func newWorkerModule(configuration WorkerConfiguration) fx.Option {
	return worker.NewFXModule(worker.ModuleConfig{
		AsyncBlockRunnerConfig: storage.AsyncBlockRunnerConfig{
@@ -130,4 +137,4 @@ func newWorkerModule(configuration WorkerConfiguration) fx.Option {
 			Schedule: configuration.BucketCleanupCRONSpec,
 		},
 	})
-}
+}
```
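
The bucket-cleanup flags follow the same Cobra pattern as the existing worker flags: register them on the command, then read them back when the command runs. Below is a minimal, self-contained sketch of that pattern; the default cron value comes from the diff, while the literal flag names, the retention default, and the rest of the wiring are illustrative assumptions, not the repository's actual code.

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "worker",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Read back the bucket-cleanup settings registered below.
			retention, err := cmd.Flags().GetDuration("worker-bucket-cleanup-retention-period")
			if err != nil {
				return err
			}
			schedule, err := cmd.Flags().GetString("worker-bucket-cleanup-schedule")
			if err != nil {
				return err
			}
			fmt.Printf("cleanup on schedule %q, retention %s\n", schedule, retention)
			return nil
		},
	}

	// Flag names mirror WorkerBucketCleanupRetentionPeriodFlag / WorkerBucketCleanupScheduleFlag;
	// the exact string values and the retention default are assumptions for this sketch.
	cmd.Flags().Duration("worker-bucket-cleanup-retention-period", 30*24*time.Hour, "Retention period before deleted buckets are cleaned up")
	cmd.Flags().String("worker-bucket-cleanup-schedule", "0 0 * * * *", "Schedule for bucket cleanup (cron format)")

	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}
```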

internal/api/v2/controllers_buckets_delete.go

Lines changed: 2 additions & 1 deletion
```diff
@@ -9,6 +9,8 @@ import (
 	"github.com/formancehq/go-libs/v3/api"
 )
 
+// deleteBucket constructs an HTTP handler that deletes the bucket specified by the "bucket" URL parameter.
+// The handler invokes systemController.DeleteBucket with the request context; if deletion fails it responds with an internal server error, otherwise it responds with 204 No Content.
 func deleteBucket(systemController system.Controller) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		bucket := chi.URLParam(r, "bucket")
@@ -22,4 +24,3 @@ func deleteBucket(systemController system.Controller) http.HandlerFunc {
 		api.NoContent(w)
 	}
 }
-
```
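
The handler documented above follows a standard chi shape: read the URL parameter, call the controller, map the result to 500 or 204. Here is a stripped-down, self-contained sketch of that shape using only chi and the standard library; the `bucketDeleter` interface, `noopDeleter`, the route path, and `http.Error` are stand-ins for the real `system.Controller` and go-libs api helpers.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/go-chi/chi/v5"
)

// bucketDeleter is a stand-in for the DeleteBucket method of system.Controller.
type bucketDeleter interface {
	DeleteBucket(ctx context.Context, bucket string) error
}

// noopDeleter is a trivial implementation used only to make this sketch runnable.
type noopDeleter struct{}

func (noopDeleter) DeleteBucket(context.Context, string) error { return nil }

func deleteBucketHandler(ctrl bucketDeleter) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		bucket := chi.URLParam(r, "bucket") // same extraction as in the diff
		if err := ctrl.DeleteBucket(r.Context(), bucket); err != nil {
			// The real handler responds through the go-libs api helpers;
			// plain http.Error stands in for them here.
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.WriteHeader(http.StatusNoContent) // 204, as in api.NoContent(w)
	}
}

func main() {
	r := chi.NewRouter()
	r.Delete("/buckets/{bucket}", deleteBucketHandler(noopDeleter{})) // route path is illustrative only
	log.Fatal(http.ListenAndServe(":8080", r))
}
```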

internal/api/v2/controllers_buckets_restore.go

Lines changed: 3 additions & 1 deletion
```diff
@@ -9,6 +9,9 @@ import (
 	"github.com/formancehq/go-libs/v3/api"
 )
 
+// restoreBucket returns an HTTP handler that restores the bucket identified by the URL parameter "bucket".
+// It invokes the provided system.Controller's RestoreBucket with the request context and the extracted bucket name.
+// On success it responds with HTTP 204 No Content; on failure it writes an internal server error response.
 func restoreBucket(systemController system.Controller) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		bucket := chi.URLParam(r, "bucket")
@@ -22,4 +25,3 @@ func restoreBucket(systemController system.Controller) http.HandlerFunc {
 		api.NoContent(w)
 	}
 }
-
```

internal/api/v2/controllers_ledgers_list.go

Lines changed: 5 additions & 1 deletion
```diff
@@ -12,6 +12,10 @@ import (
 	"github.com/formancehq/ledger/internal/controller/system"
 )
 
+// listLedgers constructs an HTTP handler that lists ledgers with pagination.
+// The handler applies the provided pagination configuration (sorted by "id" ascending),
+// reads the "includeDeleted" query parameter to include deleted ledgers when set,
+// invokes the controller's ListLedgers, and renders the resulting paginated cursor.
 func listLedgers(b system.Controller, paginationConfig common.PaginationConfig) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 
@@ -39,4 +43,4 @@ func listLedgers(b system.Controller, paginationConfig common.PaginationConfig)
 
 		api.RenderCursor(w, *ledgers)
 	}
-}
+}
```
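
The one behavioral addition the docstring calls out is the `includeDeleted` query parameter. A minimal sketch of how such a flag can be read from the request follows; the real handler feeds this into `ListLedgers` via the common pagination helpers, and its exact parsing may differ.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// includeDeleted reports whether the request opted into listing deleted ledgers.
// Sketch of the behavior described in the docstring, not the handler's actual parsing code.
func includeDeleted(r *http.Request) bool {
	return r.URL.Query().Has("includeDeleted")
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "/v2?includeDeleted=true", nil)
	fmt.Println(includeDeleted(req)) // true
}
```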

internal/api/v2/routes.go

Lines changed: 9 additions & 1 deletion
```diff
@@ -18,6 +18,14 @@ import (
 	"github.com/go-chi/chi/v5"
 )
 
+// NewRouter creates a chi.Router configured with the v2 HTTP API routes for the ledger service.
+// It registers authentication-protected top-level endpoints (including /_info), an "/_" group
+// that may expose exporter management and bucket operations, ledger-scoped routes (ledger creation,
+// metadata, and nested ledger subroutes such as bulk operations, info, stats, pipelines when
+// enabled, logs, accounts, transactions, aggregated balances, and volumes), and applies tracing
+// attributes for the selected ledger on ledger-scoped requests.
+// The behavior of tracing, bulking, bulk handler factories, pagination, and whether exporter-related
+// endpoints are mounted is controlled via RouterOption arguments.
 func NewRouter(
 	systemController systemcontroller.Controller,
 	authenticator auth.Authenticator,
@@ -180,4 +188,4 @@ var defaultRouterOptions = []RouterOption{
 		DefaultPageSize: bunpaginate.QueryDefaultPageSize,
 		MaxPageSize:     bunpaginate.MaxPageSize,
 	}),
-}
+}
```
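
The route tree the docstring enumerates (a "/_" group for bucket and exporter management plus per-ledger subroutes) maps onto nested chi routers. Below is a schematic, self-contained sketch of that nesting; the paths and placeholder handlers are illustrative and do not reproduce the real controllers or the exact route strings.

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
)

// ok is a placeholder handler standing in for the real controllers.
func ok(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNoContent) }

func main() {
	router := chi.NewRouter()

	router.Get("/_info", func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte(`{}`)) })

	// "/_" group: bucket (and, when enabled, exporter) management endpoints.
	router.Route("/_", func(r chi.Router) {
		r.Delete("/bucket/{bucket}", ok)       // illustrative path only
		r.Post("/bucket/{bucket}/restore", ok) // illustrative path only
	})

	// Ledger-scoped routes: creation, metadata, then nested subroutes.
	router.Route("/{ledger}", func(r chi.Router) {
		r.Post("/", ok)                  // create ledger
		r.Get("/_info", ok)              // ledger info
		r.Get("/accounts", ok)           // accounts
		r.Get("/transactions", ok)       // transactions
		r.Get("/aggregate/balances", ok) // aggregated balances
		r.Get("/volumes", ok)            // volumes
	})

	http.ListenAndServe(":8080", router)
}
```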

internal/controller/system/controller.go

Lines changed: 7 additions & 1 deletion
```diff
@@ -236,6 +236,12 @@ func (ctrl *DefaultController) RestoreBucket(ctx context.Context, bucket string)
 	})))
 }
 
+// NewDefaultController creates a DefaultController configured with the provided
+// store, listener, replication backend, and optional functional options.
+//
+// The controller is initialized with a new StateRegistry and a default Numscript
+// parser; any of these defaults (and other fields) can be overridden by passing
+// Option values. The returned controller is ready for further initialization or use.
 func NewDefaultController(
 	store Driver,
 	listener ledgercontroller.Listener,
@@ -296,4 +302,4 @@ func WithEnableFeatures(v bool) Option {
 var defaultOptions = []Option{
 	WithMeterProvider(noopmetrics.MeterProvider{}),
 	WithTracerProvider(nooptracer.TracerProvider{}),
-}
+}
```
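
NewDefaultController follows the functional-options pattern the docstring describes: defaults are applied first, then caller-supplied Option values override them. Here is a generic, self-contained sketch of that construction order; the field and option names are illustrative, not the controller's actual ones.

```go
package main

import "fmt"

type controller struct {
	tracerName string
	meterName  string
}

type option func(*controller)

func withTracerName(name string) option { return func(c *controller) { c.tracerName = name } }
func withMeterName(name string) option  { return func(c *controller) { c.meterName = name } }

// Defaults are applied first, mirroring the role of defaultOptions in the diff.
var defaults = []option{withTracerName("noop"), withMeterName("noop")}

func newController(opts ...option) *controller {
	c := &controller{}
	for _, opt := range append(defaults, opts...) {
		opt(c) // later (caller-supplied) options override the defaults
	}
	return c
}

func main() {
	fmt.Printf("%+v\n", *newController(withTracerName("otel"))) // {tracerName:otel meterName:noop}
}
```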

internal/storage/system/migrations.go

Lines changed: 3 additions & 1 deletion
```diff
@@ -11,6 +11,8 @@ import (
 	"github.com/uptrace/bun"
 )
 
+// GetMigrator creates a Migrator configured with the package's system schema migrations for the given database.
+// It appends the system schema option to any provided migration options, registers all system migrations, and returns the configured *migrations.Migrator.
 func GetMigrator(db bun.IDB, options ...migrations.Option) *migrations.Migrator {
 
 	// configuration table has been removed, we keep the model to keep migrations consistent but the table is not used anymore.
@@ -373,4 +375,4 @@ BEGIN
 	END IF;
 END;
 $$ LANGUAGE plpgsql;
-`
+`
```

internal/storage/worker_bucket_cleanup.go

Lines changed: 10 additions & 1 deletion
```diff
@@ -109,6 +109,10 @@ func (r *BucketCleanupRunner) processBucket(ctx context.Context, bucket string)
 	return nil
 }
 
+// NewBucketCleanupRunner creates a BucketCleanupRunner configured with the provided logger,
+// database handle, and configuration, applying any functional options.
+//
+// The returned runner is ready to be started; provided options override default behavior.
 func NewBucketCleanupRunner(logger logging.Logger, db *bun.DB, cfg BucketCleanupRunnerConfig, opts ...BucketCleanupRunnerOption) *BucketCleanupRunner {
 	ret := &BucketCleanupRunner{
 		stopChannel: make(chan chan struct{}),
@@ -126,6 +130,7 @@ func NewBucketCleanupRunner(db *bun.DB, cfg BucketCleanup
 
 type BucketCleanupRunnerOption func(*BucketCleanupRunner)
 
+// WithBucketCleanupRunnerTracer returns a BucketCleanupRunnerOption that sets the OpenTelemetry tracer used by the BucketCleanupRunner.
 func WithBucketCleanupRunnerTracer(tracer trace.Tracer) BucketCleanupRunnerOption {
 	return func(r *BucketCleanupRunner) {
 		r.tracer = tracer
@@ -136,6 +141,10 @@ var defaultBucketCleanupRunnerOptions = []BucketCleanupRunnerOption{
 	WithBucketCleanupRunnerTracer(noop.Tracer{}),
 }
 
+// NewBucketCleanupRunnerModule returns an Fx module that provides a configured BucketCleanupRunner
+// and registers lifecycle hooks to start it in the background when the application starts and to stop
+// it when the application shuts down. The background goroutine will panic if the runner's Run method
+// returns an error.
 func NewBucketCleanupRunnerModule(cfg BucketCleanupRunnerConfig) fx.Option {
 	return fx.Options(
 		fx.Provide(func(logger logging.Logger, db *bun.DB) (*BucketCleanupRunner, error) {
@@ -156,4 +165,4 @@ func NewBucketCleanupRunnerModule(cfg BucketCleanupRunnerConfig) fx.Option {
 			})
 		}),
 	)
-}
+}
```
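
NewBucketCleanupRunnerModule wires the runner into the application lifecycle: provide the runner, start it on a background goroutine at OnStart, stop it at OnStop, and treat a failing Run as fatal. Below is a condensed, self-contained sketch of that Fx pattern; the `runner` type is a placeholder for `*storage.BucketCleanupRunner`, while the fx calls are the standard go.uber.org/fx API.

```go
package main

import (
	"context"
	"time"

	"go.uber.org/fx"
)

// runner is a stand-in for *storage.BucketCleanupRunner.
type runner struct{ done chan struct{} }

func newRunner() *runner { return &runner{done: make(chan struct{})} }

// Run blocks until Stop is called, mimicking a long-running worker loop.
func (r *runner) Run(ctx context.Context) error  { <-r.done; return nil }
func (r *runner) Stop(ctx context.Context) error { close(r.done); return nil }

func newRunnerModule() fx.Option {
	return fx.Options(
		fx.Provide(newRunner),
		fx.Invoke(func(lc fx.Lifecycle, r *runner) {
			lc.Append(fx.Hook{
				OnStart: func(ctx context.Context) error {
					go func() {
						// Mirrors the behavior described in the docstring:
						// a failing Run is treated as fatal.
						if err := r.Run(context.Background()); err != nil {
							panic(err)
						}
					}()
					return nil
				},
				OnStop: r.Stop,
			})
		}),
	)
}

func main() {
	app := fx.New(newRunnerModule())

	startCtx, cancelStart := context.WithTimeout(context.Background(), time.Second)
	defer cancelStart()
	if err := app.Start(startCtx); err != nil {
		panic(err)
	}

	stopCtx, cancelStop := context.WithTimeout(context.Background(), time.Second)
	defer cancelStop()
	_ = app.Stop(stopCtx)
}
```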

internal/worker/module.go

Lines changed: 4 additions & 1 deletion
```diff
@@ -25,6 +25,9 @@ type ModuleConfig struct {
 	BucketCleanupRunnerConfig storage.BucketCleanupRunnerConfig
 }
 
+// NewFXModule constructs an fx.Option that installs the storage async block runner,
+// the replication worker, and the bucket cleanup runner modules into an Fx application.
+// The provided cfg supplies each submodule's configuration.
 func NewFXModule(cfg ModuleConfig) fx.Option {
 	return fx.Options(
 		// todo: add auto discovery
@@ -69,4 +72,4 @@ func NewGRPCClientFxModule(
 			return client, nil
 		}),
 	)
-}
+}
```

pkg/testserver/worker.go

Lines changed: 7 additions & 1 deletion
```diff
@@ -8,30 +8,36 @@ import (
 	"time"
 )
 
+// LogsHashBlockMaxSizeInstrumentation returns an instrumentation function that appends the worker async block hasher max block size flag and the provided size to the run configuration's CLI arguments.
+// The returned function adds the flag with the size formatted as a decimal string and always returns nil.
 func LogsHashBlockMaxSizeInstrumentation(size int) testservice.InstrumentationFunc {
 	return func(ctx context.Context, runConfiguration *testservice.RunConfiguration) error {
 		runConfiguration.AppendArgs("--"+cmd.WorkerAsyncBlockHasherMaxBlockSizeFlag, strconv.Itoa(size))
 		return nil
 	}
 }
 
+// LogsHashBlockCRONSpecInstrumentation returns an instrumentation function that appends the async block hasher CRON schedule flag and the given spec to a run configuration.
+// The spec parameter is the CRON schedule expression to be passed as the value for the WorkerAsyncBlockHasherScheduleFlag.
 func LogsHashBlockCRONSpecInstrumentation(spec string) testservice.InstrumentationFunc {
 	return func(ctx context.Context, runConfiguration *testservice.RunConfiguration) error {
 		runConfiguration.AppendArgs("--"+cmd.WorkerAsyncBlockHasherScheduleFlag, spec)
 		return nil
 	}
 }
 
+// BucketCleanupRetentionPeriodInstrumentation creates an instrumentation function that appends the bucket cleanup retention period flag and its value to a test run configuration.
 func BucketCleanupRetentionPeriodInstrumentation(retentionPeriod time.Duration) testservice.InstrumentationFunc {
 	return func(ctx context.Context, runConfiguration *testservice.RunConfiguration) error {
 		runConfiguration.AppendArgs("--"+cmd.WorkerBucketCleanupRetentionPeriodFlag, retentionPeriod.String())
 		return nil
 	}
 }
 
+// BucketCleanupCRONSpecInstrumentation returns an instrumentation function that appends the bucket cleanup CRON schedule flag and the provided CRON spec to a test run configuration.
 func BucketCleanupCRONSpecInstrumentation(spec string) testservice.InstrumentationFunc {
 	return func(ctx context.Context, runConfiguration *testservice.RunConfiguration) error {
 		runConfiguration.AppendArgs("--"+cmd.WorkerBucketCleanupScheduleFlag, spec)
 		return nil
 	}
-}
+}
```
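
All four helpers share one shape: a closure that appends a CLI flag and its value to the run configuration and returns nil. The following generic sketch of that instrumentation-function pattern is self-contained; the `runConfiguration` and `instrumentationFunc` types and the literal flag string are stand-ins, with only the AppendArgs call mirroring the method used in the diff.

```go
package main

import (
	"context"
	"fmt"
	"strconv"
)

// runConfiguration is a stand-in for testservice.RunConfiguration.
type runConfiguration struct{ args []string }

func (c *runConfiguration) AppendArgs(args ...string) { c.args = append(c.args, args...) }

// instrumentationFunc is a stand-in for testservice.InstrumentationFunc.
type instrumentationFunc func(ctx context.Context, cfg *runConfiguration) error

// maxBlockSizeInstrumentation mirrors the shape of LogsHashBlockMaxSizeInstrumentation:
// it captures a value and turns it into a CLI flag on the run configuration.
func maxBlockSizeInstrumentation(size int) instrumentationFunc {
	return func(ctx context.Context, cfg *runConfiguration) error {
		cfg.AppendArgs("--worker-async-block-hasher-max-block-size", strconv.Itoa(size)) // flag name is illustrative
		return nil
	}
}

func main() {
	cfg := &runConfiguration{}
	for _, instrument := range []instrumentationFunc{maxBlockSizeInstrumentation(100)} {
		if err := instrument(context.Background(), cfg); err != nil {
			panic(err)
		}
	}
	fmt.Println(cfg.args)
}
```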
