Skip to content

Better usage example #7

@pbedat

Description

@pbedat

Continuing our discussion from ThreeDotsLabs/watermill-sql#31

Aren't you tired of being forced to write different code for sqlite3 because you can't use it with multiple goroutines?

Can you provide an example where concurrent (multiple-goroutine) sqlite3 code is truly problematic?

I modified your example to use only the stdlib and go-sqlite3, but I cannot spot any concurrency problems:

package main

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"sync"
	"time"

	"math/rand/v2"

	_ "github.com/mattn/go-sqlite3"
)

func main() {

	//db, err := sql.Open("sqlite3", "file:test.db?_journal=wal&_sync=normal")
	db, err := sql.Open("sqlite3", ":memory:")
	db.SetMaxOpenConns(1)
	if err != nil {
		panic(err)
	}

	if _, err := db.Exec("CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT)"); err != nil {
		panic(err)
	}

	var wg sync.WaitGroup

	wg.Add(10000)
	go func() {
		for range 10000 {
			defer wg.Done()
			_, err := db.Exec("INSERT INTO users (name) VALUES (?)", fmt.Sprintf("user%d", 1))
			if err != nil {
				panic(err)
			}
			// simulate random insert with a random sleep
			time.Sleep(time.Duration(rand.IntN(5)) * time.Millisecond)
		}
	}()

	// Let's measure how many records per second
	metrics := []int{}
	ticker := time.NewTicker(1 * time.Second)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	go func() {
		wg.Wait()

		ticker.Stop()
		cancel()
	}()

	for {
		select {
		case <-ctx.Done():
			goto RESULT
		case <-ticker.C:

			row := db.QueryRowContext(ctx, "SELECT COUNT(*) FROM users;")
			if errors.Is(row.Err(), context.DeadlineExceeded) {
				goto RESULT
			}
			if row.Err() != nil {
				panic(err)
			}

			var result int
			err = row.Scan(&result)
			if err != nil {
				panic(err)
			}

			metrics = append(metrics, result)
		}
	}
RESULT:
	total := 0
	for _, value := range metrics {
		total += value
	}
	average := float64(total) / float64(len(metrics))
	fmt.Printf("Average: %.2f\n", average)
}

When I modify your example to run the inserts truly concurrently, the throughput increases by a factor of 4:

	// Spawn one goroutine per insert so the statements are issued
	// concurrently; database/sql serializes them onto the pool's
	// single connection (SetMaxOpenConns(1) above).
	wg.Add(10000)
	for range 10000 {
		go func() {
			defer wg.Done()
			// NOTE(review): the format argument is the constant 1, so every
			// row is named "user1" — presumably fine for a throughput test.
			_, err := db.Exec("INSERT INTO users (name) VALUES (?)", fmt.Sprintf("user%d", 1))
			if err != nil {
				panic(err)
			}
		}()
	}

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels
    No labels

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions