Skip to content

summary of possible goroutine leaks #20663

@Chain-Fox

Description

@Chain-Fox

System information

Geth version: 1.9.10
OS & Version: Windows/Linux/OSX
Commit hash : 8045504

Expected behaviour

Possible goroutine leaks in tests.

  1. p2p/server_test.go: TestServerDial(): accepted
    accepted := make(chan net.Conn)
    go func() {
    conn, err := listener.Accept()
    if err != nil {
    t.Error("accept error:", err)
    return
    }
    accepted <- conn
    }()

    The goroutine may leak because the send on the `accepted` channel blocks (L141) when the select on L154 takes the timeout branch. Although t.Error() is called in these cases, that does not stop the leak, because per the testing package documentation, "Calling FailNow does not stop those other goroutines." The fix is to replace the unbuffered channel with a buffered channel (buffer size 1) so the send never blocks.
  2. p2p/server_test.go: TestServerInboundThrottle: connClosed
    connClosed := make(chan struct{})
    conn, err = net.DialTimeout("tcp", srv.ListenAddr, timeout)
    if err != nil {
    t.Fatalf("could not dial: %v", err)
    }
    defer conn.Close()
    go func() {
    conn.SetDeadline(time.Now().Add(timeout))
    buf := make([]byte, 10)
    if n, err := conn.Read(buf); err != io.EOF || n != 0 {
    t.Errorf("expected io.EOF and n == 0, got error %q and n == %d", err, n)
    }
    connClosed <- struct{}{}

    Similar to 1.
  3. rpc/client_test.go: TestClientSubscribeClose: errc
    errc = make(chan error)
    sub *ClientSubscription
    err error
    )
    go func() {
    sub, err = client.Subscribe(context.Background(), "nftest2", nc, "hangSubscription", 999)
    errc <- err

    Similar to 1.
  4. rpc/subscription_test.go: TestSubscriptions: successes and notifications
    successes = make(chan subConfirmation)
    notifications = make(chan subscriptionResult)
    errors = make(chan error, subCount*notificationCount+1)
    )
    // setup and start server
    for _, namespace := range namespaces {
    if err := server.RegisterName(namespace, service); err != nil {
    t.Fatalf("unable to register test service %v", err)
    }
    }
    go server.ServeCodec(NewCodec(serverConn), 0)
    defer server.Stop()
    // wait for message and write them to the given channels
    go waitForMessages(in, successes, notifications, errors)
  5. rpc/client_test.go: TestClientHTTP: errc
    errc = make(chan error)
    wantResult = echoResult{"a", 1, new(echoArgs)}
    )
    defer client.Close()
    for i := range results {
    i := i
    go func() {
    errc <- client.Call(&results[i], "test_echo",
  6. event/event_test.go: BenchmarkChanSend: closed
    c := make(chan interface{})
    closed := make(chan struct{})
    go func() {
    for range c {
  7. eth/handler_test.go: TestBroadcastMalformedBlock: notify
    notify := make(chan struct{})
    go func() {
    if _, err := sink.app.ReadMsg(); err == nil {
    notify <- struct{}{}
    }
    }()
    // Try to broadcast all malformations and ensure they all get discarded
    for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} {
    block := types.NewBlockWithHeader(header).WithBody(chain[0].Transactions(), chain[0].Uncles())
    if err := p2p.Send(source.app, NewBlockMsg, []interface{}{block, big.NewInt(131136)}); err != nil {
    t.Fatalf("failed to broadcast block: %v", err)
    }
    select {
    case <-notify:
  8. miner/worker_test.go: testGenerateBlockAndImport: loopErr
    loopErr := make(chan error)
    newBlock := make(chan struct{})
    listenNewBlock := func() {
    sub := w.mux.Subscribe(core.NewMinedBlockEvent{})
    defer sub.Unsubscribe()
    for item := range sub.Chan() {
    block := item.Data.(core.NewMinedBlockEvent).Block
    _, err := chain.InsertChain([]*types.Block{block})
    if err != nil {
    loopErr <- fmt.Errorf("failed to insert new mined block:%d, error:%v", block.NumberU64(), err)
  9. console/console.go: Interactive: scheduler
    scheduler = make(chan string) // Channel to send the next prompt on and receive the input
    )
    // Start a goroutine to listen for prompt requests and send back inputs
    go func() {
    for {
    // Read the next user input
    line, err := c.prompter.PromptInput(<-scheduler)
    if err != nil {
    // In case of an error, either clear the prompt or fail
    if err == liner.ErrPromptAborted { // ctrl-C
    prompt, indents, input = c.prompt, 0, ""
    scheduler <- ""
    continue
    }
    close(scheduler)
    return
    }
    // User input retrieved, send for interpretation and loop
    scheduler <- line
  10. common/mclock/simclock_test.go: TestSimulatedSleep: done
    done = make(chan AbsTime)
    )
    go func() {
    c.Sleep(timeout)
    done <- c.Now()
  11. common/prque/lazyqueue_test.go: TestLazyQueue: stopCh
    stopCh := make(chan chan struct{})
    go func() {
    for {
    select {
    case <-clock.After(testQueueRefresh):
    lock.Lock()
    q.Refresh()
    lock.Unlock()
    case stop := <-stopCh:

    I have not verified all of them. I will try to fix similar bugs in one patch.

Actual behaviour

These leaks were not observed at runtime; they were found through static analysis.

Steps to reproduce the behaviour

No.

Backtrace

No.

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels
    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions