Memory leak on transport.newBufWriter #6761

Closed
@pablodz

Description

What version of gRPC are you using?

module test

go 1.21

require (
	google.golang.org/grpc v1.59.0
	google.golang.org/protobuf v1.31.0
)

...

compiled with

GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" -o server

What version of Go are you using (go version)?

go version go1.21.3 linux/amd64

What operating system (Linux, Windows, …) and version?

Running inside Alpine Linux (linux/amd64).

What did you do?

proto file

humandetector.proto

syntax = "proto3";

package humandetector;

// Ping-pong request
message PingPongRequest { string message = 1; }

// Ping-pong response
message PingPongResponse { string message = 1; }

// Human Detector service definition
service HumanDetectorService {
  rpc PingPong(PingPongRequest) returns (PingPongResponse);
}
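
For reference, Go stubs like the ones imported below are typically generated with protoc roughly as follows. The exact invocation is an assumption (it is not part of the report), and protoc-gen-go also needs a go_package option or an M mapping, which the proto above does not declare:

protoc --go_out=. --go_opt=paths=source_relative \
       --go-grpc_out=. --go-grpc_opt=paths=source_relative \
       humandetector.proto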

I have a Go gRPC server like the one below.

server.go

package server

import (
	"context"
	"fmt"
	"log"
	"net"

	"gitlab.com/vozy/proto-telephony-grpc-models/pkg/humandetector"
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

const (
	host = "0.0.0.0"
	port = 8080 // Changed to integer type
)

type srv struct {
	humandetector.HumanDetectorServiceServer
}

func ServeGrpc() {
	fmt.Println("Starting server...")
	fmt.Printf("Listening on %s:%d\n", host, port)
	listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	defer listener.Close()

	opts := []grpc.ServerOption{}
	grpcServer := grpc.NewServer(opts...)
	mysrv := &srv{}
	humandetector.RegisterHumanDetectorServiceServer(grpcServer, mysrv)

	// Register reflection service on gRPC server.
	reflection.Register(grpcServer)

	if err := grpcServer.Serve(listener); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}

func (s *srv) PingPong(ctx context.Context, req *humandetector.PingPongRequest) (*humandetector.PingPongResponse, error) {
	return &humandetector.PingPongResponse{
		Message: "PONG!",
	}, nil
}

client.go

package main

import (
	"context"
	"log"
	"sync"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	humandetector_pb2 "gitlab.com/vozy/proto-telephony-grpc-models/pkg/humandetector"
)

func makeRequest(wg *sync.WaitGroup, ch chan string) {
	defer wg.Done()

	// Dial the gRPC server inside the request function
	conn, err := grpc.Dial("localhost:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Printf("Failed to connect: %v", err)
		return
	}
	defer conn.Close()

	// Create a gRPC client
	client := humandetector_pb2.NewHumanDetectorServiceClient(conn)

	// Create a PingPong request
	request := &humandetector_pb2.PingPongRequest{}

	// Call the PingPong RPC
	response, err := client.PingPong(context.Background(), request)
	if err != nil {
		log.Printf("Error calling PingPong: %v", err)
		return
	}

	// Send the response message through the channel
	ch <- response.Message
}

func main() {
	// Number of concurrent requests
	numRequests := 50000

	// Create a channel to receive responses
	ch := make(chan string, numRequests)

	// Use a WaitGroup to wait for all goroutines to finish
	var wg sync.WaitGroup

	// Start concurrent requests
	for i := 0; i < numRequests; i++ {
		wg.Add(1)
		go makeRequest(&wg, ch)
	}

	// Close the channel once all goroutines are done
	go func() {
		wg.Wait()
		close(ch)
	}()

	// Collect responses from the channel and print them
	for response := range ch {
		log.Printf("PingPong Response: %s", response)
	}
}
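
Note that makeRequest dials a new connection for every request, so the run can hold up to 50k concurrent client connections, each backed by its own HTTP/2 transport and buffers on the server. For comparison, a variant that reuses a single connection looks roughly like this (a sketch only, not the client used for the numbers below):

client_sharedconn.go

package main

import (
	"context"
	"log"
	"sync"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	humandetector_pb2 "gitlab.com/vozy/proto-telephony-grpc-models/pkg/humandetector"
)

func main() {
	// One shared ClientConn for all 50k RPCs.
	conn, err := grpc.Dial("localhost:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("Failed to connect: %v", err)
	}
	defer conn.Close()

	client := humandetector_pb2.NewHumanDetectorServiceClient(conn)

	var wg sync.WaitGroup
	for i := 0; i < 50000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if _, err := client.PingPong(context.Background(), &humandetector_pb2.PingPongRequest{}); err != nil {
				log.Printf("Error calling PingPong: %v", err)
			}
		}()
	}
	wg.Wait()
}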

What did you expect to see?

Low memory usage once the 50k requests finish, with usage settling around this level:

> docker stats (EXPECTED)
CONTAINER ID   NAME                CPU %     MEM USAGE / LIMIT     MEM %     NET I/O       BLOCK I/O   PIDS
aec46e609fe0   tender_mirzakhani   0.00%     5.105MiB / 62.72GiB   0.01%     3.21kB / 0B   0B / 0B     6

What did you see instead?

> docker stats
CONTAINER ID   NAME                   CPU %     MEM USAGE / LIMIT   MEM %     NET I/O          BLOCK I/O    PIDS
a203abc32299   intelligent_torvalds   0.00%     777MiB / 62.72GiB   1.21%     132MB / 17.4MB   0B / 325MB   32

After 50k requests to this simple unary RPC, memory usage is nearly 700 MB and stays there.
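
If the server exposes pprof (see the sketch after server.go above), the retained allocations can be checked directly; for a leak like the one in the title, the top in-use entries would be expected to point at transport.newBufWriter. The address below assumes the pprof port from that sketch:

go tool pprof -top http://localhost:6060/debug/pprof/heap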
