benchmark/worker/benchmark_client.go (11 additions, 18 deletions)
@@ -73,7 +73,6 @@ func (h *lockingHistogram) mergeInto(merged *stats.Histogram) {
 
 type benchmarkClient struct {
 	closeConns        func()
-	stop              chan bool
 	lastResetTime     time.Time
 	histogramOptions  stats.HistogramOptions
 	lockingHistograms []lockingHistogram
@@ -168,7 +167,7 @@ func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error
 	}, nil
 }
 
-func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error {
+func performRPCs(ctx context.Context, config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error {
 	// Read payload size and type from config.
 	var (
 		payloadReqSize, payloadRespSize int
@@ -212,17 +211,17 @@ func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benc
 
 	switch config.RpcType {
 	case testpb.RpcType_UNARY:
-		bc.unaryLoop(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, poissonLambda)
+		bc.unaryLoop(ctx, conns, rpcCountPerConn, payloadReqSize, payloadRespSize, poissonLambda)
 	case testpb.RpcType_STREAMING:
-		bc.streamingLoop(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType, poissonLambda)
+		bc.streamingLoop(ctx, conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType, poissonLambda)
 	default:
 		return status.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType)
 	}
 
 	return nil
 }
 
-func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) {
+func startBenchmarkClient(ctx context.Context, config *testpb.ClientConfig) (*benchmarkClient, error) {
 	printClientConfig(config)
 
 	// Set running environment like how many cores to use.
@@ -243,13 +242,12 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error)
 		},
 		lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns)),
 
-		stop:            make(chan bool),
 		lastResetTime:   time.Now(),
 		closeConns:      closeConns,
 		rusageLastReset: syscall.GetRusage(),
 	}
 
-	if err = performRPCs(config, conns, bc); err != nil {
+	if err = performRPCs(ctx, config, conns, bc); err != nil {
 		// Close all connections if performRPCs failed.
 		closeConns()
 		return nil, err
@@ -258,7 +256,7 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error)
 	return bc, nil
 }
 
-func (bc *benchmarkClient) unaryLoop(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, poissonLambda *float64) {
+func (bc *benchmarkClient) unaryLoop(ctx context.Context, conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, poissonLambda *float64) {
 	for ic, conn := range conns {
 		client := testgrpc.NewBenchmarkServiceClient(conn)
 		// For each connection, create rpcCountPerConn goroutines to do rpc.
@@ -274,10 +272,8 @@ func (bc *benchmarkClient) unaryLoop(conns []*grpc.ClientConn, rpcCountPerConn i
 			// before starting benchmark.
 			if poissonLambda == nil { // Closed loop.
 				for {
-					select {
-					case <-bc.stop:
-						return
-					default:
+					if ctx.Err() != nil {
+						break
 					}
 					start := time.Now()
 					if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil {
@@ -292,13 +288,12 @@ func (bc *benchmarkClient) unaryLoop(conns []*grpc.ClientConn, rpcCountPerConn i
 				bc.poissonUnary(client, idx, reqSize, respSize, *poissonLambda)
 			})
 		}
-
 		}(idx)
 	}
 }
 
-func (bc *benchmarkClient) streamingLoop(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string, poissonLambda *float64) {
+func (bc *benchmarkClient) streamingLoop(ctx context.Context, conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string, poissonLambda *float64) {
 	var doRPC func(testgrpc.BenchmarkService_StreamingCallClient, int, int) error
 	if payloadType == "bytebuf" {
 		doRPC = benchmark.DoByteBufStreamingRoundTrip
@@ -329,10 +324,8 @@ }
 				}
 				elapse := time.Since(start)
 				bc.lockingHistograms[idx].add(int64(elapse))
-				select {
-				case <-bc.stop:
+				if ctx.Err() != nil {
 					return
-				default:
 				}
 			}
 		}(idx)
@@ -364,6 +357,7 @@ func (bc *benchmarkClient) poissonUnary(client testgrpc.BenchmarkServiceClient,
 func (bc *benchmarkClient) poissonStreaming(stream testgrpc.BenchmarkService_StreamingCallClient, idx int, reqSize int, respSize int, lambda float64, doRPC func(testgrpc.BenchmarkService_StreamingCallClient, int, int) error) {
 	go func() {
 		start := time.Now()
+
 		if err := doRPC(stream, reqSize, respSize); err != nil {
 			return
 		}
@@ -430,6 +424,5 @@ func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats {
 }
 
 func (bc *benchmarkClient) shutdown() {
-	close(bc.stop)
 	bc.closeConns()
 }
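
The loops above are the heart of this change: each worker goroutine used to select on a shared stop chan bool, and now simply polls ctx.Err(), which reports non-nil once the context is cancelled. Below is a minimal standalone sketch of that pattern, not the benchmark's real code: runClosedLoop and doCall are hypothetical stand-ins, and a sleep takes the place of the actual DoUnaryCall RPC.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// runClosedLoop issues back-to-back calls until ctx is cancelled,
// mirroring the polling style the diff adopts in unaryLoop.
func runClosedLoop(ctx context.Context, doCall func() error) {
	for {
		if ctx.Err() != nil { // non-nil once the context is cancelled
			return
		}
		if err := doCall(); err != nil {
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			runClosedLoop(ctx, func() error {
				time.Sleep(10 * time.Millisecond) // stand-in for an RPC
				return nil
			})
			fmt.Printf("worker %d stopped\n", id)
		}(i)
	}

	time.Sleep(100 * time.Millisecond)
	cancel() // plays the role close(bc.stop) used to: all loops observe ctx.Err() != nil
	wg.Wait()
}

One consequence of the switch: a single cancel() reaches every goroutine that holds the context, including any started later, whereas a closed channel cannot be reused and a bool channel invites accidental sends.
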
benchmark/worker/main.go (3 additions, 1 deletion)
@@ -139,7 +139,9 @@ func (s *workerServer) RunServer(stream testgrpc.WorkerService_RunServerServer)
 
 func (s *workerServer) RunClient(stream testgrpc.WorkerService_RunClientServer) error {
 	var bc *benchmarkClient
+	ctx, cancel := context.WithCancel(stream.Context())
 	defer func() {
+		cancel()
 		// Shut down benchmark client when stream ends.
 		logger.Infof("shutting down benchmark client")
 		if bc != nil {
@@ -163,7 +165,7 @@ func (s *workerServer) RunClient(stream testgrpc.WorkerService_RunClientServer)
 			logger.Infof("client setup received when client already exists, shutting down the existing client")
 			bc.shutdown()
 		}
-		bc, err = startBenchmarkClient(t.Setup)
+		bc, err = startBenchmarkClient(ctx, t.Setup)
 		if err != nil {
 			return err
 		}
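
The main.go side ties cancellation to the stream's lifetime: the context is derived from stream.Context() and the deferred cancel() runs on every return path, so the benchmark goroutines stop whether the stream ends normally or the handler returns early with an error. The sketch below shows the same wiring under stated assumptions: streamLike, fakeStream, and serveStream are illustrative stand-ins, not the worker's real types.

package main

import (
	"context"
	"fmt"
)

// streamLike stands in for the one piece of the gRPC stream API used here.
type streamLike interface {
	Context() context.Context
}

type fakeStream struct{ ctx context.Context }

func (s fakeStream) Context() context.Context { return s.ctx }

// serveStream mirrors the shape of RunClient: derive a cancellable
// context from the stream, and guarantee cancellation in the deferred
// cleanup so every goroutine started with ctx sees ctx.Err() != nil.
func serveStream(stream streamLike) error {
	ctx, cancel := context.WithCancel(stream.Context())
	defer func() {
		cancel() // fires on every return path, like the defer in RunClient
		fmt.Println("shutting down benchmark client")
	}()

	// ... start benchmark goroutines with ctx, then process stream messages ...
	<-ctx.Done() // placeholder for the message loop
	return ctx.Err()
}

func main() {
	parent, stop := context.WithCancel(context.Background())
	stop() // simulate the stream ending immediately
	fmt.Println(serveStream(fakeStream{ctx: parent}))
}

Because ctx is a child of the stream's context, cancellation also propagates automatically when the controlling stream itself dies, which the old stop channel only achieved via explicit shutdown calls.
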