-
Notifications
You must be signed in to change notification settings - Fork 96
close dropped connection on balancer.Update() #1694
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -175,6 +175,34 @@ func (b *Balancer) clusterDiscoveryAttempt(ctx context.Context, cc *grpc.ClientC | |
return nil | ||
} | ||
|
||
func buildConnectionsState(ctx context.Context, pool interface { | ||
GetIfPresent(endpoint endpoint.Endpoint) conn.Conn | ||
Allow(ctx context.Context, cc conn.Conn) | ||
EndpointsToConnections(endpoints []endpoint.Endpoint) []conn.Conn | ||
}, newest []endpoint.Endpoint, | ||
dropped []endpoint.Endpoint, | ||
config balancerConfig.Config, | ||
selfLocation balancerConfig.Info, | ||
) *connectionsState { | ||
connections := pool.EndpointsToConnections(newest) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Do I understand correctly that it will create a real connection for all endpoints here? If so, it might be a problem for large clusters. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It creates a connection to all database dynnodes. What exactly would the problem be, and how should we account for it? If the load is large, the user might want to have a connection to all dynnodes anyway. |
||
for _, c := range connections { | ||
pool.Allow(ctx, c) | ||
c.Endpoint().Touch() | ||
_ = c.Ping(ctx) | ||
} | ||
|
||
state := newConnectionsState(connections, config.Filter, selfLocation, config.AllowFallback) | ||
|
||
for _, e := range dropped { | ||
c := pool.GetIfPresent(e) | ||
if c != nil { | ||
_ = c.Close(ctx) | ||
} | ||
} | ||
|
||
return state | ||
} | ||
|
||
func (b *Balancer) applyDiscoveredEndpoints(ctx context.Context, newest []endpoint.Endpoint, localDC string) { | ||
var ( | ||
onDone = trace.DriverOnBalancerUpdate( | ||
|
@@ -186,10 +214,12 @@ func (b *Balancer) applyDiscoveredEndpoints(ctx context.Context, newest []endpoi | |
) | ||
previous = b.connections().All() | ||
) | ||
|
||
_, added, dropped := xslices.Diff(previous, newest, func(lhs, rhs endpoint.Endpoint) int { | ||
return strings.Compare(lhs.Address(), rhs.Address()) | ||
}) | ||
|
||
defer func() { | ||
_, added, dropped := xslices.Diff(previous, newest, func(lhs, rhs endpoint.Endpoint) int { | ||
return strings.Compare(lhs.Address(), rhs.Address()) | ||
}) | ||
onDone( | ||
xslices.Transform(newest, func(t endpoint.Endpoint) trace.EndpointInfo { return t }), | ||
xslices.Transform(added, func(t endpoint.Endpoint) trace.EndpointInfo { return t }), | ||
|
@@ -198,21 +228,8 @@ func (b *Balancer) applyDiscoveredEndpoints(ctx context.Context, newest []endpoi | |
) | ||
}() | ||
|
||
connections := endpointsToConnections(b.pool, newest) | ||
for _, c := range connections { | ||
b.pool.Allow(ctx, c) | ||
c.Endpoint().Touch() | ||
} | ||
|
||
info := balancerConfig.Info{SelfLocation: localDC} | ||
state := newConnectionsState(connections, b.balancerConfig.Filter, info, b.balancerConfig.AllowFallback) | ||
|
||
endpointsInfo := make([]endpoint.Info, len(newest)) | ||
for i, e := range newest { | ||
endpointsInfo[i] = e | ||
} | ||
|
||
b.connectionsState.Store(state) | ||
b.connectionsState.Store(buildConnectionsState(ctx, b.pool, newest, dropped, b.balancerConfig, info)) | ||
} | ||
|
||
func (b *Balancer) Close(ctx context.Context) (err error) { | ||
|
@@ -444,12 +461,3 @@ func (b *Balancer) nextConn(ctx context.Context) (c conn.Conn, err error) { | |
|
||
return c, nil | ||
} | ||
|
||
func endpointsToConnections(p *conn.Pool, endpoints []endpoint.Endpoint) []conn.Conn { | ||
conns := make([]conn.Conn, 0, len(endpoints)) | ||
for _, e := range endpoints { | ||
conns = append(conns, p.Get(e)) | ||
} | ||
|
||
return conns | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,124 @@ | ||
package balancer | ||
|
||
import ( | ||
"context" | ||
"testing" | ||
|
||
"github.com/stretchr/testify/assert" | ||
|
||
balancerConfig "github.com/ydb-platform/ydb-go-sdk/v3/internal/balancer/config" | ||
"github.com/ydb-platform/ydb-go-sdk/v3/internal/conn" | ||
"github.com/ydb-platform/ydb-go-sdk/v3/internal/endpoint" | ||
"github.com/ydb-platform/ydb-go-sdk/v3/internal/mock" | ||
) | ||
|
||
type fakePool struct { | ||
connections map[string]*mock.Conn | ||
} | ||
|
||
func (fp *fakePool) EndpointsToConnections(eps []endpoint.Endpoint) []conn.Conn { | ||
var conns []conn.Conn | ||
for _, ep := range eps { | ||
if c, ok := fp.connections[ep.Address()]; ok { | ||
conns = append(conns, c) | ||
} | ||
} | ||
|
||
return conns | ||
} | ||
|
||
func (fp *fakePool) Allow(_ context.Context, _ conn.Conn) {} | ||
|
||
func (fp *fakePool) GetIfPresent(ep endpoint.Endpoint) conn.Conn { | ||
if c, ok := fp.connections[ep.Address()]; ok { | ||
return c | ||
} | ||
|
||
return nil | ||
} | ||
|
||
func TestBuildConnectionsState(t *testing.T) { | ||
ctx := context.Background() | ||
|
||
tests := []struct { | ||
name string | ||
newEndpoints []endpoint.Endpoint | ||
oldEndpoints []endpoint.Endpoint | ||
initialConns map[string]*mock.Conn | ||
conf balancerConfig.Config | ||
selfLoc balancerConfig.Info | ||
expectPinged []string | ||
expectClosed []string | ||
}{ | ||
{ | ||
name: "single new and old endpoint", | ||
newEndpoints: []endpoint.Endpoint{&mock.Endpoint{AddrField: "127.0.0.1"}}, | ||
oldEndpoints: []endpoint.Endpoint{&mock.Endpoint{AddrField: "127.0.0.2"}}, | ||
initialConns: map[string]*mock.Conn{ | ||
"127.0.0.1": { | ||
AddrField: "127.0.0.1", | ||
State: conn.Online, | ||
}, | ||
"127.0.0.2": { | ||
AddrField: "127.0.0.2", | ||
State: conn.Offline, | ||
}, | ||
}, | ||
conf: balancerConfig.Config{ | ||
AllowFallback: true, | ||
DetectNearestDC: true, | ||
}, | ||
selfLoc: balancerConfig.Info{SelfLocation: "local"}, | ||
expectPinged: []string{"127.0.0.1"}, | ||
expectClosed: []string{"127.0.0.2"}, | ||
}, | ||
{ | ||
newEndpoints: []endpoint.Endpoint{&mock.Endpoint{AddrField: "a1"}, &mock.Endpoint{AddrField: "a2"}}, | ||
oldEndpoints: []endpoint.Endpoint{&mock.Endpoint{AddrField: "a3"}}, | ||
initialConns: map[string]*mock.Conn{ | ||
"a1": { | ||
AddrField: "a1", | ||
LocationField: "local", | ||
State: conn.Offline, | ||
}, | ||
"a2": { | ||
AddrField: "a2", | ||
State: conn.Offline, | ||
}, | ||
"a3": { | ||
AddrField: "a3", | ||
State: conn.Online, | ||
}, | ||
}, | ||
conf: balancerConfig.Config{ | ||
AllowFallback: true, | ||
DetectNearestDC: true, | ||
}, | ||
selfLoc: balancerConfig.Info{SelfLocation: "local"}, | ||
expectPinged: []string{"a1", "a2"}, | ||
expectClosed: []string{"a3"}, | ||
}, | ||
} | ||
|
||
for _, tt := range tests { | ||
t.Run(tt.name, func(t *testing.T) { | ||
fp := &fakePool{connections: make(map[string]*mock.Conn)} | ||
for addr, c := range tt.initialConns { | ||
fp.connections[addr] = c | ||
} | ||
|
||
state := buildConnectionsState(ctx, fp, tt.newEndpoints, tt.oldEndpoints, tt.conf, tt.selfLoc) | ||
assert.NotNil(t, state) | ||
for _, addr := range tt.expectPinged { | ||
c := fp.connections[addr] | ||
assert.True(t, c.Pinged.Load(), "connection %s should be pinged", addr) | ||
assert.True(t, c.State == conn.Online || c.PingErr != nil) | ||
} | ||
for _, addr := range tt.expectClosed { | ||
c := fp.connections[addr] | ||
assert.True(t, c.Closed.Load(), "connection %s should be closed", addr) | ||
assert.True(t, c.State == conn.Offline, "connection %s should be offline", addr) | ||
} | ||
}) | ||
} | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
What about adding a newline before `newest`?