Source file: gracefulswitch.go
Package: google.golang.org/grpc/internal/balancer/gracefulswitch
/*
*
* Copyright 2022 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package gracefulswitch implements a graceful switch load balancer.
package gracefulswitch
import (
	"errors"
	"fmt"
	"sync"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)
var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
var _ balancer.Balancer = (*Balancer)(nil)
// NewBalancer returns a graceful switch Balancer.
func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
	return &Balancer{
		cc:    cc,
		bOpts: opts,
	}
}
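// A minimal usage sketch (illustrative only, not part of the original file):
// the channel drives policy changes by calling SwitchTo with the new child's
// builder. The policy names below are assumptions for illustration; any
// registered balancer.Builder works.
//
//	gsb := NewBalancer(cc, balancer.BuildOptions{})
//	_ = gsb.SwitchTo(balancer.Get("pick_first"))  // becomes balancerCurrent
//	_ = gsb.SwitchTo(balancer.Get("round_robin")) // becomes balancerPending
//	// The pending round_robin serves no RPCs until it reports READY (or the
//	// current pick_first leaves READY); swap() then promotes it and closes
//	// pick_first.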
// Balancer is a utility to gracefully switch from one balancer to
// a new balancer. It implements the balancer.Balancer interface.
type Balancer struct {
	bOpts balancer.BuildOptions
	cc    balancer.ClientConn

	// mu protects the following fields and all fields within balancerCurrent
	// and balancerPending. mu does not need to be held when calling into the
	// child balancers, as all calls into these children happen only as a direct
	// result of a call into the gracefulSwitchBalancer, which are also
	// guaranteed to be synchronous. There is one exception: an UpdateState call
	// from a child balancer when current and pending are populated can lead to
	// calling Close() on the current. To prevent that racing with an
	// UpdateSubConnState from the channel, we hold currentMu during Close and
	// UpdateSubConnState calls.
	mu              sync.Mutex
	balancerCurrent *balancerWrapper
	balancerPending *balancerWrapper
	closed          bool // set to true when this balancer is closed

	// currentMu must be locked before mu. This mutex guards against this
	// sequence of events: UpdateSubConnState() called, finds the
	// balancerCurrent, gives up lock, updateState comes in, causes Close() on
	// balancerCurrent before the UpdateSubConnState is called on the
	// balancerCurrent.
	currentMu sync.Mutex
}
// swap swaps out the current lb with the pending lb and updates the ClientConn.
// The caller must hold gsb.mu.
func (gsb *Balancer) swap() {
	gsb.cc.UpdateState(gsb.balancerPending.lastState)
	cur := gsb.balancerCurrent
	gsb.balancerCurrent = gsb.balancerPending
	gsb.balancerPending = nil
	go func() {
		gsb.currentMu.Lock()
		defer gsb.currentMu.Unlock()
		cur.Close()
	}()
}
// Helper function that checks if the balancer passed in is current or pending.
// The caller must hold gsb.mu.
func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
	return bw == gsb.balancerCurrent || bw == gsb.balancerPending
}
// SwitchTo initializes the graceful switch process, which completes based on
// connectivity state changes on the current/pending balancer. Thus, the switch
// process is not complete when this method returns. This method must be called
// synchronously alongside the rest of the balancer.Balancer methods this
// Graceful Switch Balancer implements.
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
	gsb.mu.Lock()
	if gsb.closed {
		gsb.mu.Unlock()
		return errBalancerClosed
	}
	bw := &balancerWrapper{
		gsb: gsb,
		lastState: balancer.State{
			ConnectivityState: connectivity.Connecting,
			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
		},
		subconns: make(map[balancer.SubConn]bool),
	}
	balToClose := gsb.balancerPending // nil if there is no pending balancer
	if gsb.balancerCurrent == nil {
		gsb.balancerCurrent = bw
	} else {
		gsb.balancerPending = bw
	}
	gsb.mu.Unlock()
	balToClose.Close()
	// This function takes a builder instead of a balancer because builder.Build
	// can call back inline, and this utility needs to handle the callbacks.
	newBalancer := builder.Build(bw, gsb.bOpts)
	if newBalancer == nil {
		// This is illegal and should never happen; we clear the balancerWrapper
		// we were constructing if it happens to avoid a potential panic.
		gsb.mu.Lock()
		if gsb.balancerPending != nil {
			gsb.balancerPending = nil
		} else {
			gsb.balancerCurrent = nil
		}
		gsb.mu.Unlock()
		return balancer.ErrBadResolverState
	}
	// This write doesn't need to take gsb.mu because this field never gets read
	// or written to on any calls from the current or pending. Calls from grpc
	// to this balancer are guaranteed to be called synchronously, so this
	// bw.Balancer field will never be forwarded to until this SwitchTo()
	// function returns.
	bw.Balancer = newBalancer
	return nil
}
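// A hedged sketch of SwitchTo's three possible outcomes (derived from the
// logic above, not from the original source):
//
//	gsb.SwitchTo(b1) // no current: b1's policy becomes balancerCurrent.
//	gsb.SwitchTo(b2) // current exists: b2's policy becomes balancerPending and
//	                 // the current keeps serving RPCs while b2 connects.
//	gsb.SwitchTo(b3) // pending exists: the not-yet-READY b2 policy is closed
//	                 // and b3's takes its place as balancerPending.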
// Returns nil if the graceful switch balancer is closed.
func (gsb *Balancer) latestBalancer() *balancerWrapper {
	gsb.mu.Lock()
	defer gsb.mu.Unlock()
	if gsb.balancerPending != nil {
		return gsb.balancerPending
	}
	return gsb.balancerCurrent
}
// UpdateClientConnState forwards the update to the latest balancer created.
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
	// The resolver data is only relevant to the most recent LB Policy.
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return errBalancerClosed
	}
	// Perform this call without gsb.mu to prevent deadlocks if the child calls
	// back into the channel. The latest balancer can never be closed during a
	// call from the channel, even without gsb.mu held.
	return balToUpdate.UpdateClientConnState(state)
}
// ResolverError forwards the error to the latest balancer created.
func (gsb *Balancer) ResolverError(err error) {
	// The resolver data is only relevant to the most recent LB Policy.
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return
	}
	// Perform this call without gsb.mu to prevent deadlocks if the child calls
	// back into the channel. The latest balancer can never be closed during a
	// call from the channel, even without gsb.mu held.
	balToUpdate.ResolverError(err)
}
// ExitIdle forwards the call to the latest balancer created.
//
// If the latest balancer does not support ExitIdle, its SubConns are
// re-connected manually.
func (gsb *Balancer) ExitIdle() {
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return
	}
	// There is no need to protect this read with a mutex, as the write to the
	// Balancer field happens in SwitchTo, which completes before this can be
	// called.
	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
		ei.ExitIdle()
		return
	}
	gsb.mu.Lock()
	defer gsb.mu.Unlock()
	for sc := range balToUpdate.subconns {
		sc.Connect()
	}
}
// UpdateSubConnState forwards the update to the appropriate child.
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	gsb.currentMu.Lock()
	defer gsb.currentMu.Unlock()
	gsb.mu.Lock()
	// Forward update to the appropriate child. Even if there is a pending
	// balancer, the current balancer should continue to get SubConn updates to
	// maintain the proper state while the pending is still connecting.
	var balToUpdate *balancerWrapper
	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
		balToUpdate = gsb.balancerCurrent
	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
		balToUpdate = gsb.balancerPending
	}
	gsb.mu.Unlock()
	if balToUpdate == nil {
		// SubConn belonged to a stale lb policy that has not yet fully closed,
		// or the balancer was already closed.
		return
	}
	balToUpdate.UpdateSubConnState(sc, state)
}
// Close closes any active child balancers.
func (gsb *Balancer) Close() {
	gsb.mu.Lock()
	gsb.closed = true
	currentBalancerToClose := gsb.balancerCurrent
	gsb.balancerCurrent = nil
	pendingBalancerToClose := gsb.balancerPending
	gsb.balancerPending = nil
	gsb.mu.Unlock()

	currentBalancerToClose.Close()
	pendingBalancerToClose.Close()
}
// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
// methods to help cleanup SubConns created by the wrapped balancer.
//
// It implements the balancer.ClientConn interface and is passed down in that
// capacity to the wrapped balancer. It maintains a set of subConns created by
// the wrapped balancer and calls from the latter to create/update/remove
// SubConns update this set before being forwarded to the parent ClientConn.
// State updates from the wrapped balancer can result in invocation of the
// graceful switch logic.
type balancerWrapper struct {
	balancer.Balancer
	gsb *Balancer

	lastState balancer.State
	subconns  map[balancer.SubConn]bool // subconns created by this balancer
}
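// Not in the original file: a compile-time assertion, mirroring the one for
// *Balancer above, that *balancerWrapper satisfies the balancer.ClientConn
// interface it is documented to implement.
var _ balancer.ClientConn = (*balancerWrapper)(nil)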
func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	if state.ConnectivityState == connectivity.Shutdown {
		bw.gsb.mu.Lock()
		delete(bw.subconns, sc)
		bw.gsb.mu.Unlock()
	}
	// There is no need to protect this read with a mutex, as the write to the
	// Balancer field happens in SwitchTo, which completes before this can be
	// called.
	bw.Balancer.UpdateSubConnState(sc, state)
}
// Close closes the underlying LB policy and removes the subconns it created. bw
// must not be referenced via balancerCurrent or balancerPending in gsb when
// called. gsb.mu must not be held. Does not panic with a nil receiver.
func (bw *balancerWrapper) Close() {
	// bw can be nil when gsb had no current or pending balancer at the time
	// Close was called.
	if bw == nil {
		return
	}
	// There is no need to protect this read with a mutex, as Close() is
	// impossible to be called concurrently with the write in SwitchTo(). The
	// callsites of Close() for this balancer in Graceful Switch Balancer will
	// never be called until SwitchTo() returns.
	bw.Balancer.Close()
	bw.gsb.mu.Lock()
	for sc := range bw.subconns {
		bw.gsb.cc.RemoveSubConn(sc)
	}
	bw.gsb.mu.Unlock()
}
func (bw *balancerWrapper) UpdateState(state balancer.State) {
	// Hold the mutex for this entire call to ensure it cannot occur
	// concurrently with other updateState() calls. This causes updates to
	// lastState and calls to cc.UpdateState to happen atomically.
	bw.gsb.mu.Lock()
	defer bw.gsb.mu.Unlock()
	bw.lastState = state

	if !bw.gsb.balancerCurrentOrPending(bw) {
		return
	}

	if bw == bw.gsb.balancerCurrent {
		// In the case that the current balancer exits READY, and there is a
		// pending balancer, you can forward the pending balancer's cached State
		// up to ClientConn and swap the pending into the current. This is
		// because there is no reason to gracefully switch from and keep using
		// the old policy as the ClientConn is not connected to any backends.
		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
			bw.gsb.swap()
			return
		}
		// Even if there is a pending balancer waiting to be gracefully switched
		// to, continue to forward current balancer updates to the ClientConn.
		// Ignoring state + picker from the current would cause undefined
		// behavior/cause the system to behave incorrectly from the current LB
		// policy's perspective. Also, the current LB is still being used by
		// grpc to choose SubConns per RPC, and thus should use the most updated
		// form of the current balancer.
		bw.gsb.cc.UpdateState(state)
		return
	}
	// This method is now dealing with a state update from the pending balancer.
	// If the current balancer is currently in a state other than READY, the new
	// policy can be swapped into place immediately. This is because there is no
	// reason to gracefully switch from and keep using the old policy as the
	// ClientConn is not connected to any backends.
	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
		bw.gsb.swap()
	}
}
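// For reference, the swap decision implemented above (a summary derived from
// the code, not present in the original file):
//
//	update from | condition                                         | action
//	------------+---------------------------------------------------+----------------
//	current     | reports non-READY while a pending balancer exists | swap to pending
//	current     | otherwise                                         | forward to cc
//	pending     | reports a state other than CONNECTING, or the     | swap to pending
//	            | current's last state is not READY                 |
//	pending     | still CONNECTING while the current is READY       | cache state only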
func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
	}
	bw.gsb.mu.Unlock()

	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
	if err != nil {
		return nil, err
	}
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
		bw.gsb.cc.RemoveSubConn(sc)
		bw.gsb.mu.Unlock()
		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
	}
	bw.subconns[sc] = true
	bw.gsb.mu.Unlock()
	return sc, nil
}
func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
	// Ignore ResolveNow requests from anything other than the most recent
	// balancer, because older balancers were already removed from the config.
	if bw != bw.gsb.latestBalancer() {
		return
	}
	bw.gsb.cc.ResolveNow(opts)
}
func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return
	}
	bw.gsb.mu.Unlock()
	bw.gsb.cc.RemoveSubConn(sc)
}
func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return
	}
	bw.gsb.mu.Unlock()
	bw.gsb.cc.UpdateAddresses(sc, addrs)
}
func (bw *balancerWrapper) Target() string {
	return bw.gsb.cc.Target()
}