/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"context"
	"fmt"
	"strings"
	"sync"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
	"google.golang.org/grpc/internal/buffer"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/status"
)

// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
// ccBalancerWrapper implements methods corresponding to the ones on the
// balancer.Balancer interface. The ClientConn is free to call these methods
// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
// to the Balancer happen synchronously and in order.
//
// ccBalancerWrapper also implements the balancer.ClientConn interface and is
// passed to the Balancer implementations. It invokes unexported methods on the
// ClientConn to handle these calls from the Balancer.
//
// It uses the gracefulswitch.Balancer internally to ensure that balancer
// switches happen in a graceful manner.
type ccBalancerWrapper struct {
	// cc is the parent ClientConn; its unexported methods are invoked to
	// service calls made by the balancer through the balancer.ClientConn
	// interface.
	cc *ClientConn

	// Since these fields are accessed only from handleXxx() methods which are
	// synchronized by the watcher goroutine, we do not need a mutex to protect
	// these fields.
	balancer        *gracefulswitch.Balancer
	curBalancerName string

	updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
	resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
	closed   *grpcsync.Event   // Indicates if close has been called.
	done     *grpcsync.Event   // Indicates if close has completed its work.
}

// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
// is not created until the switchTo() method is invoked.
func ( *ClientConn,  balancer.BuildOptions) *ccBalancerWrapper {
	 := &ccBalancerWrapper{
		cc:       ,
		updateCh: buffer.NewUnbounded(),
		resultCh: buffer.NewUnbounded(),
		closed:   grpcsync.NewEvent(),
		done:     grpcsync.NewEvent(),
	}
	go .watcher()
	.balancer = gracefulswitch.NewBalancer(, )
	return 
}

// The following xxxUpdate structs wrap the arguments received as part of the
// corresponding update. The watcher goroutine uses the 'type' of the update to
// invoke the appropriate handler routine to handle the update.

// ccStateUpdate wraps a ClientConnState update from the name resolver.
type ccStateUpdate struct {
	ccs *balancer.ClientConnState
}

// scStateUpdate wraps a connectivity state change on a SubConn.
type scStateUpdate struct {
	sc    balancer.SubConn
	state connectivity.State
	err   error
}

// exitIdleUpdate instructs the balancer to exit the IDLE state.
type exitIdleUpdate struct{}

// resolverErrorUpdate wraps an error reported by the name resolver.
type resolverErrorUpdate struct {
	err error
}

// switchToUpdate requests a switch to the named LB policy.
type switchToUpdate struct {
	name string
}

// subConnUpdate requests removal of the wrapped SubConn.
type subConnUpdate struct {
	acbw *acBalancerWrapper
}

// watcher is a long-running goroutine which reads updates from a channel and
// invokes corresponding methods on the underlying balancer. It ensures that
// these methods are invoked in a synchronous fashion. It also ensures that
// these methods are invoked in the order in which the updates were received.
func ( *ccBalancerWrapper) () {
	for {
		select {
		case  := <-.updateCh.Get():
			.updateCh.Load()
			if .closed.HasFired() {
				break
			}
			switch update := .(type) {
			case *ccStateUpdate:
				.handleClientConnStateChange(.ccs)
			case *scStateUpdate:
				.handleSubConnStateChange()
			case *exitIdleUpdate:
				.handleExitIdle()
			case *resolverErrorUpdate:
				.handleResolverError(.err)
			case *switchToUpdate:
				.handleSwitchTo(.name)
			case *subConnUpdate:
				.handleRemoveSubConn(.acbw)
			default:
				logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", , )
			}
		case <-.closed.Done():
		}

		if .closed.HasFired() {
			.handleClose()
			return
		}
	}
}

// updateClientConnState is invoked by grpc to push a ClientConnState update to
// the underlying balancer.
//
// Unlike other methods invoked by grpc to push updates to the underlying
// balancer, this method cannot simply push the update onto the update channel
// and return. It needs to return the error returned by the underlying balancer
// back to grpc which propagates that to the resolver.
func ( *ccBalancerWrapper) ( *balancer.ClientConnState) error {
	.updateCh.Put(&ccStateUpdate{ccs: })

	var  interface{}
	select {
	case  = <-.resultCh.Get():
		.resultCh.Load()
	case <-.closed.Done():
		// Return early if the balancer wrapper is closed while we are waiting for
		// the underlying balancer to process a ClientConnState update.
		return nil
	}
	// If the returned error is nil, attempting to type assert to error leads to
	// panic. So, this needs to handled separately.
	if  == nil {
		return nil
	}
	return .(error)
}

// handleClientConnStateChange handles a ClientConnState update from the update
// channel and invokes the appropriate method on the underlying balancer.
//
// If the addresses specified in the update contain addresses of type "grpclb"
// and the selected LB policy is not "grpclb", these addresses will be filtered
// out and ccs will be modified with the updated address list.
func ( *ccBalancerWrapper) ( *balancer.ClientConnState) {
	if .curBalancerName != grpclbName {
		// Filter any grpclb addresses since we don't have the grpclb balancer.
		var  []resolver.Address
		for ,  := range .ResolverState.Addresses {
			if .Type == resolver.GRPCLB {
				continue
			}
			 = append(, )
		}
		.ResolverState.Addresses = 
	}
	.resultCh.Put(.balancer.UpdateClientConnState(*))
}

// updateSubConnState is invoked by grpc to push a subConn state update to the
// underlying balancer.
func ( *ccBalancerWrapper) ( balancer.SubConn,  connectivity.State,  error) {
	// When updating addresses for a SubConn, if the address in use is not in
	// the new addresses, the old ac will be tearDown() and a new ac will be
	// created. tearDown() generates a state change with Shutdown state, we
	// don't want the balancer to receive this state change. So before
	// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
	// this function will be called with (nil, Shutdown). We don't need to call
	// balancer method in this case.
	if  == nil {
		return
	}
	.updateCh.Put(&scStateUpdate{
		sc:    ,
		state: ,
		err:   ,
	})
}

// handleSubConnStateChange handles a SubConnState update from the update
// channel and invokes the appropriate method on the underlying balancer.
func ( *ccBalancerWrapper) ( *scStateUpdate) {
	.balancer.UpdateSubConnState(.sc, balancer.SubConnState{ConnectivityState: .state, ConnectionError: .err})
}

func ( *ccBalancerWrapper) () {
	.updateCh.Put(&exitIdleUpdate{})
}

func ( *ccBalancerWrapper) () {
	if .cc.GetState() != connectivity.Idle {
		return
	}
	.balancer.ExitIdle()
}

func ( *ccBalancerWrapper) ( error) {
	.updateCh.Put(&resolverErrorUpdate{err: })
}

func ( *ccBalancerWrapper) ( error) {
	.balancer.ResolverError()
}

// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
// LB policy identified by name.
//
// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
// first good update from the name resolver, it determines the LB policy to use
// and invokes the switchTo() method. Upon receipt of every subsequent update
// from the name resolver, it invokes this method.
//
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
// the graceful balancer switching process if the name does not change.
func ( *ccBalancerWrapper) ( string) {
	.updateCh.Put(&switchToUpdate{name: })
}

// handleSwitchTo handles a balancer switch update from the update channel. It
// calls the SwitchTo() method on the gracefulswitch.Balancer with a
// balancer.Builder corresponding to name. If no balancer.Builder is registered
// for the given name, it uses the default LB policy which is "pick_first".
func ( *ccBalancerWrapper) ( string) {
	// TODO: Other languages use case-insensitive balancer registries. We should
	// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
	if strings.EqualFold(.curBalancerName, ) {
		return
	}

	// TODO: Ensure that name is a registered LB policy when we get here.
	// We currently only validate the `loadBalancingConfig` field. We need to do
	// the same for the `loadBalancingPolicy` field and reject the service config
	// if the specified policy is not registered.
	 := balancer.Get()
	if  == nil {
		channelz.Warningf(logger, .cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, )
		 = newPickfirstBuilder()
	} else {
		channelz.Infof(logger, .cc.channelzID, "Channel switches to new LB policy %q", )
	}

	if  := .balancer.SwitchTo();  != nil {
		channelz.Errorf(logger, .cc.channelzID, "Channel failed to build new LB policy %q: %v", , )
		return
	}
	.curBalancerName = .Name()
}

// handleRemoveSucConn handles a request from the underlying balancer to remove
// a subConn.
//
// See comments in RemoveSubConn() for more details.
func ( *ccBalancerWrapper) ( *acBalancerWrapper) {
	.cc.removeAddrConn(.getAddrConn(), errConnDrain)
}

func ( *ccBalancerWrapper) () {
	.closed.Fire()
	<-.done.Done()
}

func ( *ccBalancerWrapper) () {
	.balancer.Close()
	.done.Fire()
}

func ( *ccBalancerWrapper) ( []resolver.Address,  balancer.NewSubConnOptions) (balancer.SubConn, error) {
	if len() <= 0 {
		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
	}
	,  := .cc.newAddrConn(, )
	if  != nil {
		channelz.Warningf(logger, .cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", )
		return nil, 
	}
	 := &acBalancerWrapper{ac: , producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
	.ac.mu.Lock()
	.acbw = 
	.ac.mu.Unlock()
	return , nil
}

func ( *ccBalancerWrapper) ( balancer.SubConn) {
	// Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
	// was required to handle the RemoveSubConn() method asynchronously by pushing
	// the update onto the update channel. This was done to avoid a deadlock as
	// switchBalancer() was holding cc.mu when calling Close() on the old
	// balancer, which would in turn call RemoveSubConn().
	//
	// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
	// asynchronously is probably not required anymore since the switchTo() method
	// handles the balancer switch by pushing the update onto the channel.
	// TODO(easwars): Handle this inline.
	,  := .(*acBalancerWrapper)
	if ! {
		return
	}
	.updateCh.Put(&subConnUpdate{acbw: })
}

func ( *ccBalancerWrapper) ( balancer.SubConn,  []resolver.Address) {
	,  := .(*acBalancerWrapper)
	if ! {
		return
	}
	.UpdateAddresses()
}

func ( *ccBalancerWrapper) ( balancer.State) {
	// Update picker before updating state.  Even though the ordering here does
	// not matter, it can lead to multiple calls of Pick in the common start-up
	// case where we wait for ready and then perform an RPC.  If the picker is
	// updated later, we could call the "connecting" picker when the state is
	// updated, and then call the "ready" picker after the picker gets updated.
	.cc.blockingpicker.updatePicker(.Picker)
	.cc.csMgr.updateState(.ConnectivityState)
}

func ( *ccBalancerWrapper) ( resolver.ResolveNowOptions) {
	.cc.resolveNow()
}

func ( *ccBalancerWrapper) () string {
	return .cc.target
}

// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct {
	mu        sync.Mutex // guards ac and producers
	ac        *addrConn  // may be replaced by UpdateAddresses()
	producers map[balancer.ProducerBuilder]*refCountedProducer
}

func ( *acBalancerWrapper) ( []resolver.Address) {
	.mu.Lock()
	defer .mu.Unlock()
	if len() <= 0 {
		.ac.cc.removeAddrConn(.ac, errConnDrain)
		return
	}
	if !.ac.tryUpdateAddrs() {
		 := .ac.cc
		 := .ac.scopts
		.ac.mu.Lock()
		// Set old ac.acbw to nil so the Shutdown state update will be ignored
		// by balancer.
		//
		// TODO(bar) the state transition could be wrong when tearDown() old ac
		// and creating new ac, fix the transition.
		.ac.acbw = nil
		.ac.mu.Unlock()
		 := .ac.getState()
		.ac.cc.removeAddrConn(.ac, errConnDrain)

		if  == connectivity.Shutdown {
			return
		}

		,  := .newAddrConn(, )
		if  != nil {
			channelz.Warningf(logger, .ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", )
			return
		}
		.ac = 
		.mu.Lock()
		.acbw = 
		.mu.Unlock()
		if  != connectivity.Idle {
			go .connect()
		}
	}
}

func ( *acBalancerWrapper) () {
	.mu.Lock()
	defer .mu.Unlock()
	go .ac.connect()
}

func ( *acBalancerWrapper) () *addrConn {
	.mu.Lock()
	defer .mu.Unlock()
	return .ac
}

var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected")

// NewStream begins a streaming RPC on the addrConn.  If the addrConn is not
// ready, returns errSubConnNotReady.
func ( *acBalancerWrapper) ( context.Context,  *StreamDesc,  string,  ...CallOption) (ClientStream, error) {
	 := .ac.getReadyTransport()
	if  == nil {
		return nil, errSubConnNotReady
	}
	return newNonRetryClientStream(, , , , .ac, ...)
}

// Invoke performs a unary RPC.  If the addrConn is not ready, returns
// errSubConnNotReady.
func ( *acBalancerWrapper) ( context.Context,  string,  interface{},  interface{},  ...CallOption) error {
	,  := .NewStream(, unaryStreamDesc, , ...)
	if  != nil {
		return 
	}
	if  := .SendMsg();  != nil {
		return 
	}
	return .RecvMsg()
}

// refCountedProducer wraps a balancer.Producer with a reference count so that
// a single producer instance is shared by all callers using the same builder.
type refCountedProducer struct {
	producer balancer.Producer
	refs     int    // number of current refs to the producer
	close    func() // underlying producer's close function
}

func ( *acBalancerWrapper) ( balancer.ProducerBuilder) (balancer.Producer, func()) {
	.mu.Lock()
	defer .mu.Unlock()

	// Look up existing producer from this builder.
	 := .producers[]
	if  == nil {
		// Not found; create a new one and add it to the producers map.
		,  := .Build()
		 = &refCountedProducer{producer: , close: }
		.producers[] = 
	}
	// Account for this new reference.
	.refs++

	// Return a cleanup function wrapped in a OnceFunc to remove this reference
	// and delete the refCountedProducer from the map if the total reference
	// count goes to zero.
	 := func() {
		.mu.Lock()
		.refs--
		if .refs == 0 {
			defer .close() // Run outside the acbw mutex
			delete(.producers, )
		}
		.mu.Unlock()
	}
	return .producer, grpcsync.OnceFunc()
}