/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package channelz

import (
	"fmt"
	"sort"
	"sync"
	"time"
)

// entry represents a node in the channelz database.
// entry represents a node in the channelz database.
type entry interface {
	// addChild adds a child e, whose channelz id is id, to the child list.
	addChild(id int64, e entry)
	// deleteChild deletes the child with channelz id id from the child list.
	deleteChild(id int64)
	// triggerDelete tries to delete self from the channelz database. However,
	// if the child list is not empty, then deletion from the database is on
	// hold until the last child is deleted from the database.
	triggerDelete()
	// deleteSelfIfReady checks whether triggerDelete() has been called before,
	// and whether the child list is now empty. If both conditions are met,
	// then it deletes self from the database.
	deleteSelfIfReady()
	// getParentID returns the parent ID of the entry. A 0-valued parent ID means no parent.
	getParentID() int64
	Entity
}

// channelMap is the storage data structure for channelz.
//
// Methods of channelMap can be divided into two categories with respect to
// locking.
//
// 1. Methods acquire the global lock.
// 2. Methods that can only be called when global lock is held.
//
// A second type of method need always to be called inside a first type of method.
// channelMap is the storage data structure for channelz.
//
// Methods of channelMap can be divided into two categories with respect to
// locking.
//
// 1. Methods that acquire the global lock.
// 2. Methods that can only be called when the global lock is held.
//
// A second-category method must always be called inside a first-category
// method.
type channelMap struct {
	mu               sync.RWMutex
	topLevelChannels map[int64]struct{} // ids of channels without a parent
	channels         map[int64]*Channel
	subChannels      map[int64]*SubChannel
	sockets          map[int64]*Socket
	servers          map[int64]*Server
}

func () *channelMap {
	return &channelMap{
		topLevelChannels: make(map[int64]struct{}),
		channels:         make(map[int64]*Channel),
		subChannels:      make(map[int64]*SubChannel),
		sockets:          make(map[int64]*Socket),
		servers:          make(map[int64]*Server),
	}
}

func ( *channelMap) ( int64,  *Server) {
	.mu.Lock()
	defer .mu.Unlock()
	.cm = 
	.servers[] = 
}

func ( *channelMap) ( int64,  *Channel,  bool,  int64) {
	.mu.Lock()
	defer .mu.Unlock()
	.trace.cm = 
	.channels[] = 
	if  {
		.topLevelChannels[] = struct{}{}
	} else if  := .channels[];  != nil {
		.addChild(, )
	} else {
		logger.Infof("channel %d references invalid parent ID %d", , )
	}
}

func ( *channelMap) ( int64,  *SubChannel,  int64) {
	.mu.Lock()
	defer .mu.Unlock()
	.trace.cm = 
	.subChannels[] = 
	if  := .channels[];  != nil {
		.addChild(, )
	} else {
		logger.Infof("subchannel %d references invalid parent ID %d", , )
	}
}

func ( *channelMap) ( *Socket) {
	.mu.Lock()
	defer .mu.Unlock()
	.cm = 
	.sockets[.ID] = 
	if .Parent == nil {
		logger.Infof("normal socket %d has no parent", .ID)
	}
	.Parent.(entry).addChild(.ID, )
}

// removeEntry triggers the removal of an entry, which may not indeed delete the
// entry, if it has to wait on the deletion of its children and until no other
// entity's channel trace references it.  It may lead to a chain of entry
// deletion. For example, deleting the last socket of a gracefully shutting down
// server will lead to the server being also deleted.
func ( *channelMap) ( int64) {
	.mu.Lock()
	defer .mu.Unlock()
	.findEntry().triggerDelete()
}

// tracedChannel represents tracing operations which are present on both
// channels and subChannels.
// tracedChannel represents tracing operations which are present on both
// channels and subChannels.
type tracedChannel interface {
	// getChannelTrace returns the trace object attached to the entity.
	getChannelTrace() *ChannelTrace
	// incrTraceRefCount increments the count of trace events referencing
	// this entity; a referenced entity cannot be deleted from the database.
	incrTraceRefCount()
	// decrTraceRefCount decrements the trace reference count.
	decrTraceRefCount()
	// getRefName returns the entity's reference name for use in trace events.
	getRefName() string
}

// c.mu must be held by the caller
func ( *channelMap) ( int64) {
	 := .findEntry()
	if ,  := .(tracedChannel);  {
		.decrTraceRefCount()
		.deleteSelfIfReady()
	}
}

// c.mu must be held by the caller.
func ( *channelMap) ( int64) entry {
	if ,  := .channels[];  {
		return 
	}
	if ,  := .subChannels[];  {
		return 
	}
	if ,  := .servers[];  {
		return 
	}
	if ,  := .sockets[];  {
		return 
	}
	return &dummyEntry{idNotFound: }
}

// c.mu must be held by the caller
//
// deleteEntry deletes an entry from the channelMap. Before calling this method,
// caller must check this entry is ready to be deleted, i.e removeEntry() has
// been called on it, and no children still exist.
func ( *channelMap) ( int64) entry {
	if ,  := .sockets[];  {
		delete(.sockets, )
		return 
	}
	if ,  := .subChannels[];  {
		delete(.subChannels, )
		return 
	}
	if ,  := .channels[];  {
		delete(.channels, )
		delete(.topLevelChannels, )
		return 
	}
	if ,  := .servers[];  {
		delete(.servers, )
		return 
	}
	return &dummyEntry{idNotFound: }
}

func ( *channelMap) ( int64,  *TraceEvent) {
	.mu.Lock()
	defer .mu.Unlock()
	 := .findEntry()
	,  := .(tracedChannel)
	if ! {
		return
	}
	.getChannelTrace().append(&traceEvent{Desc: .Desc, Severity: .Severity, Timestamp: time.Now()})
	if .Parent != nil {
		 := .findEntry(.getParentID())
		var  RefChannelType
		switch .(type) {
		case *Channel:
			 = RefChannel
		case *SubChannel:
			 = RefSubChannel
		}
		if ,  := .(tracedChannel);  {
			.getChannelTrace().append(&traceEvent{
				Desc:      .Parent.Desc,
				Severity:  .Parent.Severity,
				Timestamp: time.Now(),
				RefID:     ,
				RefName:   .getRefName(),
				RefType:   ,
			})
			.incrTraceRefCount()
		}
	}
}

// int64Slice implements sort.Interface so id lists can be sorted with
// sort.Sort and searched with sort.Search.
type int64Slice []int64

func (s int64Slice) Len() int           { return len(s) }
func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }

// copyMap returns a shallow copy of m so callers can use the result without
// holding the channelMap lock.
func copyMap(m map[int64]string) map[int64]string {
	n := make(map[int64]string, len(m))
	for id, name := range m {
		n[id] = name
	}
	return n
}

func ( *channelMap) ( int64,  int) ([]*Channel, bool) {
	if  <= 0 {
		 = EntriesPerPage
	}
	.mu.RLock()
	defer .mu.RUnlock()
	 := int64(len(.topLevelChannels))
	 := make([]int64, 0, )

	for  := range .topLevelChannels {
		 = append(, )
	}
	sort.Sort(int64Slice())
	 := sort.Search(len(), func( int) bool { return [] >=  })
	 := true
	var  []*Channel
	for ,  := range [:] {
		if len() ==  {
			 = false
			break
		}
		if ,  := .channels[];  {
			 = append(, )
		}
	}
	return , 
}

func ( *channelMap) ( int64,  int) ([]*Server, bool) {
	if  <= 0 {
		 = EntriesPerPage
	}
	.mu.RLock()
	defer .mu.RUnlock()
	 := make([]int64, 0, len(.servers))
	for  := range .servers {
		 = append(, )
	}
	sort.Sort(int64Slice())
	 := sort.Search(len(), func( int) bool { return [] >=  })
	 := true
	var  []*Server
	for ,  := range [:] {
		if len() ==  {
			 = false
			break
		}
		if ,  := .servers[];  {
			 = append(, )
		}
	}
	return , 
}

func ( *channelMap) ( int64,  int64,  int) ([]*Socket, bool) {
	if  <= 0 {
		 = EntriesPerPage
	}
	.mu.RLock()
	defer .mu.RUnlock()
	,  := .servers[]
	if ! {
		// server with id doesn't exist.
		return nil, true
	}
	 := .sockets
	 := make([]int64, 0, len())
	 := make([]*Socket, 0, min(len(), ))
	for  := range  {
		 = append(, )
	}
	sort.Sort(int64Slice())
	 := sort.Search(len(), func( int) bool { return [] >=  })
	 := true
	for ,  := range [:] {
		if len() ==  {
			 = false
			break
		}
		if ,  := .sockets[];  {
			 = append(, )
		}
	}
	return , 
}

func ( *channelMap) ( int64) *Channel {
	.mu.RLock()
	defer .mu.RUnlock()
	return .channels[]
}

func ( *channelMap) ( int64) *SubChannel {
	.mu.RLock()
	defer .mu.RUnlock()
	return .subChannels[]
}

func ( *channelMap) ( int64) *Socket {
	.mu.RLock()
	defer .mu.RUnlock()
	return .sockets[]
}

func ( *channelMap) ( int64) *Server {
	.mu.RLock()
	defer .mu.RUnlock()
	return .servers[]
}

// dummyEntry is a fake entry used to gracefully handle the "entry not found"
// case: findEntry/deleteEntry return one so callers can invoke entry methods
// without nil checks.
type dummyEntry struct {
	// idNotFound is the channelz id that was looked up but not found.
	idNotFound int64
	Entity
}

func ( *dummyEntry) () string {
	return fmt.Sprintf("non-existent entity #%d", .idNotFound)
}

func ( *dummyEntry) () int64 { return .idNotFound }

func ( *dummyEntry) ( int64,  entry) {
	// Note: It is possible for a normal program to reach here under race
	// condition.  For example, there could be a race between ClientConn.Close()
	// info being propagated to addrConn and http2Client. ClientConn.Close()
	// cancel the context and result in http2Client to error. The error info is
	// then caught by transport monitor and before addrConn.tearDown() is called
	// in side ClientConn.Close(). Therefore, the addrConn will create a new
	// transport. And when registering the new transport in channelz, its parent
	// addrConn could have already been torn down and deleted from channelz
	// tracking, and thus reach the code here.
	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", , , .idNotFound)
}

func ( *dummyEntry) ( int64) {
	// It is possible for a normal program to reach here under race condition.
	// Refer to the example described in addChild().
	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", , .idNotFound)
}

func ( *dummyEntry) () {
	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", .idNotFound)
}

func (*dummyEntry) () {
	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
}

func (*dummyEntry) () int64 {
	return 0
}

// Entity is implemented by all channelz types.
type Entity interface {
	// isEntity is an unexported marker method restricting implementations to
	// this package.
	isEntity()
	fmt.Stringer
	// id returns the entity's channelz id.
	id() int64
}