package pgx

import (
	"context"
	"errors"
	"fmt"

	"github.com/jackc/pgx/v5/pgconn"
)

// QueuedQuery is a query that has been queued for execution via a Batch.
type QueuedQuery struct {
	SQL       string
	Arguments []any
	// Fn is the callback invoked when this query's result is read. It is set
	// via the Query, QueryRow, or Exec methods; leave nil to handle errors
	// solely through BatchResults.Close.
	Fn batchItemFunc
	// sd is the cached statement description for this query, when prepared.
	// NOTE(review): populated outside this chunk — presumably during batch
	// send; confirm against the caller.
	sd *pgconn.StatementDescription
}

// batchItemFunc is the signature of a queued-query callback. It is called
// with the BatchResults positioned at this query's result.
type batchItemFunc func(br BatchResults) error

// Query sets fn to be called when the response to qq is received.
func ( *QueuedQuery) ( func( Rows) error) {
	.Fn = func( BatchResults) error {
		,  := .Query()
		defer .Close()

		 := ()
		if  != nil {
			return 
		}
		.Close()

		return .Err()
	}
}

// Query sets fn to be called when the response to qq is received.
func ( *QueuedQuery) ( func( Row) error) {
	.Fn = func( BatchResults) error {
		 := .QueryRow()
		return ()
	}
}

// Exec sets fn to be called when the response to qq is received.
//
// Note: for simple batch insert uses where it is not required to handle
// each potential error individually, it's sufficient to not set any callbacks,
// and just handle the return value of BatchResults.Close.
func ( *QueuedQuery) ( func( pgconn.CommandTag) error) {
	.Fn = func( BatchResults) error {
		,  := .Exec()
		if  != nil {
			return 
		}

		return ()
	}
}

// Batch queries are a way of bundling multiple queries together to avoid
// unnecessary network round trips. A Batch must only be sent once.
type Batch struct {
	// QueuedQueries holds the queries in the order they were queued via Queue.
	QueuedQueries []*QueuedQuery
}

// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement. The only pgx option
// argument that is supported is QueryRewriter. Queries are executed using the connection's DefaultQueryExecMode.
//
// While query can contain multiple statements if the connection's DefaultQueryExecMode is QueryModeSimple, this should
// be avoided. QueuedQuery.Fn must not be set as it will only be called for the first query. That is, QueuedQuery.Query,
// QueuedQuery.QueryRow, and QueuedQuery.Exec must not be called. In addition, any error messages or tracing that
// include the current query may reference the wrong query.
func ( *Batch) ( string,  ...any) *QueuedQuery {
	 := &QueuedQuery{
		SQL:       ,
		Arguments: ,
	}
	.QueuedQueries = append(.QueuedQueries, )
	return 
}

// Len returns number of queries that have been queued so far.
func ( *Batch) () int {
	return len(.QueuedQueries)
}

// BatchResults is the result of sending a Batch. Results must be read in the
// order the queries were queued; Close must always be called.
type BatchResults interface {
	// Exec reads the results from the next query in the batch as if the query has been sent with Conn.Exec. Prefer
	// calling Exec on the QueuedQuery, or just calling Close.
	Exec() (pgconn.CommandTag, error)

	// Query reads the results from the next query in the batch as if the query has been sent with Conn.Query. Prefer
	// calling Query on the QueuedQuery.
	Query() (Rows, error)

	// QueryRow reads the results from the next query in the batch as if the query has been sent with Conn.QueryRow.
	// Prefer calling QueryRow on the QueuedQuery.
	QueryRow() Row

	// Close closes the batch operation. All unread results are read and any callback functions registered with
	// QueuedQuery.Query, QueuedQuery.QueryRow, or QueuedQuery.Exec will be called. If a callback function returns an
	// error or the batch encounters an error subsequent callback functions will not be called.
	//
	// For simple batch inserts inside a transaction or similar queries, it's sufficient to not set any callbacks,
	// and just handle the return value of Close.
	//
	// Close must be called before the underlying connection can be used again. Any error that occurred during a batch
	// operation may have made it impossible to resynchronize the connection with the server. In this case the underlying
	// connection will have been closed.
	//
	// Close is safe to call multiple times. If it returns an error subsequent calls will return the same error. Callback
	// functions will not be rerun.
	Close() error
}

// batchResults reads batch results from a pgconn.MultiResultReader.
type batchResults struct {
	ctx  context.Context
	conn *Conn
	mrr  *pgconn.MultiResultReader
	// err is sticky: once set, all subsequent operations return it.
	err error
	// b is the batch being read; used to look up queries and run callbacks.
	b *Batch
	// qqIdx is the index of the next query to read in b.QueuedQueries.
	qqIdx  int
	closed bool
	// endTraced records whether TraceBatchEnd has already fired, so Close is
	// idempotent with respect to tracing.
	endTraced bool
}

// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
func ( *batchResults) () (pgconn.CommandTag, error) {
	if .err != nil {
		return pgconn.CommandTag{}, .err
	}
	if .closed {
		return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
	}

	, ,  := .nextQueryAndArgs()

	if !.mrr.NextResult() {
		 := .mrr.Close()
		if  == nil {
			 = errors.New("no more results in batch")
		}
		if .conn.batchTracer != nil {
			.conn.batchTracer.TraceBatchQuery(.ctx, .conn, TraceBatchQueryData{
				SQL:  ,
				Args: ,
				Err:  ,
			})
		}
		return pgconn.CommandTag{}, 
	}

	,  := .mrr.ResultReader().Close()
	if  != nil {
		.err = 
		.mrr.Close()
	}

	if .conn.batchTracer != nil {
		.conn.batchTracer.TraceBatchQuery(.ctx, .conn, TraceBatchQueryData{
			SQL:        ,
			Args:       ,
			CommandTag: ,
			Err:        .err,
		})
	}

	return , .err
}

// Query reads the results from the next query in the batch as if the query has been sent with Query.
func ( *batchResults) () (Rows, error) {
	, ,  := .nextQueryAndArgs()
	if ! {
		 = "batch query"
	}

	if .err != nil {
		return &baseRows{err: .err, closed: true}, .err
	}

	if .closed {
		 := fmt.Errorf("batch already closed")
		return &baseRows{err: , closed: true}, 
	}

	 := .conn.getRows(.ctx, , )
	.batchTracer = .conn.batchTracer

	if !.mrr.NextResult() {
		.err = .mrr.Close()
		if .err == nil {
			.err = errors.New("no more results in batch")
		}
		.closed = true

		if .conn.batchTracer != nil {
			.conn.batchTracer.TraceBatchQuery(.ctx, .conn, TraceBatchQueryData{
				SQL:  ,
				Args: ,
				Err:  .err,
			})
		}

		return , .err
	}

	.resultReader = .mrr.ResultReader()
	return , nil
}

// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
func ( *batchResults) () Row {
	,  := .Query()
	return (*connRow)(.(*baseRows))
}

// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
// resyncronize the connection with the server. In this case the underlying connection will have been closed.
func ( *batchResults) () error {
	defer func() {
		if !.endTraced {
			if .conn != nil && .conn.batchTracer != nil {
				.conn.batchTracer.TraceBatchEnd(.ctx, .conn, TraceBatchEndData{Err: .err})
			}
			.endTraced = true
		}

		invalidateCachesOnBatchResultsError(.conn, .b, .err)
	}()

	if .err != nil {
		return .err
	}

	if .closed {
		return nil
	}

	// Read and run fn for all remaining items
	for .err == nil && !.closed && .b != nil && .qqIdx < len(.b.QueuedQueries) {
		if .b.QueuedQueries[.qqIdx].Fn != nil {
			 := .b.QueuedQueries[.qqIdx].Fn()
			if  != nil {
				.err = 
			}
		} else {
			.Exec()
		}
	}

	.closed = true

	 := .mrr.Close()
	if .err == nil {
		.err = 
	}

	return .err
}

func ( *batchResults) () error {
	return .err
}

func ( *batchResults) () ( string,  []any,  bool) {
	if .b != nil && .qqIdx < len(.b.QueuedQueries) {
		 := .b.QueuedQueries[.qqIdx]
		 = .SQL
		 = .Arguments
		 = true
		.qqIdx++
	}
	return , , 
}

// pipelineBatchResults reads batch results from a pgconn.Pipeline.
type pipelineBatchResults struct {
	ctx      context.Context
	conn     *Conn
	pipeline *pgconn.Pipeline
	// lastRows is the most recently returned rows; its error is promoted to
	// br.err before the next operation.
	lastRows *baseRows
	// err is sticky: once set, all subsequent operations return it.
	err error
	// b is the batch being read; used to look up queries and run callbacks.
	b *Batch
	// qqIdx is the index of the next query to read in b.QueuedQueries.
	qqIdx  int
	closed bool
	// endTraced records whether TraceBatchEnd has already fired, so Close is
	// idempotent with respect to tracing.
	endTraced bool
}

// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
func ( *pipelineBatchResults) () (pgconn.CommandTag, error) {
	if .err != nil {
		return pgconn.CommandTag{}, .err
	}
	if .closed {
		return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
	}
	if .lastRows != nil && .lastRows.err != nil {
		.err = .lastRows.err
		return pgconn.CommandTag{}, .err
	}

	, ,  := .nextQueryAndArgs()
	if  != nil {
		return pgconn.CommandTag{}, 
	}

	,  := .pipeline.GetResults()
	if  != nil {
		.err = 
		return pgconn.CommandTag{}, .err
	}
	var  pgconn.CommandTag
	switch results := .(type) {
	case *pgconn.ResultReader:
		, .err = .Close()
	default:
		return pgconn.CommandTag{}, fmt.Errorf("unexpected pipeline result: %T", )
	}

	if .conn.batchTracer != nil {
		.conn.batchTracer.TraceBatchQuery(.ctx, .conn, TraceBatchQueryData{
			SQL:        ,
			Args:       ,
			CommandTag: ,
			Err:        .err,
		})
	}

	return , .err
}

// Query reads the results from the next query in the batch as if the query has been sent with Query.
func ( *pipelineBatchResults) () (Rows, error) {
	if .err != nil {
		return &baseRows{err: .err, closed: true}, .err
	}

	if .closed {
		 := fmt.Errorf("batch already closed")
		return &baseRows{err: , closed: true}, 
	}

	if .lastRows != nil && .lastRows.err != nil {
		.err = .lastRows.err
		return &baseRows{err: .err, closed: true}, .err
	}

	, ,  := .nextQueryAndArgs()
	if  != nil {
		return &baseRows{err: , closed: true}, 
	}

	 := .conn.getRows(.ctx, , )
	.batchTracer = .conn.batchTracer
	.lastRows = 

	,  := .pipeline.GetResults()
	if  != nil {
		.err = 
		.err = 
		.closed = true

		if .conn.batchTracer != nil {
			.conn.batchTracer.TraceBatchQuery(.ctx, .conn, TraceBatchQueryData{
				SQL:  ,
				Args: ,
				Err:  ,
			})
		}
	} else {
		switch results := .(type) {
		case *pgconn.ResultReader:
			.resultReader = 
		default:
			 = fmt.Errorf("unexpected pipeline result: %T", )
			.err = 
			.err = 
			.closed = true
		}
	}

	return , .err
}

// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
func ( *pipelineBatchResults) () Row {
	,  := .Query()
	return (*connRow)(.(*baseRows))
}

// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
// resyncronize the connection with the server. In this case the underlying connection will have been closed.
func ( *pipelineBatchResults) () error {
	defer func() {
		if !.endTraced {
			if .conn.batchTracer != nil {
				.conn.batchTracer.TraceBatchEnd(.ctx, .conn, TraceBatchEndData{Err: .err})
			}
			.endTraced = true
		}

		invalidateCachesOnBatchResultsError(.conn, .b, .err)
	}()

	if .err == nil && .lastRows != nil && .lastRows.err != nil {
		.err = .lastRows.err
	}

	if .closed {
		return .err
	}

	// Read and run fn for all remaining items
	for .err == nil && !.closed && .b != nil && .qqIdx < len(.b.QueuedQueries) {
		if .b.QueuedQueries[.qqIdx].Fn != nil {
			 := .b.QueuedQueries[.qqIdx].Fn()
			if  != nil {
				.err = 
			}
		} else {
			.Exec()
		}
	}

	.closed = true

	 := .pipeline.Close()
	if .err == nil {
		.err = 
	}

	return .err
}

func ( *pipelineBatchResults) () error {
	return .err
}

func ( *pipelineBatchResults) () ( string,  []any,  error) {
	if .b == nil {
		return "", nil, errors.New("no reference to batch")
	}

	if .qqIdx >= len(.b.QueuedQueries) {
		return "", nil, errors.New("no more results in batch")
	}

	 := .b.QueuedQueries[.qqIdx]
	.qqIdx++
	return .SQL, .Arguments, nil
}

// emptyBatchResults implements BatchResults for a batch that contained no
// queries, so there are never any results to read.
type emptyBatchResults struct {
	conn   *Conn
	closed bool
}

// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
func ( *emptyBatchResults) () (pgconn.CommandTag, error) {
	if .closed {
		return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
	}
	return pgconn.CommandTag{}, errors.New("no more results in batch")
}

// Query reads the results from the next query in the batch as if the query has been sent with Query.
func ( *emptyBatchResults) () (Rows, error) {
	if .closed {
		 := fmt.Errorf("batch already closed")
		return &baseRows{err: , closed: true}, 
	}

	 := .conn.getRows(context.Background(), "", nil)
	.err = errors.New("no more results in batch")
	.closed = true
	return , .err
}

// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
func ( *emptyBatchResults) () Row {
	,  := .Query()
	return (*connRow)(.(*baseRows))
}

// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
// resyncronize the connection with the server. In this case the underlying connection will have been closed.
func ( *emptyBatchResults) () error {
	.closed = true
	return nil
}

// invalidates statement and description caches on batch results error
func ( *Conn,  *Batch,  error) {
	if  != nil &&  != nil &&  != nil {
		if  := .statementCache;  != nil {
			for ,  := range .QueuedQueries {
				.Invalidate(.SQL)
			}
		}

		if  := .descriptionCache;  != nil {
			for ,  := range .QueuedQueries {
				.Invalidate(.SQL)
			}
		}
	}
}

// ErrPreprocessingBatch occurs when an error is encountered while preprocessing a batch.
// The two preprocessing steps are "prepare" (server-side SQL parse/plan) and
// "build" (client-side argument encoding).
type ErrPreprocessingBatch struct {
	step string // "prepare" or "build"
	sql  string
	err  error
}

// newErrPreprocessingBatch constructs an ErrPreprocessingBatch for the given
// preprocessing step, SQL text, and underlying error.
func newErrPreprocessingBatch(step, sql string, err error) ErrPreprocessingBatch {
	return ErrPreprocessingBatch{step: step, sql: sql, err: err}
}

// Error implements the error interface.
func (e ErrPreprocessingBatch) Error() string {
	// intentionally not including the SQL query in the error message
	// to avoid leaking potentially sensitive information into logs.
	// If the user wants the SQL, they can call SQL().
	return fmt.Sprintf("error preprocessing batch (%s): %v", e.step, e.err)
}

// Unwrap returns the underlying error so errors.Is / errors.As work through
// this wrapper.
func (e ErrPreprocessingBatch) Unwrap() error {
	return e.err
}

// SQL returns the SQL text of the query that failed preprocessing.
func (e ErrPreprocessingBatch) SQL() string {
	return e.sql
}