package pgx

import (
	"context"
	"errors"
	"fmt"
	"reflect"
	"strings"
	"sync"
	"time"

	"github.com/jackc/pgx/v5/pgconn"
	"github.com/jackc/pgx/v5/pgtype"
)

// Rows is the result set returned from *Conn.Query. Rows must be closed before
// the *Conn can be used again. Rows are closed by explicitly calling Close(),
// calling Next() until it returns false, or when a fatal error occurs.
//
// Once a Rows is closed the only methods that may be called are Close(), Err(),
// and CommandTag().
//
// Rows is an interface instead of a struct to allow tests to mock Query. However,
// adding a method to an interface is technically a breaking change. Because of this
// the Rows interface is partially excluded from semantic version requirements.
// Methods will not be removed or changed, but new methods may be added.
type Rows interface {
	// Close closes the rows, making the connection ready for use again. It is safe
	// to call Close after rows is already closed.
	Close()

	// Err returns any error that occurred while executing a query or reading its results. Err must be called after the
	// Rows is closed (either by calling Close or by Next returning false) to check if the query was successful. If it is
	// called before the Rows is closed it may return nil even if the query failed on the server.
	Err() error

	// CommandTag returns the command tag from this query. It is only available after Rows is closed.
	CommandTag() pgconn.CommandTag

	// FieldDescriptions returns the field descriptions of the columns. It may return nil. In particular this can occur
	// when there was an error executing the query.
	FieldDescriptions() []pgconn.FieldDescription

	// Next prepares the next row for reading. It returns true if there is another row and false if no more rows are
	// available or a fatal error has occurred. It automatically closes rows upon returning false (whether due to all rows
	// having been read or due to an error).
	//
	// Callers should check rows.Err() after rows.Next() returns false to detect whether result-set reading ended
	// prematurely due to an error. See Conn.Query for details.
	//
	// For simpler error handling, consider using the higher-level pgx v5 CollectRows() and ForEachRow() helpers instead.
	Next() bool

	// Scan reads the values from the current row into dest values positionally. dest can include pointers to core types,
	// values implementing the Scanner interface, and nil. nil will skip the value entirely. It is an error to call Scan
	// without first calling Next() and checking that it returned true. Rows is automatically closed upon error.
	Scan(dest ...any) error

	// Values returns the decoded row values. As with Scan(), it is an error to
	// call Values without first calling Next() and checking that it returned
	// true.
	Values() ([]any, error)

	// RawValues returns the unparsed bytes of the row values. The returned data is only valid until the next Next
	// call or the Rows is closed.
	RawValues() [][]byte

	// Conn returns the underlying *Conn on which the query was executed. This may return nil if Rows did not come from a
	// *Conn (e.g. if it was created by RowsFromResultReader)
	Conn() *Conn
}

// Row is a convenience wrapper over Rows that is returned by QueryRow.
//
// Row is an interface instead of a struct to allow tests to mock QueryRow. However,
// adding a method to an interface is technically a breaking change. Because of this
// the Row interface is partially excluded from semantic version requirements.
// Methods will not be removed or changed, but new methods may be added.
type Row interface {
	// Scan works the same as Rows, with the following exceptions. If no
	// rows were found it returns ErrNoRows. If multiple rows are returned it
	// ignores all but the first.
	Scan(dest ...any) error
}

// RowScanner scans an entire row at a time into the RowScanner.
type RowScanner interface {
	// ScanRow scans the row.
	ScanRow(rows Rows) error
}

// connRow implements the Row interface for Conn.QueryRow.
type connRow baseRows

func ( *connRow) ( ...any) ( error) {
	 := (*baseRows)()

	if .Err() != nil {
		return .Err()
	}

	for ,  := range  {
		if ,  := .(*pgtype.DriverBytes);  {
			.Close()
			return fmt.Errorf("cannot scan into *pgtype.DriverBytes from QueryRow")
		}
	}

	if !.Next() {
		if .Err() == nil {
			return ErrNoRows
		}
		return .Err()
	}

	.Scan(...)
	.Close()
	return .Err()
}

// baseRows implements the Rows interface for Conn.Query.
type baseRows struct {
	typeMap      *pgtype.Map
	resultReader *pgconn.ResultReader

	// values holds the raw bytes of the current row. It is only valid until the
	// next call to Next or Close.
	values [][]byte

	commandTag pgconn.CommandTag
	err        error
	closed     bool

	// scanPlans and scanTypes cache the plan chosen for each column on the first
	// Scan call so subsequent rows scanned into the same destination types do
	// not re-plan.
	scanPlans []pgtype.ScanPlan
	scanTypes []reflect.Type

	conn              *Conn
	multiResultReader *pgconn.MultiResultReader

	queryTracer QueryTracer
	batchTracer BatchTracer
	ctx         context.Context
	startTime   time.Time
	sql         string
	args        []any
	rowCount    int
}

func ( *baseRows) () []pgconn.FieldDescription {
	return .resultReader.FieldDescriptions()
}

func ( *baseRows) () {
	if .closed {
		return
	}

	.closed = true

	if .resultReader != nil {
		var  error
		.commandTag,  = .resultReader.Close()
		if .err == nil {
			.err = 
		}
	}

	if .multiResultReader != nil {
		 := .multiResultReader.Close()
		if .err == nil {
			.err = 
		}
	}

	if .err != nil && .conn != nil && .sql != "" {
		if  := .conn.statementCache;  != nil {
			.Invalidate(.sql)
		}

		if  := .conn.descriptionCache;  != nil {
			.Invalidate(.sql)
		}
	}

	if .batchTracer != nil {
		.batchTracer.TraceBatchQuery(.ctx, .conn, TraceBatchQueryData{SQL: .sql, Args: .args, CommandTag: .commandTag, Err: .err})
	} else if .queryTracer != nil {
		.queryTracer.TraceQueryEnd(.ctx, .conn, TraceQueryEndData{.commandTag, .err})
	}

	// Zero references to other memory allocations. This allows them to be GC'd even when the Rows still referenced. In
	// particular, when using pgxpool GC could be delayed as pgxpool.poolRows are allocated in large slices.
	//
	// https://github.com/jackc/pgx/pull/2269
	.values = nil
	.scanPlans = nil
	.scanTypes = nil
	.ctx = nil
	.sql = ""
	.args = nil
}

func ( *baseRows) () pgconn.CommandTag {
	return .commandTag
}

func ( *baseRows) () error {
	return .err
}

// fatal signals an error occurred after the query was sent to the server. It
// closes the rows automatically.
func ( *baseRows) ( error) {
	if .err != nil {
		return
	}

	.err = 
	.Close()
}

func ( *baseRows) () bool {
	if .closed {
		return false
	}

	if .resultReader.NextRow() {
		.rowCount++
		.values = .resultReader.Values()
		return true
	} else {
		.Close()
		return false
	}
}

func ( *baseRows) ( ...any) error {
	 := .typeMap
	 := .FieldDescriptions()
	 := .values

	if len() != len() {
		 := fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(), len())
		.fatal()
		return 
	}

	if len() == 1 {
		if ,  := [0].(RowScanner);  {
			 := .ScanRow()
			if  != nil {
				.fatal()
			}
			return 
		}
	}

	if len() != len() {
		 := fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(), len())
		.fatal()
		return 
	}

	if .scanPlans == nil {
		.scanPlans = make([]pgtype.ScanPlan, len())
		.scanTypes = make([]reflect.Type, len())
		for  := range  {
			.scanPlans[] = .PlanScan([].DataTypeOID, [].Format, [])
			.scanTypes[] = reflect.TypeOf([])
		}
	}

	for ,  := range  {
		if  == nil {
			continue
		}

		if .scanTypes[] != reflect.TypeOf() {
			.scanPlans[] = .PlanScan([].DataTypeOID, [].Format, [])
			.scanTypes[] = reflect.TypeOf([])
		}

		 := .scanPlans[].Scan([], )
		if  != nil {
			 = ScanArgError{ColumnIndex: , FieldName: [].Name, Err: }
			.fatal()
			return 
		}
	}

	return nil
}

func ( *baseRows) () ([]any, error) {
	if .closed {
		return nil, errors.New("rows is closed")
	}

	 := make([]any, 0, len(.FieldDescriptions()))

	for  := range .FieldDescriptions() {
		 := .values[]
		 := &.FieldDescriptions()[]

		if  == nil {
			 = append(, nil)
			continue
		}

		if ,  := .typeMap.TypeForOID(.DataTypeOID);  {
			,  := .Codec.DecodeValue(.typeMap, .DataTypeOID, .Format, )
			if  != nil {
				.fatal()
			}
			 = append(, )
		} else {
			switch .Format {
			case TextFormatCode:
				 = append(, string())
			case BinaryFormatCode:
				 := make([]byte, len())
				copy(, )
				 = append(, )
			default:
				.fatal(errors.New("unknown format code"))
			}
		}

		if .Err() != nil {
			return nil, .Err()
		}
	}

	return , .Err()
}

func ( *baseRows) () [][]byte {
	return .values
}

func ( *baseRows) () *Conn {
	return .conn
}

type ScanArgError struct {
	ColumnIndex int
	FieldName   string
	Err         error
}

func ( ScanArgError) () string {
	if .FieldName == "?column?" { // Don't include the fieldname if it's unknown
		return fmt.Sprintf("can't scan into dest[%d]: %v", .ColumnIndex, .Err)
	}

	return fmt.Sprintf("can't scan into dest[%d] (col: %s): %v", .ColumnIndex, .FieldName, .Err)
}

func ( ScanArgError) () error {
	return .Err
}

// ScanRow decodes raw row data into dest. It can be used to scan rows read from the lower level pgconn interface.
//
// typeMap - OID to Go type mapping.
// fieldDescriptions - OID and format of values
// values - the raw data as returned from the PostgreSQL server
// dest - the destination that values will be decoded into
func ( *pgtype.Map,  []pgconn.FieldDescription,  [][]byte,  ...any) error {
	if len() != len() {
		return fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(), len())
	}
	if len() != len() {
		return fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(), len())
	}

	for ,  := range  {
		if  == nil {
			continue
		}

		 := .Scan([].DataTypeOID, [].Format, [], )
		if  != nil {
			return ScanArgError{ColumnIndex: , FieldName: [].Name, Err: }
		}
	}

	return nil
}

// RowsFromResultReader returns a Rows that will read from values resultReader and decode with typeMap. It can be used
// to read from the lower level pgconn interface.
func ( *pgtype.Map,  *pgconn.ResultReader) Rows {
	return &baseRows{
		typeMap:      ,
		resultReader: ,
	}
}

// ForEachRow iterates through rows. For each row it scans into the elements of scans and calls fn. If any row
// fails to scan or fn returns an error the query will be aborted and the error will be returned. Rows will be closed
// when ForEachRow returns.
func ( Rows,  []any,  func() error) (pgconn.CommandTag, error) {
	defer .Close()

	for .Next() {
		 := .Scan(...)
		if  != nil {
			return pgconn.CommandTag{}, 
		}

		 = ()
		if  != nil {
			return pgconn.CommandTag{}, 
		}
	}

	if  := .Err();  != nil {
		return pgconn.CommandTag{}, 
	}

	return .CommandTag(), nil
}

// CollectableRow is the subset of Rows methods that a RowToFunc is allowed to call.
type CollectableRow interface {
	FieldDescriptions() []pgconn.FieldDescription
	Scan(dest ...any) error
	Values() ([]any, error)
	RawValues() [][]byte
}

// RowToFunc is a function that scans or otherwise converts row to a T.
type RowToFunc[T any] func(row CollectableRow) (T, error)

// AppendRows iterates through rows, calling fn for each row, and appending the results into a slice of T.
//
// This function closes the rows automatically on return.
func [ any,  ~[]]( ,  Rows,  RowToFunc[]) (, error) {
	defer .Close()

	for .Next() {
		,  := ()
		if  != nil {
			return nil, 
		}
		 = append(, )
	}

	if  := .Err();  != nil {
		return nil, 
	}

	return , nil
}

// CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T.
//
// This function closes the rows automatically on return.
func [ any]( Rows,  RowToFunc[]) ([], error) {
	return AppendRows([]{}, , )
}

// CollectOneRow calls fn for the first row in rows and returns the result. If no rows are found returns an error where errors.Is(ErrNoRows) is true.
// CollectOneRow is to CollectRows as QueryRow is to Query.
//
// This function closes the rows automatically on return.
func [ any]( Rows,  RowToFunc[]) (, error) {
	defer .Close()

	var  
	var  error

	if !.Next() {
		if  = .Err();  != nil {
			return , 
		}
		return , ErrNoRows
	}

	,  = ()
	if  != nil {
		return , 
	}

	// The defer rows.Close() won't have executed yet. If the query returned more than one row, rows would still be open.
	// rows.Close() must be called before rows.Err() so we explicitly call it here.
	.Close()
	return , .Err()
}

// CollectExactlyOneRow calls fn for the first row in rows and returns the result.
//   - If no rows are found returns an error where errors.Is(ErrNoRows) is true.
//   - If more than 1 row is found returns an error where errors.Is(ErrTooManyRows) is true.
//
// This function closes the rows automatically on return.
func [ any]( Rows,  RowToFunc[]) (, error) {
	defer .Close()

	var (
		   error
		 
	)

	if !.Next() {
		if  = .Err();  != nil {
			return , 
		}

		return , ErrNoRows
	}

	,  = ()
	if  != nil {
		return , 
	}

	if .Next() {
		var  

		return , ErrTooManyRows
	}

	return , .Err()
}

// RowTo returns a T scanned from row.
func [ any]( CollectableRow) (, error) {
	var  
	 := .Scan(&)
	return , 
}

// RowToAddrOf returns the address of a T scanned from row.
func [ any]( CollectableRow) (*, error) {
	var  
	 := .Scan(&)
	return &, 
}

// RowToMap returns a map scanned from row.
func ( CollectableRow) (map[string]any, error) {
	var  map[string]any
	 := .Scan((*mapRowScanner)(&))
	return , 
}

type mapRowScanner map[string]any

func ( *mapRowScanner) ( Rows) error {
	,  := .Values()
	if  != nil {
		return 
	}

	* = make(mapRowScanner, len())

	for  := range  {
		(*)[string(.FieldDescriptions()[].Name)] = []
	}

	return nil
}

// RowToStructByPos returns a T scanned from row. T must be a struct. T must have the same number of public fields as row
// has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then the field will be
// ignored.
func [ any]( CollectableRow) (, error) {
	var  
	 := (&positionalStructRowScanner{ptrToStruct: &}).ScanRow()
	return , 
}

// RowToAddrOfStructByPos returns the address of a T scanned from row. T must be a struct. T must have the same number a
// public fields as row has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then
// the field will be ignored.
func [ any]( CollectableRow) (*, error) {
	var  
	 := (&positionalStructRowScanner{ptrToStruct: &}).ScanRow()
	return &, 
}

type positionalStructRowScanner struct {
	ptrToStruct any
}

func ( *positionalStructRowScanner) ( CollectableRow) error {
	 := reflect.TypeOf(.ptrToStruct).Elem()
	 := lookupStructFields()
	if len(.RawValues()) > len() {
		return fmt.Errorf(
			"got %d values, but dst struct has only %d fields",
			len(.RawValues()),
			len(),
		)
	}
	 := setupStructScanTargets(.ptrToStruct, )
	return .Scan(...)
}

// Map from reflect.Type -> []structRowField
var positionalStructFieldMap sync.Map

func ( reflect.Type) []structRowField {
	if ,  := positionalStructFieldMap.Load();  {
		return .([]structRowField)
	}

	 := make([]int, 0, 1)
	 := computeStructFields(, make([]structRowField, 0, .NumField()), &)
	,  := positionalStructFieldMap.LoadOrStore(, )
	return .([]structRowField)
}

func (
	 reflect.Type,
	 []structRowField,
	 *[]int,
) []structRowField {
	 := len(*)
	* = append(*, 0)
	for  := 0;  < .NumField(); ++ {
		 := .Field()
		(*)[] = 
		// Handle anonymous struct embedding, but do not try to handle embedded pointers.
		if .Anonymous && .Type.Kind() == reflect.Struct {
			 = (.Type, , )
		} else if .PkgPath == "" {
			,  := .Tag.Lookup(structTagKey)
			if  == "-" {
				// Field is ignored, skip it.
				continue
			}
			 = append(, structRowField{
				path: append([]int(nil), *...),
			})
		}
	}
	* = (*)[:]
	return 
}

// RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func [ any]( CollectableRow) (, error) {
	var  
	 := (&namedStructRowScanner{ptrToStruct: &}).ScanRow()
	return , 
}

// RowToAddrOfStructByName returns the address of a T scanned from row. T must be a struct. T must have the same number
// of named public fields as row has fields. The row and T fields will be matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func [ any]( CollectableRow) (*, error) {
	var  
	 := (&namedStructRowScanner{ptrToStruct: &}).ScanRow()
	return &, 
}

// RowToStructByNameLax returns a T scanned from row. T must be a struct. T must have greater than or equal number of named public
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func [ any]( CollectableRow) (, error) {
	var  
	 := (&namedStructRowScanner{ptrToStruct: &, lax: true}).ScanRow()
	return , 
}

// RowToAddrOfStructByNameLax returns the address of a T scanned from row. T must be a struct. T must have greater than or
// equal number of named public fields as row has fields. The row and T fields will be matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func [ any]( CollectableRow) (*, error) {
	var  
	 := (&namedStructRowScanner{ptrToStruct: &, lax: true}).ScanRow()
	return &, 
}

type namedStructRowScanner struct {
	ptrToStruct any
	lax         bool
}

func ( *namedStructRowScanner) ( CollectableRow) error {
	 := reflect.TypeOf(.ptrToStruct).Elem()
	 := .FieldDescriptions()
	,  := lookupNamedStructFields(, )
	if  != nil {
		return 
	}
	if !.lax && .missingField != "" {
		return fmt.Errorf("cannot find field %s in returned row", .missingField)
	}
	 := .fields
	 := setupStructScanTargets(.ptrToStruct, )
	return .Scan(...)
}

// Map from namedStructFieldsKey -> *namedStructFields
var namedStructFieldMap sync.Map

// namedStructFieldsKey identifies a cached mapping: the struct type plus the
// exact set of result columns (joined into a single string).
type namedStructFieldsKey struct {
	t        reflect.Type
	colNames string
}

// namedStructFields is the cached result of matching a struct's fields to a
// set of result columns.
type namedStructFields struct {
	fields []structRowField
	// missingField is the first field from the struct without a corresponding row field.
	// This is used to construct the correct error message for non-lax queries.
	missingField string
}

func (
	 reflect.Type,
	 []pgconn.FieldDescription,
) (*namedStructFields, error) {
	 := namedStructFieldsKey{
		t:        ,
		colNames: joinFieldNames(),
	}
	if ,  := namedStructFieldMap.Load();  {
		return .(*namedStructFields), nil
	}

	// We could probably do two-levels of caching, where we compute the key -> fields mapping
	// for a type only once, cache it by type, then use that to compute the column -> fields
	// mapping for a given set of columns.
	 := make([]int, 0, 1)
	,  := computeNamedStructFields(
		,
		,
		make([]structRowField, len()),
		&,
	)
	for ,  := range  {
		if .path == nil {
			return nil, fmt.Errorf(
				"struct doesn't have corresponding row field %s",
				[].Name,
			)
		}
	}

	,  := namedStructFieldMap.LoadOrStore(
		,
		&namedStructFields{fields: , missingField: },
	)
	return .(*namedStructFields), nil
}

func ( []pgconn.FieldDescription) string {
	switch len() {
	case 0:
		return ""
	case 1:
		return [0].Name
	}

	 := len() - 1 // Space for separator bytes.
	for ,  := range  {
		 += len(.Name)
	}
	var  strings.Builder
	.Grow()
	.WriteString([0].Name)
	for ,  := range [1:] {
		.WriteByte(0) // Join with NUL byte as it's (presumably) not a valid column character.
		.WriteString(.Name)
	}
	return .String()
}

func (
	 []pgconn.FieldDescription,
	 reflect.Type,
	 []structRowField,
	 *[]int,
) ([]structRowField, string) {
	var  string
	 := len(*)
	* = append(*, 0)
	for  := 0;  < .NumField(); ++ {
		 := .Field()
		(*)[] = 
		if .PkgPath != "" && !.Anonymous {
			// Field is unexported, skip it.
			continue
		}
		// Handle anonymous struct embedding, but do not try to handle embedded pointers.
		if .Anonymous && .Type.Kind() == reflect.Struct {
			var  string
			,  = (
				,
				.Type,
				,
				,
			)
			if  == "" {
				 = 
			}
		} else {
			,  := .Tag.Lookup(structTagKey)
			if  {
				, _, _ = strings.Cut(, ",")
			}
			if  == "-" {
				// Field is ignored, skip it.
				continue
			}
			 := 
			if ! {
				 = .Name
			}
			 := fieldPosByName(, , !)
			if  == -1 {
				if  == "" {
					 = 
				}
				continue
			}
			[] = structRowField{
				path: append([]int(nil), *...),
			}
		}
	}
	* = (*)[:]

	return , 
}

const structTagKey = "db"

func ( []pgconn.FieldDescription,  string,  bool) ( int) {
	 = -1

	if  {
		 = strings.ReplaceAll(, "_", "")
	}
	for ,  := range  {
		if  {
			if strings.EqualFold(strings.ReplaceAll(.Name, "_", ""), ) {
				return 
			}
		} else {
			if .Name ==  {
				return 
			}
		}
	}
	return 
}

// structRowField describes a field of a struct.
//
// TODO: It would be a bit more efficient to track the path using the pointer
// offset within the (outermost) struct and use unsafe.Pointer arithmetic to
// construct references when scanning rows. However, it's not clear it's worth
// using unsafe for this.
type structRowField struct {
	// path is the index path to the field, suitable for reflect.Value.FieldByIndex.
	path []int
}

func ( any,  []structRowField) []any {
	 := make([]any, len())
	 := reflect.ValueOf().Elem()
	for ,  := range  {
		[] = .FieldByIndex(.path).Addr().Interface()
	}
	return 
}