package pgx

import (
	"context"
	"errors"
	"fmt"

	"github.com/jackc/pgx/v5/pgconn"
)

// QueuedQuery is a query that has been queued for execution via a Batch.
type QueuedQuery struct {
	query     string
	arguments []any
	fn        batchItemFunc
	sd        *pgconn.StatementDescription
}

type batchItemFunc func(br BatchResults) error

// Query sets fn to be called when the response to qq is received.
func (qq *QueuedQuery) Query(fn func(rows Rows) error) {
	qq.fn = func(br BatchResults) error {
		rows, _ := br.Query()
		defer rows.Close()

		err := fn(rows)
		if err != nil {
			return err
		}
		rows.Close()

		return rows.Err()
	}
}

// QueryRow sets fn to be called when the response to qq is received.
func (qq *QueuedQuery) QueryRow(fn func(row Row) error) {
	qq.fn = func(br BatchResults) error {
		row := br.QueryRow()
		return fn(row)
	}
}

// Exec sets fn to be called when the response to qq is received.
func (qq *QueuedQuery) Exec(fn func(ct pgconn.CommandTag) error) {
	qq.fn = func(br BatchResults) error {
		ct, err := br.Exec()
		if err != nil {
			return err
		}

		return fn(ct)
	}
}
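
// A minimal sketch of registering per-query callbacks before sending a batch
// (assumptions not shown in this file: a connected *Conn named conn, a context
// named ctx, and a hypothetical users table; SendBatch is defined elsewhere in
// this package):
//
//	batch := &Batch{}
//	batch.Queue("SELECT name FROM users").Query(func(rows Rows) error {
//		for rows.Next() {
//			var name string
//			if err := rows.Scan(&name); err != nil {
//				return err
//			}
//		}
//		return rows.Err()
//	})
//	batch.Queue("UPDATE users SET active = true").Exec(func(ct pgconn.CommandTag) error {
//		_ = ct.RowsAffected() // number of rows the UPDATE touched
//		return nil
//	})
//	// Closing the results runs the registered callbacks in queue order.
//	if err := conn.SendBatch(ctx, batch).Close(); err != nil {
//		// handle batch or callback error
//	}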

// Batch queries are a way of bundling multiple queries together to avoid
// unnecessary network round trips. A Batch must only be sent once.
type Batch struct {
	queuedQueries []*QueuedQuery
}

// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement.
func (b *Batch) Queue(query string, arguments ...any) *QueuedQuery {
	qq := &QueuedQuery{
		query:     query,
		arguments: arguments,
	}
	b.queuedQueries = append(b.queuedQueries, qq)
	return qq
}

// Len returns the number of queries that have been queued so far.
func (b *Batch) Len() int {
	return len(b.queuedQueries)
}
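
// A minimal sketch of queueing queries and reading the results positionally,
// without callbacks (assumptions not shown in this file: conn, ctx, and a
// hypothetical widgets table; SendBatch is defined elsewhere in this package):
//
//	batch := &Batch{}
//	batch.Queue("INSERT INTO widgets (name) VALUES ($1)", "lever")
//	batch.Queue("SELECT count(*) FROM widgets")
//
//	results := conn.SendBatch(ctx, batch)
//	// Results come back in the order the queries were queued.
//	if _, err := results.Exec(); err != nil {
//		// handle INSERT error
//	}
//	var count int64
//	if err := results.QueryRow().Scan(&count); err != nil {
//		// handle SELECT error
//	}
//	// Close must be called before the connection can be used again.
//	if err := results.Close(); err != nil {
//		// handle error
//	}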

type BatchResults interface {
	// Exec reads the results from the next query in the batch as if the query has been sent with Conn.Exec. Prefer
	// calling Exec on the QueuedQuery.
	Exec() (pgconn.CommandTag, error)

	// Query reads the results from the next query in the batch as if the query has been sent with Conn.Query. Prefer
	// calling Query on the QueuedQuery.
	Query() (Rows, error)

	// QueryRow reads the results from the next query in the batch as if the query has been sent with Conn.QueryRow.
	// Prefer calling QueryRow on the QueuedQuery.
	QueryRow() Row

	// Close closes the batch operation. All unread results are read and any callback functions registered with
	// QueuedQuery.Query, QueuedQuery.QueryRow, or QueuedQuery.Exec will be called. If a callback function returns an
	// error or the batch encounters an error subsequent callback functions will not be called.
	//
	// Close must be called before the underlying connection can be used again. Any error that occurred during a batch
	// operation may have made it impossible to resynchronize the connection with the server. In this case the underlying
	// connection will have been closed.
	//
	// Close is safe to call multiple times. If it returns an error subsequent calls will return the same error. Callback
	// functions will not be rerun.
	Close() error
}
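
// A minimal sketch of the read-then-Close discipline described above
// (assumptions: conn, ctx, and batch as in the earlier sketches):
//
//	results := conn.SendBatch(ctx, batch)
//	defer results.Close() // safe to call multiple times
//
//	ct, err := results.Exec()
//	if err != nil {
//		// the batch is now in an error state; Close will report the same error
//	}
//	_ = ct
//
//	// Close reads any remaining results and runs any registered callbacks.
//	if err := results.Close(); err != nil {
//		// handle error
//	}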

type batchResults struct {
	ctx       context.Context
	conn      *Conn
	mrr       *pgconn.MultiResultReader
	err       error
	b         *Batch
	qqIdx     int
	closed    bool
	endTraced bool
}

// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
func (br *batchResults) Exec() (pgconn.CommandTag, error) {
	if br.err != nil {
		return pgconn.CommandTag{}, br.err
	}
	if br.closed {
		return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
	}

	query, arguments, _ := br.nextQueryAndArgs()

	if !br.mrr.NextResult() {
		err := br.mrr.Close()
		if err == nil {
			err = errors.New("no result")
		}
		if br.conn.batchTracer != nil {
			br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
				SQL:  query,
				Args: arguments,
				Err:  err,
			})
		}
		return pgconn.CommandTag{}, err
	}

	commandTag, err := br.mrr.ResultReader().Close()
	if err != nil {
		br.err = err
		br.mrr.Close()
	}

	if br.conn.batchTracer != nil {
		br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
			SQL:        query,
			Args:       arguments,
			CommandTag: commandTag,
			Err:        br.err,
		})
	}

	return commandTag, br.err
}

// Query reads the results from the next query in the batch as if the query has been sent with Query.
func (br *batchResults) Query() (Rows, error) {
	query, arguments, ok := br.nextQueryAndArgs()
	if !ok {
		query = "batch query"
	}

	if br.err != nil {
		return &baseRows{err: br.err, closed: true}, br.err
	}

	if br.closed {
		alreadyClosedErr := fmt.Errorf("batch already closed")
		return &baseRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
	}

	rows := br.conn.getRows(br.ctx, query, arguments)
	rows.batchTracer = br.conn.batchTracer

	if !br.mrr.NextResult() {
		rows.err = br.mrr.Close()
		if rows.err == nil {
			rows.err = errors.New("no result")
		}
		rows.closed = true

		if br.conn.batchTracer != nil {
			br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
				SQL:  query,
				Args: arguments,
				Err:  rows.err,
			})
		}

		return rows, rows.err
	}

	rows.resultReader = br.mrr.ResultReader()
	return rows, nil
}

// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
func (br *batchResults) QueryRow() Row {
	rows, _ := br.Query()
	return (*connRow)(rows.(*baseRows))
}

// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
// resynchronize the connection with the server. In this case the underlying connection will have been closed.
func (br *batchResults) Close() error {
	defer func() {
		if !br.endTraced {
			if br.conn != nil && br.conn.batchTracer != nil {
				br.conn.batchTracer.TraceBatchEnd(br.ctx, br.conn, TraceBatchEndData{Err: br.err})
			}
			br.endTraced = true
		}
	}()

	if br.err != nil {
		return br.err
	}

	if br.closed {
		return nil
	}

	// Read and run fn for all remaining items
	for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.queuedQueries) {
		if br.b.queuedQueries[br.qqIdx].fn != nil {
			err := br.b.queuedQueries[br.qqIdx].fn(br)
			if err != nil {
				br.err = err
			}
		} else {
			br.Exec()
		}
	}

	br.closed = true

	err := br.mrr.Close()
	if br.err == nil {
		br.err = err
	}

	return br.err
}

func (br *batchResults) earlyError() error {
	return br.err
}

func (br *batchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
	if br.b != nil && br.qqIdx < len(br.b.queuedQueries) {
		bi := br.b.queuedQueries[br.qqIdx]
		query = bi.query
		args = bi.arguments
		ok = true
		br.qqIdx++
	}
	return
}

type pipelineBatchResults struct {
	ctx       context.Context
	conn      *Conn
	pipeline  *pgconn.Pipeline
	lastRows  *baseRows
	err       error
	b         *Batch
	qqIdx     int
	closed    bool
	endTraced bool
}

// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
func (br *pipelineBatchResults) Exec() (pgconn.CommandTag, error) {
	if br.err != nil {
		return pgconn.CommandTag{}, br.err
	}
	if br.closed {
		return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
	}
	if br.lastRows != nil && br.lastRows.err != nil {
		return pgconn.CommandTag{}, br.err
	}

	query, arguments, _ := br.nextQueryAndArgs()

	results, err := br.pipeline.GetResults()
	if err != nil {
		br.err = err
		return pgconn.CommandTag{}, br.err
	}
	var commandTag pgconn.CommandTag
	switch results := results.(type) {
	case *pgconn.ResultReader:
		commandTag, br.err = results.Close()
	default:
		return pgconn.CommandTag{}, fmt.Errorf("unexpected pipeline result: %T", results)
	}

	if br.conn.batchTracer != nil {
		br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
			SQL:        query,
			Args:       arguments,
			CommandTag: commandTag,
			Err:        br.err,
		})
	}

	return commandTag, br.err
}

// Query reads the results from the next query in the batch as if the query has been sent with Query.
func (br *pipelineBatchResults) Query() (Rows, error) {
	if br.err != nil {
		return &baseRows{err: br.err, closed: true}, br.err
	}

	if br.closed {
		alreadyClosedErr := fmt.Errorf("batch already closed")
		return &baseRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
	}

	if br.lastRows != nil && br.lastRows.err != nil {
		br.err = br.lastRows.err
		return &baseRows{err: br.err, closed: true}, br.err
	}

	query, arguments, ok := br.nextQueryAndArgs()
	if !ok {
		query = "batch query"
	}

	rows := br.conn.getRows(br.ctx, query, arguments)
	rows.batchTracer = br.conn.batchTracer
	br.lastRows = rows

	results, err := br.pipeline.GetResults()
	if err != nil {
		br.err = err
		rows.err = err
		rows.closed = true

		if br.conn.batchTracer != nil {
			br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
				SQL:  query,
				Args: arguments,
				Err:  err,
			})
		}
	} else {
		switch results := results.(type) {
		case *pgconn.ResultReader:
			rows.resultReader = results
		default:
			err = fmt.Errorf("unexpected pipeline result: %T", results)
			br.err = err
			rows.err = err
			rows.closed = true
		}
	}

	return rows, rows.err
}

// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
func (br *pipelineBatchResults) QueryRow() Row {
	rows, _ := br.Query()
	return (*connRow)(rows.(*baseRows))
}

// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
// resynchronize the connection with the server. In this case the underlying connection will have been closed.
func (br *pipelineBatchResults) Close() error {
	defer func() {
		if !br.endTraced {
			if br.conn.batchTracer != nil {
				br.conn.batchTracer.TraceBatchEnd(br.ctx, br.conn, TraceBatchEndData{Err: br.err})
			}
			br.endTraced = true
		}
	}()

	if br.err == nil && br.lastRows != nil && br.lastRows.err != nil {
		br.err = br.lastRows.err
		return br.err
	}

	if br.closed {
		return br.err
	}

	// Read and run fn for all remaining items
	for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.queuedQueries) {
		if br.b.queuedQueries[br.qqIdx].fn != nil {
			err := br.b.queuedQueries[br.qqIdx].fn(br)
			if err != nil {
				br.err = err
			}
		} else {
			br.Exec()
		}
	}

	br.closed = true

	err := br.pipeline.Close()
	if br.err == nil {
		br.err = err
	}

	return br.err
}

func (br *pipelineBatchResults) earlyError() error {
	return br.err
}

func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
	if br.b != nil && br.qqIdx < len(br.b.queuedQueries) {
		bi := br.b.queuedQueries[br.qqIdx]
		query = bi.query
		args = bi.arguments
		ok = true
		br.qqIdx++
	}
	return
}