package pgx

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"github.com/jackc/pgx/v5/internal/pgio"
	"github.com/jackc/pgx/v5/pgconn"
)

// CopyFromRows returns a CopyFromSource interface over the provided rows slice
// making it usable by *Conn.CopyFrom.
func ( [][]any) CopyFromSource {
	return &copyFromRows{rows: , idx: -1}
}

// copyFromRows is a CopyFromSource backed by an in-memory slice of rows.
type copyFromRows struct {
	rows [][]any // all rows to be copied
	idx  int     // index of the current row; -1 before the first Next()
}

func ( *copyFromRows) () bool {
	.idx++
	return .idx < len(.rows)
}

func ( *copyFromRows) () ([]any, error) {
	return .rows[.idx], nil
}

func ( *copyFromRows) () error {
	return nil
}

// CopyFromSlice returns a CopyFromSource interface over a dynamic func
// making it usable by *Conn.CopyFrom.
func ( int,  func(int) ([]any, error)) CopyFromSource {
	return &copyFromSlice{next: , idx: -1, len: }
}

// copyFromSlice is a CopyFromSource that produces rows on demand via a
// caller-supplied generator function.
type copyFromSlice struct {
	next func(int) ([]any, error) // generator: returns the row at the given index
	idx  int                      // index of the current row; -1 before the first Next()
	len  int                      // total number of rows
	err  error                    // first error returned by next, surfaced via Err()
}

func ( *copyFromSlice) () bool {
	.idx++
	return .idx < .len
}

func ( *copyFromSlice) () ([]any, error) {
	,  := .next(.idx)
	if  != nil {
		.err = 
	}
	return , 
}

func ( *copyFromSlice) () error {
	return .err
}

// CopyFromSource is the interface used by *Conn.CopyFrom as the source for copy data.
type CopyFromSource interface {
	// Next returns true if there is another row and makes the next row data
	// available to Values(). When there are no more rows available or an error
	// has occurred it returns false.
	Next() bool

	// Values returns the values for the current row. It must only be called
	// after Next() has returned true.
	Values() ([]any, error)

	// Err returns any error that has been encountered by the CopyFromSource. If
	// this is not nil *Conn.CopyFrom will abort the copy.
	Err() error
}

// copyFrom bundles the state for a single COPY FROM operation driven by
// Conn.CopyFrom.
type copyFrom struct {
	conn          *Conn
	tableName     Identifier     // destination table
	columnNames   []string       // destination columns, in row-value order
	rowSrc        CopyFromSource // source of row data
	readerErrChan chan error
	mode          QueryExecMode // controls how the statement description is obtained
}

func ( *copyFrom) ( context.Context) (int64, error) {
	if .conn.copyFromTracer != nil {
		 = .conn.copyFromTracer.TraceCopyFromStart(, .conn, TraceCopyFromStartData{
			TableName:   .tableName,
			ColumnNames: .columnNames,
		})
	}

	 := .tableName.Sanitize()
	 := &bytes.Buffer{}
	for ,  := range .columnNames {
		if  != 0 {
			.WriteString(", ")
		}
		.WriteString(quoteIdentifier())
	}
	 := .String()

	var  *pgconn.StatementDescription
	switch .mode {
	case QueryExecModeExec, QueryExecModeSimpleProtocol:
		// These modes don't support the binary format. Before the inclusion of the
		// QueryExecModes, Conn.Prepare was called on every COPY operation to get
		// the OIDs. These prepared statements were not cached.
		//
		// Since that's the same behavior provided by QueryExecModeDescribeExec,
		// we'll default to that mode.
		.mode = QueryExecModeDescribeExec
		fallthrough
	case QueryExecModeCacheStatement, QueryExecModeCacheDescribe, QueryExecModeDescribeExec:
		var  error
		,  = .conn.getStatementDescription(
			,
			.mode,
			fmt.Sprintf("select %s from %s", , ),
		)
		if  != nil {
			return 0, fmt.Errorf("statement description failed: %w", )
		}
	default:
		return 0, fmt.Errorf("unknown QueryExecMode: %v", .mode)
	}

	,  := io.Pipe()
	 := make(chan struct{})

	go func() {
		defer close()

		// Purposely NOT using defer w.Close(). See https://github.com/golang/go/issues/24283.
		 := .conn.wbuf

		 = append(, "PGCOPY\n\377\r\n\000"...)
		 = pgio.AppendInt32(, 0)
		 = pgio.AppendInt32(, 0)

		 := true
		for  {
			var  error
			, ,  = .buildCopyBuf(, )
			if  != nil {
				.CloseWithError()
				return
			}

			if .rowSrc.Err() != nil {
				.CloseWithError(.rowSrc.Err())
				return
			}

			if len() > 0 {
				_,  = .Write()
				if  != nil {
					.Close()
					return
				}
			}

			 = [:0]
		}

		.Close()
	}()

	,  := .conn.pgConn.CopyFrom(, , fmt.Sprintf("copy %s ( %s ) from stdin binary;", , ))

	.Close()
	<-

	if .conn.copyFromTracer != nil {
		.conn.copyFromTracer.TraceCopyFromEnd(, .conn, TraceCopyFromEndData{
			CommandTag: ,
			Err:        ,
		})
	}

	return .RowsAffected(), 
}

func ( *copyFrom) ( []byte,  *pgconn.StatementDescription) (bool, []byte, error) {
	const  = 65536 - 5 // The packet has a 5-byte header
	 := 0
	 := 0

	for .rowSrc.Next() {
		 = len()

		,  := .rowSrc.Values()
		if  != nil {
			return false, nil, 
		}
		if len() != len(.columnNames) {
			return false, nil, fmt.Errorf("expected %d values, got %d values", len(.columnNames), len())
		}

		 = pgio.AppendInt16(, int16(len(.columnNames)))
		for ,  := range  {
			,  = encodeCopyValue(.conn.typeMap, , .Fields[].DataTypeOID, )
			if  != nil {
				return false, nil, 
			}
		}

		 := len() - 
		if  >  {
			 = 
		}

		// Try not to overflow size of the buffer PgConn.CopyFrom will be reading into. If that happens then the nature of
		// io.Pipe means that the next Read will be short. This can lead to pathological send sizes such as 65531, 13, 65531
		// 13, 65531, 13, 65531, 13.
		if len() > - {
			return true, , nil
		}
	}

	return false, , nil
}

// CopyFrom uses the PostgreSQL copy protocol to perform bulk data insertion. It returns the number of rows copied and
// an error.
//
// CopyFrom requires all values use the binary format. A pgtype.Type that supports the binary format must be registered
// for the type of each column. Almost all types implemented by pgx support the binary format.
//
// Even though enum types appear to be strings they still must be registered to use with CopyFrom. This can be done with
// Conn.LoadType and pgtype.Map.RegisterType.
func ( *Conn) ( context.Context,  Identifier,  []string,  CopyFromSource) (int64, error) {
	 := &copyFrom{
		conn:          ,
		tableName:     ,
		columnNames:   ,
		rowSrc:        ,
		readerErrChan: make(chan error),
		mode:          .config.DefaultQueryExecMode,
	}

	return .run()
}