package fasthttp

import (
	"errors"
	"net"
	"runtime"
	"strings"
	"sync"
	"time"
)

// workerPool serves incoming connections via a pool of workers
// in FILO order, i.e. the most recently stopped worker will serve the next
// incoming connection.
//
// Such a scheme keeps CPU caches hot (in theory).
type workerPool struct {
	// Function for serving server connections.
	// It must leave c unclosed.
	WorkerFunc ServeHandler

	// Maximum number of concurrently running workers.
	MaxWorkersCount int

	// If true, all connection-serving errors are logged; otherwise
	// common client-side errors (broken pipe, reset, timeouts, ...) are suppressed.
	LogAllErrors bool

	// Idle workers are stopped after this duration; <=0 means the
	// default (10s) is used.
	MaxIdleWorkerDuration time.Duration

	// Destination for error messages emitted while serving connections.
	Logger Logger

	// lock guards workersCount, mustStop and ready.
	lock         sync.Mutex
	workersCount int
	mustStop     bool

	// ready is a LIFO stack of idle worker channels, ordered by lastUseTime
	// (oldest first), so the hottest worker serves the next connection.
	ready []*workerChan

	// stopCh signals the background cleaner goroutine to exit; non-nil while running.
	stopCh chan struct{}

	// workerChanPool recycles workerChan objects between worker lifetimes.
	workerChanPool sync.Pool

	// connState is invoked on connection state transitions (hijacked/closed).
	connState func(net.Conn, ConnState)
}

// workerChan is the per-worker handoff channel plus the timestamp of its
// last use, which the cleaner inspects to retire idle workers.
type workerChan struct {
	// lastUseTime is set when the worker is released back to the ready stack.
	lastUseTime time.Time
	ch          chan net.Conn
}

func ( *workerPool) () {
	if .stopCh != nil {
		return
	}
	.stopCh = make(chan struct{})
	 := .stopCh
	.workerChanPool.New = func() interface{} {
		return &workerChan{
			ch: make(chan net.Conn, workerChanCap),
		}
	}
	go func() {
		var  []*workerChan
		for {
			.clean(&)
			select {
			case <-:
				return
			default:
				time.Sleep(.getMaxIdleWorkerDuration())
			}
		}
	}()
}

func ( *workerPool) () {
	if .stopCh == nil {
		return
	}
	close(.stopCh)
	.stopCh = nil

	// Stop all the workers waiting for incoming connections.
	// Do not wait for busy workers - they will stop after
	// serving the connection and noticing wp.mustStop = true.
	.lock.Lock()
	 := .ready
	for  := range  {
		[].ch <- nil
		[] = nil
	}
	.ready = [:0]
	.mustStop = true
	.lock.Unlock()
}

func ( *workerPool) () time.Duration {
	if .MaxIdleWorkerDuration <= 0 {
		return 10 * time.Second
	}
	return .MaxIdleWorkerDuration
}

func ( *workerPool) ( *[]*workerChan) {
	 := .getMaxIdleWorkerDuration()

	// Clean least recently used workers if they didn't serve connections
	// for more than maxIdleWorkerDuration.
	 := time.Now().Add(-)

	.lock.Lock()
	 := .ready
	 := len()

	// Use binary-search algorithm to find out the index of the least recently worker which can be cleaned up.
	, ,  := 0, -1, 0
	for  <=  {
		 = ( + ) / 2
		if .After(.ready[].lastUseTime) {
			 =  + 1
		} else {
			 =  - 1
		}
	}
	 := 
	if  == -1 {
		.lock.Unlock()
		return
	}

	* = append((*)[:0], [:+1]...)
	 := copy(, [+1:])
	for  = ;  < ; ++ {
		[] = nil
	}
	.ready = [:]
	.lock.Unlock()

	// Notify obsolete workers to stop.
	// This notification must be outside the wp.lock, since ch.ch
	// may be blocking and may consume a lot of time if many workers
	// are located on non-local CPUs.
	 := *
	for  := range  {
		[].ch <- nil
		[] = nil
	}
}

func ( *workerPool) ( net.Conn) bool {
	 := .getCh()
	if  == nil {
		return false
	}
	.ch <- 
	return true
}

// workerChanCap is the buffer size for worker channels, chosen once at
// startup based on the number of usable CPUs.
var workerChanCap = func() int {
	// Use blocking workerChan if GOMAXPROCS=1.
	// This immediately switches Serve to WorkerFunc, which results
	// in higher performance (under go1.5 at least).
	//
	// Use non-blocking workerChan if GOMAXPROCS>1,
	// since otherwise the Serve caller (Acceptor) may lag accepting
	// new connections if WorkerFunc is CPU-bound.
	capacity := 1
	if runtime.GOMAXPROCS(0) == 1 {
		capacity = 0
	}
	return capacity
}()

func ( *workerPool) () *workerChan {
	var  *workerChan
	 := false

	.lock.Lock()
	 := .ready
	 := len() - 1
	if  < 0 {
		if .workersCount < .MaxWorkersCount {
			 = true
			.workersCount++
		}
	} else {
		 = []
		[] = nil
		.ready = [:]
	}
	.lock.Unlock()

	if  == nil {
		if ! {
			return nil
		}
		 := .workerChanPool.Get()
		 = .(*workerChan)
		go func() {
			.workerFunc()
			.workerChanPool.Put()
		}()
	}
	return 
}

func ( *workerPool) ( *workerChan) bool {
	.lastUseTime = time.Now()
	.lock.Lock()
	if .mustStop {
		.lock.Unlock()
		return false
	}
	.ready = append(.ready, )
	.lock.Unlock()
	return true
}

func ( *workerPool) ( *workerChan) {
	var  net.Conn

	var  error
	for  = range .ch {
		if  == nil {
			break
		}

		if  = .WorkerFunc();  != nil &&  != errHijacked {
			 := .Error()
			if .LogAllErrors || !(strings.Contains(, "broken pipe") ||
				strings.Contains(, "reset by peer") ||
				strings.Contains(, "request headers: small read buffer") ||
				strings.Contains(, "unexpected EOF") ||
				strings.Contains(, "i/o timeout") ||
				errors.Is(, ErrBadTrailer)) {
				.Logger.Printf("error when serving connection %q<->%q: %v", .LocalAddr(), .RemoteAddr(), )
			}
		}
		if  == errHijacked {
			.connState(, StateHijacked)
		} else {
			_ = .Close()
			.connState(, StateClosed)
		}
		 = nil

		if !.release() {
			break
		}
	}

	.lock.Lock()
	.workersCount--
	.lock.Unlock()
}