package fasthttp

import (
	"bytes"
	"errors"
	"fmt"
	"html"
	"io"
	"mime"
	"net/http"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/andybalholm/brotli"
	"github.com/klauspost/compress/gzip"
	"github.com/valyala/bytebufferpool"
)

// ServeFileBytesUncompressed returns HTTP response containing file contents
// from the given path.
//
// Directory contents is returned if path points to directory.
//
// ServeFileBytes may be used for saving network traffic when serving files
// with good compression ratio.
//
// See also RequestCtx.SendFileBytes.
//
// WARNING: do not pass any user supplied paths to this function!
// WARNING: if path is based on user input users will be able to request
// any file on your filesystem! Use fasthttp.FS with a sane Root instead.
func ( *RequestCtx,  []byte) {
	ServeFileUncompressed(, b2s())
}

// ServeFileUncompressed returns HTTP response containing file contents
// from the given path.
//
// Directory contents is returned if path points to directory.
//
// ServeFile may be used for saving network traffic when serving files
// with good compression ratio.
//
// See also RequestCtx.SendFile.
//
// WARNING: do not pass any user supplied paths to this function!
// WARNING: if path is based on user input users will be able to request
// any file on your filesystem! Use fasthttp.FS with a sane Root instead.
func ( *RequestCtx,  string) {
	.Request.Header.DelBytes(strAcceptEncoding)
	ServeFile(, )
}

// ServeFileBytes returns HTTP response containing compressed file contents
// from the given path.
//
// HTTP response may contain uncompressed file contents in the following cases:
//
//   - Missing 'Accept-Encoding: gzip' request header.
//   - No write access to directory containing the file.
//
// Directory contents is returned if path points to directory.
//
// Use ServeFileBytesUncompressed is you don't need serving compressed
// file contents.
//
// See also RequestCtx.SendFileBytes.
//
// WARNING: do not pass any user supplied paths to this function!
// WARNING: if path is based on user input users will be able to request
// any file on your filesystem! Use fasthttp.FS with a sane Root instead.
func ( *RequestCtx,  []byte) {
	ServeFile(, b2s())
}

// ServeFile returns HTTP response containing compressed file contents
// from the given path.
//
// HTTP response may contain uncompressed file contents in the following cases:
//
//   - Missing 'Accept-Encoding: gzip' request header.
//   - No write access to directory containing the file.
//
// Directory contents is returned if path points to directory.
//
// Use ServeFileUncompressed is you don't need serving compressed file contents.
//
// See also RequestCtx.SendFile.
//
// WARNING: do not pass any user supplied paths to this function!
// WARNING: if path is based on user input users will be able to request
// any file on your filesystem! Use fasthttp.FS with a sane Root instead.
func ( *RequestCtx,  string) {
	rootFSOnce.Do(func() {
		rootFSHandler = rootFS.NewRequestHandler()
	})

	if len() == 0 || !filepath.IsAbs() {
		// extend relative path to absolute path
		 := len() > 0 && ([len()-1] == '/' || [len()-1] == '\\')

		var  error
		 = filepath.FromSlash()
		if ,  = filepath.Abs();  != nil {
			.Logger().Printf("cannot resolve path %q to absolute file path: %v", , )
			.Error("Internal Server Error", StatusInternalServerError)
			return
		}
		if  {
			 += "/"
		}
	}

	// convert the path to forward slashes regardless the OS in order to set the URI properly
	// the handler will convert back to OS path separator before opening the file
	 = filepath.ToSlash()

	.Request.SetRequestURI()
	rootFSHandler()
}

var (
	// rootFSOnce guards one-time construction of rootFSHandler in ServeFile.
	rootFSOnce sync.Once
	// rootFS serves the whole filesystem; ServeFile callers are expected to
	// pass trusted absolute paths only (see the warnings on ServeFile).
	rootFS     = &FS{
		Root:               "",
		AllowEmptyRoot:     true,
		GenerateIndexPages: true,
		Compress:           true,
		CompressBrotli:     true,
		AcceptByteRange:    true,
	}
	// rootFSHandler is the request handler built from rootFS on first use.
	rootFSHandler RequestHandler
)

// PathRewriteFunc must return new request path based on arbitrary ctx
// info such as ctx.Path().
//
// Path rewriter is used in FS for translating the current request
// to the local filesystem path relative to FS.Root.
//
// The returned path must not contain '/../' substrings due to security reasons,
// since such paths may refer files outside FS.Root.
//
// The returned path may refer to ctx members. For example, ctx.Path().
type PathRewriteFunc func(ctx *RequestCtx) []byte

// NewVHostPathRewriter returns path rewriter, which strips slashesCount
// leading slashes from the path and prepends the path with request's host,
// thus simplifying virtual hosting for static files.
//
// Examples:
//
//   - host=foobar.com, slashesCount=0, original path="/foo/bar".
//     Resulting path: "/foobar.com/foo/bar"
//
//   - host=img.aaa.com, slashesCount=1, original path="/images/123/456.jpg"
//     Resulting path: "/img.aaa.com/123/456.jpg"
func ( int) PathRewriteFunc {
	return func( *RequestCtx) []byte {
		 := stripLeadingSlashes(.Path(), )
		 := .Host()
		if  := bytes.IndexByte(, '/');  >= 0 {
			 = nil
		}
		if len() == 0 {
			 = strInvalidHost
		}
		 := bytebufferpool.Get()
		.B = append(.B, '/')
		.B = append(.B, ...)
		.B = append(.B, ...)
		.URI().SetPathBytes(.B)
		bytebufferpool.Put()

		return .Path()
	}
}

// strInvalidHost substitutes empty or suspicious Host values in NewVHostPathRewriter.
var strInvalidHost = []byte("invalid-host")

// NewPathSlashesStripper returns path rewriter, which strips slashesCount
// leading slashes from the path.
//
// Examples:
//
//   - slashesCount = 0, original path: "/foo/bar", result: "/foo/bar"
//   - slashesCount = 1, original path: "/foo/bar", result: "/bar"
//   - slashesCount = 2, original path: "/foo/bar", result: ""
//
// The returned path rewriter may be used as FS.PathRewrite .
func ( int) PathRewriteFunc {
	return func( *RequestCtx) []byte {
		return stripLeadingSlashes(.Path(), )
	}
}

// NewPathPrefixStripper returns path rewriter, which removes prefixSize bytes
// from the path prefix.
//
// Examples:
//
//   - prefixSize = 0, original path: "/foo/bar", result: "/foo/bar"
//   - prefixSize = 3, original path: "/foo/bar", result: "o/bar"
//   - prefixSize = 7, original path: "/foo/bar", result: "r"
//
// The returned path rewriter may be used as FS.PathRewrite .
func ( int) PathRewriteFunc {
	return func( *RequestCtx) []byte {
		 := .Path()
		if len() >=  {
			 = [:]
		}
		return 
	}
}

// FS represents settings for request handler serving static files
// from the local filesystem.
//
// It is prohibited copying FS values. Create new values instead.
type FS struct {
	// noCopy makes `go vet` flag copies of FS values (the struct holds a sync.Once).
	noCopy noCopy

	// Path to the root directory to serve files from.
	Root string

	// AllowEmptyRoot controls what happens when Root is empty. When false (default) it will default to the
	// current working directory. An empty root is mostly useful when you want to use absolute paths
	// on windows that are on different filesystems. On linux setting your Root to "/" already allows you to use
	// absolute paths on any filesystem.
	AllowEmptyRoot bool

	// List of index file names to try opening during directory access.
	//
	// For example:
	//
	//     * index.html
	//     * index.htm
	//     * my-super-index.xml
	//
	// By default the list is empty.
	IndexNames []string

	// Index pages for directories without files matching IndexNames
	// are automatically generated if set.
	//
	// Directory index generation may be quite slow for directories
	// with many files (more than 1K), so it is discouraged enabling
	// index pages' generation for such directories.
	//
	// By default index pages aren't generated.
	GenerateIndexPages bool

	// Transparently compresses responses if set to true.
	//
	// The server tries minimizing CPU usage by caching compressed files.
	// It adds CompressedFileSuffix suffix to the original file name and
	// tries saving the resulting compressed file under the new file name.
	// So it is advisable to give the server write access to Root
	// and to all inner folders in order to minimize CPU usage when serving
	// compressed responses.
	//
	// Transparent compression is disabled by default.
	Compress bool

	// Uses brotli encoding and fallbacks to gzip in responses if set to true, uses gzip if set to false.
	//
	// This value has sense only if Compress is set.
	//
	// Brotli encoding is disabled by default.
	CompressBrotli bool

	// Path to the compressed root directory to serve files from. If this value
	// is empty, Root is used.
	CompressRoot string

	// Enables byte range requests if set to true.
	//
	// Byte range requests are disabled by default.
	AcceptByteRange bool

	// Path rewriting function.
	//
	// By default request path is not modified.
	PathRewrite PathRewriteFunc

	// PathNotFound fires when file is not found in filesystem
	// this functions tries to replace "Cannot open requested path"
	// server response giving to the programmer the control of server flow.
	//
	// By default PathNotFound returns
	// "Cannot open requested path"
	PathNotFound RequestHandler

	// Expiration duration for inactive file handlers.
	//
	// FSHandlerCacheDuration is used by default.
	CacheDuration time.Duration

	// Suffix to add to the name of cached compressed file.
	//
	// This value has sense only if Compress is set.
	//
	// FSCompressedFileSuffix is used by default.
	CompressedFileSuffix string

	// Suffixes list to add to compressedFileSuffix depending on encoding
	//
	// This value has sense only if Compress is set.
	//
	// FSCompressedFileSuffixes is used by default.
	CompressedFileSuffixes map[string]string

	// If CleanStop is set, the channel can be closed to stop the cleanup handlers
	// for the FS RequestHandlers created with NewRequestHandler.
	// NEVER close this channel while the handler is still being used!
	CleanStop chan struct{}

	// once guards one-time construction of h in NewRequestHandler.
	once sync.Once
	// h is the request handler built from this FS by initRequestHandler.
	h    RequestHandler
}

// FSCompressedFileSuffix is the suffix FS adds to the original file names
// when trying to store compressed file under the new file name.
// See FS.Compress for details.
const FSCompressedFileSuffix = ".fasthttp.gz"

// FSCompressedFileSuffixes is the suffixes FS adds to the original file names depending on encoding
// when trying to store compressed file under the new file name.
// See FS.Compress for details.
var FSCompressedFileSuffixes = map[string]string{
	"gzip": ".fasthttp.gz",
	"br":   ".fasthttp.br",
}

// FSHandlerCacheDuration is the default expiration duration for inactive
// file handlers opened by FS.
const FSHandlerCacheDuration = 10 * time.Second

// FSHandler returns request handler serving static files from
// the given root folder.
//
// stripSlashes indicates how many leading slashes must be stripped
// from requested path before searching requested file in the root folder.
// Examples:
//
//   - stripSlashes = 0, original path: "/foo/bar", result: "/foo/bar"
//   - stripSlashes = 1, original path: "/foo/bar", result: "/bar"
//   - stripSlashes = 2, original path: "/foo/bar", result: ""
//
// The returned request handler automatically generates index pages
// for directories without index.html.
//
// The returned handler caches requested file handles
// for FSHandlerCacheDuration.
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
//
// Do not create multiple request handler instances for the same
// (root, stripSlashes) arguments - just reuse a single instance.
// Otherwise goroutine leak will occur.
func ( string,  int) RequestHandler {
	 := &FS{
		Root:               ,
		IndexNames:         []string{"index.html"},
		GenerateIndexPages: true,
		AcceptByteRange:    true,
	}
	if  > 0 {
		.PathRewrite = NewPathSlashesStripper()
	}
	return .NewRequestHandler()
}

// NewRequestHandler returns new request handler with the given FS settings.
//
// The returned handler caches requested file handles
// for FS.CacheDuration.
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if FS.Root folder contains many files.
//
// Do not create multiple request handlers from a single FS instance -
// just reuse a single request handler.
func ( *FS) () RequestHandler {
	.once.Do(.initRequestHandler)
	return .h
}

func ( *FS) ( string) string {
	// Serve files from the current working directory if Root is empty or if Root is a relative path.
	if (!.AllowEmptyRoot && len() == 0) || (len() > 0 && !filepath.IsAbs()) {
		,  := os.Getwd()
		if  != nil {
			 = "."
		}
		 =  + "/" + 
	}
	// convert the root directory slashes to the native format
	 = filepath.FromSlash()

	// strip trailing slashes from the root path
	for len() > 0 && [len()-1] == os.PathSeparator {
		 = [:len()-1]
	}
	return 
}

func ( *FS) () {
	 := .normalizeRoot(.Root)

	 := .CompressRoot
	if len() == 0 {
		 = 
	} else {
		 = .normalizeRoot()
	}

	 := .CacheDuration
	if  <= 0 {
		 = FSHandlerCacheDuration
	}

	 := .CompressedFileSuffixes
	if len(["br"]) == 0 || len(["gzip"]) == 0 ||
		["br"] == ["gzip"] {
		// Copy global map
		 = make(map[string]string, len(FSCompressedFileSuffixes))
		for ,  := range FSCompressedFileSuffixes {
			[] = 
		}
	}

	if len(.CompressedFileSuffix) > 0 {
		["gzip"] = .CompressedFileSuffix
		["br"] = FSCompressedFileSuffixes["br"]
	}

	 := &fsHandler{
		root:                   ,
		indexNames:             .IndexNames,
		pathRewrite:            .PathRewrite,
		generateIndexPages:     .GenerateIndexPages,
		compress:               .Compress,
		compressBrotli:         .CompressBrotli,
		compressRoot:           ,
		pathNotFound:           .PathNotFound,
		acceptByteRange:        .AcceptByteRange,
		cacheDuration:          ,
		compressedFileSuffixes: ,
		cache:                  make(map[string]*fsFile),
		cacheBrotli:            make(map[string]*fsFile),
		cacheGzip:              make(map[string]*fsFile),
	}

	go func() {
		var  []*fsFile

		 := func() {
			 = .cleanCache()
		}

		if .CleanStop != nil {
			 := time.NewTicker( / 2)
			for {
				select {
				case <-.C:
					()
				case ,  := <-.CleanStop:
					// Ignore values send on the channel, only stop when it is closed.
					if ! {
						.Stop()
						return
					}
				}
			}
		}
		for {
			time.Sleep( / 2)
			()
		}
	}()

	.h = .handleRequest
}

// fsHandler is the immutable runtime form of an FS value plus the mutable
// file-handle caches. It serves requests via handleRequest.
type fsHandler struct {
	root                   string
	indexNames             []string
	pathRewrite            PathRewriteFunc
	pathNotFound           RequestHandler
	generateIndexPages     bool
	compress               bool
	compressBrotli         bool
	compressRoot           string
	acceptByteRange        bool
	cacheDuration          time.Duration
	compressedFileSuffixes map[string]string

	// Per-encoding caches of open file handles, keyed by request path.
	// All three maps and fsFile.readersCount are guarded by cacheLock.
	cache       map[string]*fsFile
	cacheBrotli map[string]*fsFile
	cacheGzip   map[string]*fsFile
	cacheLock   sync.Mutex

	// Pool of fsSmallFileReader objects reused across requests.
	smallFileReaderPool sync.Pool
}

// fsFile is a cached open file (or generated directory index) together with
// the metadata needed to serve it repeatedly.
type fsFile struct {
	h             *fsHandler
	f             *os.File // nil when dirIndex is served instead of a real file
	dirIndex      []byte   // generated directory-index HTML, if any
	contentType   string
	contentLength int
	compressed    bool

	lastModified    time.Time
	lastModifiedStr []byte

	t            time.Time // last-use timestamp, consulted by the cache cleaner
	readersCount int       // active readers; guarded by h.cacheLock

	// Pool of reusable readers for big files; guarded by bigFilesLock.
	bigFiles     []*bigFileReader
	bigFilesLock sync.Mutex
}

func ( *fsFile) () (io.Reader, error) {
	if .isBig() {
		,  := .bigFileReader()
		if  != nil {
			.decReadersCount()
		}
		return , 
	}
	return .smallFileReader()
}

func ( *fsFile) () (io.Reader, error) {
	 := .h.smallFileReaderPool.Get()
	if  == nil {
		 = &fsSmallFileReader{}
	}
	 := .(*fsSmallFileReader)
	.ff = 
	.endPos = .contentLength
	if .startPos > 0 {
		return nil, errors.New("bug: fsSmallFileReader with non-nil startPos found in the pool")
	}
	return , nil
}

// files bigger than this size are sent with sendfile
const maxSmallFileSize = 2 * 4096

func ( *fsFile) () bool {
	return .contentLength > maxSmallFileSize && len(.dirIndex) == 0
}

func ( *fsFile) () (io.Reader, error) {
	if .f == nil {
		return nil, errors.New("bug: ff.f must be non-nil in bigFileReader")
	}

	var  io.Reader

	.bigFilesLock.Lock()
	 := len(.bigFiles)
	if  > 0 {
		 = .bigFiles[-1]
		.bigFiles = .bigFiles[:-1]
	}
	.bigFilesLock.Unlock()

	if  != nil {
		return , nil
	}

	,  := os.Open(.f.Name())
	if  != nil {
		return nil, fmt.Errorf("cannot open already opened file: %w", )
	}
	return &bigFileReader{
		f:  ,
		ff: ,
		r:  ,
	}, nil
}

func ( *fsFile) () {
	if .f != nil {
		_ = .f.Close()

		if .isBig() {
			.bigFilesLock.Lock()
			for ,  := range .bigFiles {
				_ = .f.Close()
			}
			.bigFilesLock.Unlock()
		}
	}
}

func ( *fsFile) () {
	.h.cacheLock.Lock()
	.readersCount--
	if .readersCount < 0 {
		.readersCount = 0
	}
	.h.cacheLock.Unlock()
}

// bigFileReader attempts to trigger sendfile
// for sending big files over the wire.
type bigFileReader struct {
	f  *os.File
	ff *fsFile
	r  io.Reader        // either f or &lr when a byte range is active
	lr io.LimitedReader // range-limited view over f, set by UpdateByteRange
}

func ( *bigFileReader) (,  int) error {
	if ,  := .f.Seek(int64(), 0);  != nil {
		return 
	}
	.r = &.lr
	.lr.R = .f
	.lr.N = int64( -  + 1)
	return nil
}

func ( *bigFileReader) ( []byte) (int, error) {
	return .r.Read()
}

func ( *bigFileReader) ( io.Writer) (int64, error) {
	if ,  := .(io.ReaderFrom);  {
		// fast path. Send file must be triggered
		return .ReadFrom(.r)
	}

	// slow path
	return copyZeroAlloc(, .r)
}

func ( *bigFileReader) () error {
	.r = .f
	,  := .f.Seek(0, 0)
	if  == nil {
		if  == 0 {
			 := .ff
			.bigFilesLock.Lock()
			.bigFiles = append(.bigFiles, )
			.bigFilesLock.Unlock()
		} else {
			_ = .f.Close()
			 = errors.New("bug: File.Seek(0,0) returned (non-zero, nil)")
		}
	} else {
		_ = .f.Close()
	}
	.ff.decReadersCount()
	return 
}

// fsSmallFileReader serves small files and in-memory directory indexes
// using positional reads; instances are pooled via fsHandler.smallFileReaderPool.
type fsSmallFileReader struct {
	ff       *fsFile
	startPos int // next byte to read
	endPos   int // exclusive end of the readable range
}

func ( *fsSmallFileReader) () error {
	 := .ff
	.decReadersCount()
	.ff = nil
	.startPos = 0
	.endPos = 0
	.h.smallFileReaderPool.Put()
	return nil
}

func ( *fsSmallFileReader) (,  int) error {
	.startPos = 
	.endPos =  + 1
	return nil
}

func ( *fsSmallFileReader) ( []byte) (int, error) {
	 := .endPos - .startPos
	if  <= 0 {
		return 0, io.EOF
	}
	if len() >  {
		 = [:]
	}

	 := .ff
	if .f != nil {
		,  := .f.ReadAt(, int64(.startPos))
		.startPos += 
		return , 
	}

	 := copy(, .dirIndex[.startPos:])
	.startPos += 
	return , nil
}

func ( *fsSmallFileReader) ( io.Writer) (int64, error) {
	 := .ff

	var  int
	var  error
	if .f == nil {
		,  = .Write(.dirIndex[.startPos:.endPos])
		return int64(), 
	}

	if ,  := .(io.ReaderFrom);  {
		return .ReadFrom()
	}

	 := .startPos
	 := copyBufPool.Get()
	 := .([]byte)
	for  == nil {
		 := .endPos - 
		if  <= 0 {
			break
		}
		if len() >  {
			 = [:]
		}
		,  = .f.ReadAt(, int64())
		,  := .Write([:])
		 += 
		if  == nil &&  !=  {
			 = errors.New("bug: Write(p) returned (n, nil), where n != len(p)")
		}
		if  == nil {
			 = 
		}
	}
	copyBufPool.Put()

	if  == io.EOF {
		 = nil
	}
	return int64( - .startPos), 
}

func ( *fsHandler) ( []*fsFile) []*fsFile {
	var  []*fsFile

	.cacheLock.Lock()

	// Close files which couldn't be closed before due to non-zero
	// readers count on the previous run.
	var  []*fsFile
	for ,  := range  {
		if .readersCount > 0 {
			 = append(, )
		} else {
			 = append(, )
		}
	}
	 = 

	,  = cleanCacheNolock(.cache, , , .cacheDuration)
	,  = cleanCacheNolock(.cacheBrotli, , , .cacheDuration)
	,  = cleanCacheNolock(.cacheGzip, , , .cacheDuration)

	.cacheLock.Unlock()

	for ,  := range  {
		.Release()
	}

	return 
}

func cleanCacheNolock( map[string]*fsFile, ,  []*fsFile,  time.Duration) ([]*fsFile, []*fsFile) {
	 := time.Now()
	for ,  := range  {
		if .Sub(.t) >  {
			if .readersCount > 0 {
				// There are pending readers on stale file handle,
				// so we cannot close it. Put it into pendingFiles
				// so it will be closed later.
				 = append(, )
			} else {
				 = append(, )
			}
			delete(, )
		}
	}
	return , 
}

func ( *fsHandler) ( string) string {
	return filepath.FromSlash(.root + )
}

func ( *fsHandler) ( string) string {
	if .root == .compressRoot {
		return 
	}
	if !strings.HasPrefix(, .root) {
		return 
	}
	return filepath.FromSlash(.compressRoot + [len(.root):])
}

func ( *fsHandler) ( *RequestCtx) {
	var  []byte
	if .pathRewrite != nil {
		 = .pathRewrite()
	} else {
		 = .Path()
	}
	 := len() > 0 && [len()-1] == '/'
	 = stripTrailingSlashes()

	if  := bytes.IndexByte(, 0);  >= 0 {
		.Logger().Printf("cannot serve path with nil byte at position %d: %q", , )
		.Error("Are you a hacker?", StatusBadRequest)
		return
	}
	if .pathRewrite != nil {
		// There is no need to check for '/../' if path = ctx.Path(),
		// since ctx.Path must normalize and sanitize the path.

		if  := bytes.Index(, strSlashDotDotSlash);  >= 0 {
			.Logger().Printf("cannot serve path with '/../' at position %d due to security reasons: %q", , )
			.Error("Internal Server Error", StatusInternalServerError)
			return
		}
	}

	 := false
	 := .cache
	 := ""
	 := .Request.Header.peek(strRange)
	if len() == 0 && .compress {
		if .compressBrotli && .Request.Header.HasAcceptEncodingBytes(strBr) {
			 = true
			 = .cacheBrotli
			 = "br"
		} else if .Request.Header.HasAcceptEncodingBytes(strGzip) {
			 = true
			 = .cacheGzip
			 = "gzip"
		}
	}

	.cacheLock.Lock()
	,  := [string()]
	if  {
		.readersCount++
	}
	.cacheLock.Unlock()

	if ! {
		 := string()
		 := .pathToFilePath()

		var  error
		,  = .openFSFile(, , )
		if  &&  == errNoCreatePermission {
			.Logger().Printf("insufficient permissions for saving compressed file for %q. Serving uncompressed file. "+
				"Allow write access to the directory with this file in order to improve fasthttp performance", )
			 = false
			,  = .openFSFile(, , )
		}
		if  == errDirIndexRequired {
			if ! {
				.RedirectBytes(append(, '/'), StatusFound)
				return
			}
			,  = .openIndexFile(, , , )
			if  != nil {
				.Logger().Printf("cannot open dir index %q: %v", , )
				.Error("Directory index is forbidden", StatusForbidden)
				return
			}
		} else if  != nil {
			.Logger().Printf("cannot open file %q: %v", , )
			if .pathNotFound == nil {
				.Error("Cannot open requested path", StatusNotFound)
			} else {
				.SetStatusCode(StatusNotFound)
				.pathNotFound()
			}
			return
		}

		.cacheLock.Lock()
		,  := []
		if ! {
			[] = 
			.readersCount++
		} else {
			.readersCount++
		}
		.cacheLock.Unlock()

		if  {
			// The file has been already opened by another
			// goroutine, so close the current file and use
			// the file opened by another goroutine instead.
			.Release()
			 = 
		}
	}

	if !.IfModifiedSince(.lastModified) {
		.decReadersCount()
		.NotModified()
		return
	}

	,  := .NewReader()
	if  != nil {
		.Logger().Printf("cannot obtain file reader for path=%q: %v", , )
		.Error("Internal Server Error", StatusInternalServerError)
		return
	}

	 := &.Response.Header
	if .compressed {
		if  == "br" {
			.SetContentEncodingBytes(strBr)
		} else if  == "gzip" {
			.SetContentEncodingBytes(strGzip)
		}
	}

	 := StatusOK
	 := .contentLength
	if .acceptByteRange {
		.setNonSpecial(strAcceptRanges, strBytes)
		if len() > 0 {
			, ,  := ParseByteRange(, )
			if  != nil {
				_ = .(io.Closer).Close()
				.Logger().Printf("cannot parse byte range %q for path=%q: %v", , , )
				.Error("Range Not Satisfiable", StatusRequestedRangeNotSatisfiable)
				return
			}

			if  = .(byteRangeUpdater).UpdateByteRange(, );  != nil {
				_ = .(io.Closer).Close()
				.Logger().Printf("cannot seek byte range %q for path=%q: %v", , , )
				.Error("Internal Server Error", StatusInternalServerError)
				return
			}

			.SetContentRange(, , )
			 =  -  + 1
			 = StatusPartialContent
		}
	}

	.setNonSpecial(strLastModified, .lastModifiedStr)
	if !.IsHead() {
		.SetBodyStream(, )
	} else {
		.Response.ResetBody()
		.Response.SkipBody = true
		.Response.Header.SetContentLength()
		if ,  := .(io.Closer);  {
			if  := .Close();  != nil {
				.Logger().Printf("cannot close file reader: %v", )
				.Error("Internal Server Error", StatusInternalServerError)
				return
			}
		}
	}
	.noDefaultContentType = true
	if len(.ContentType()) == 0 {
		.SetContentType(.contentType)
	}
	.SetStatusCode()
}

// byteRangeUpdater is implemented by both fsSmallFileReader and bigFileReader;
// handleRequest uses it to apply a parsed Range header to the body reader.
type byteRangeUpdater interface {
	UpdateByteRange(startPos, endPos int) error
}

// ParseByteRange parses 'Range: bytes=...' header value.
//
// It follows https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 .
func ( []byte,  int) (,  int,  error) {
	 := 
	if !bytes.HasPrefix(, strBytes) {
		return 0, 0, fmt.Errorf("unsupported range units: %q. Expecting %q", , strBytes)
	}

	 = [len(strBytes):]
	if len() == 0 || [0] != '=' {
		return 0, 0, fmt.Errorf("missing byte range in %q", )
	}
	 = [1:]

	 := bytes.IndexByte(, '-')
	if  < 0 {
		return 0, 0, fmt.Errorf("missing the end position of byte range in %q", )
	}

	if  == 0 {
		,  := ParseUint([+1:])
		if  != nil {
			return 0, 0, 
		}
		 :=  - 
		if  < 0 {
			 = 0
		}
		return ,  - 1, nil
	}

	if ,  = ParseUint([:]);  != nil {
		return 0, 0, 
	}
	if  >=  {
		return 0, 0, fmt.Errorf("the start position of byte range cannot exceed %d. byte range %q", -1, )
	}

	 = [+1:]
	if len() == 0 {
		return ,  - 1, nil
	}

	if ,  = ParseUint();  != nil {
		return 0, 0, 
	}
	if  >=  {
		 =  - 1
	}
	if  <  {
		return 0, 0, fmt.Errorf("the start position of byte range cannot exceed the end position. byte range %q", )
	}
	return , , nil
}

func ( *fsHandler) ( *RequestCtx,  string,  bool,  string) (*fsFile, error) {
	for ,  := range .indexNames {
		 :=  + "/" + 
		,  := .openFSFile(, , )
		if  == nil {
			return , nil
		}
		if !os.IsNotExist() {
			return nil, fmt.Errorf("cannot open file %q: %w", , )
		}
	}

	if !.generateIndexPages {
		return nil, fmt.Errorf("cannot access directory without index page. Directory %q", )
	}

	return .createDirIndex(.URI(), , , )
}

var (
	// errDirIndexRequired signals that the requested path is a directory,
	// so an index file or generated index page must be served instead.
	errDirIndexRequired   = errors.New("directory index required")
	// errNoCreatePermission signals that a compressed copy could not be
	// written next to the original file; callers fall back to uncompressed.
	errNoCreatePermission = errors.New("no 'create file' permissions")
)

func ( *fsHandler) ( *URI,  string,  bool,  string) (*fsFile, error) {
	 := &bytebufferpool.ByteBuffer{}

	 := html.EscapeString(string(.Path()))
	_, _ = fmt.Fprintf(, "<html><head><title>%s</title><style>.dir { font-weight: bold }</style></head><body>", )
	_, _ = fmt.Fprintf(, "<h1>%s</h1>", )
	_, _ = fmt.Fprintf(, "<ul>")

	if len() > 1 {
		var  URI
		.CopyTo(&)
		.Update(string(.Path()) + "/..")
		 := html.EscapeString(string(.Path()))
		_, _ = fmt.Fprintf(, `<li><a href="%s" class="dir">..</a></li>`, )
	}

	,  := os.Open()
	if  != nil {
		return nil, 
	}

	,  := .Readdir(0)
	_ = .Close()
	if  != nil {
		return nil, 
	}

	 := make(map[string]os.FileInfo, len())
	 := make([]string, 0, len())
:
	for ,  := range  {
		 := .Name()
		for ,  := range .compressedFileSuffixes {
			if strings.HasSuffix(, ) {
				// Do not show compressed files on index page.
				continue 
			}
		}
		[] = 
		 = append(, )
	}

	var  URI
	.CopyTo(&)
	.Update(string(.Path()) + "/")

	sort.Strings()
	for ,  := range  {
		.Update()
		 := html.EscapeString(string(.Path()))
		 := []
		 := "dir"
		 := "dir"
		if !.IsDir() {
			 = fmt.Sprintf("file, %d bytes", .Size())
			 = "file"
		}
		_, _ = fmt.Fprintf(, `<li><a href="%s" class="%s">%s</a>, %s, last modified %s</li>`,
			, , html.EscapeString(), , fsModTime(.ModTime()))
	}

	_, _ = fmt.Fprintf(, "</ul></body></html>")

	if  {
		var  bytebufferpool.ByteBuffer
		if  == "br" {
			.B = AppendBrotliBytesLevel(.B, .B, CompressDefaultCompression)
		} else if  == "gzip" {
			.B = AppendGzipBytesLevel(.B, .B, CompressDefaultCompression)
		}
		 = &
	}

	 := .B
	 := time.Now()
	 := &fsFile{
		h:               ,
		dirIndex:        ,
		contentType:     "text/html; charset=utf-8",
		contentLength:   len(),
		compressed:      ,
		lastModified:    ,
		lastModifiedStr: AppendHTTPDate(nil, ),

		t: ,
	}
	return , nil
}

const (
	// fsMinCompressRatio: only cache a compressed copy when it saves at
	// least 20% compared to the original file size.
	fsMinCompressRatio        = 0.8
	// fsMaxCompressibleFileSize caps the size of files considered for
	// transparent compression (8 MiB).
	fsMaxCompressibleFileSize = 8 * 1024 * 1024
)

func ( *fsHandler) ( string,  string) (*fsFile, error) {
	,  := os.Open()
	if  != nil {
		return nil, 
	}

	,  := .Stat()
	if  != nil {
		_ = .Close()
		return nil, fmt.Errorf("cannot obtain info for file %q: %w", , )
	}

	if .IsDir() {
		_ = .Close()
		return nil, errDirIndexRequired
	}

	if strings.HasSuffix(, .compressedFileSuffixes[]) ||
		.Size() > fsMaxCompressibleFileSize ||
		!isFileCompressible(, fsMinCompressRatio) {
		return .newFSFile(, , false, "")
	}

	 := .filePathToCompressed()
	if  !=  {
		if  := os.MkdirAll(filepath.Dir(), os.ModePerm);  != nil {
			return nil, 
		}
	}
	 += .compressedFileSuffixes[]

	,  := filepath.Abs()
	if  != nil {
		_ = .Close()
		return nil, fmt.Errorf("cannot determine absolute path for %q: %v", , )
	}

	 := getFileLock()
	.Lock()
	,  := .compressFileNolock(, , , , )
	.Unlock()

	return , 
}

func ( *fsHandler) ( *os.File,  os.FileInfo, ,  string,  string) (*fsFile, error) {
	// Attempt to open compressed file created by another concurrent
	// goroutine.
	// It is safe opening such a file, since the file creation
	// is guarded by file mutex - see getFileLock call.
	if ,  := os.Stat();  == nil {
		_ = .Close()
		return .newCompressedFSFile(, )
	}

	// Create temporary file, so concurrent goroutines don't use
	// it until it is created.
	 :=  + ".tmp"
	,  := os.Create()
	if  != nil {
		_ = .Close()
		if !os.IsPermission() {
			return nil, fmt.Errorf("cannot create temporary file %q: %w", , )
		}
		return nil, errNoCreatePermission
	}
	if  == "br" {
		 := acquireStacklessBrotliWriter(, CompressDefaultCompression)
		_,  = copyZeroAlloc(, )
		if  := .Flush();  == nil {
			 = 
		}
		releaseStacklessBrotliWriter(, CompressDefaultCompression)
	} else if  == "gzip" {
		 := acquireStacklessGzipWriter(, CompressDefaultCompression)
		_,  = copyZeroAlloc(, )
		if  := .Flush();  == nil {
			 = 
		}
		releaseStacklessGzipWriter(, CompressDefaultCompression)
	}
	_ = .Close()
	_ = .Close()
	if  != nil {
		return nil, fmt.Errorf("error when compressing file %q to %q: %w", , , )
	}
	if  = os.Chtimes(, time.Now(), .ModTime());  != nil {
		return nil, fmt.Errorf("cannot change modification time to %v for tmp file %q: %v",
			.ModTime(), , )
	}
	if  = os.Rename(, );  != nil {
		return nil, fmt.Errorf("cannot move compressed file from %q to %q: %w", , , )
	}
	return .newCompressedFSFile(, )
}

func ( *fsHandler) ( string,  string) (*fsFile, error) {
	,  := os.Open()
	if  != nil {
		return nil, fmt.Errorf("cannot open compressed file %q: %w", , )
	}
	,  := .Stat()
	if  != nil {
		_ = .Close()
		return nil, fmt.Errorf("cannot obtain info for compressed file %q: %w", , )
	}
	return .newFSFile(, , true, )
}

func ( *fsHandler) ( string,  bool,  string) (*fsFile, error) {
	 := 
	if  {
		 += .compressedFileSuffixes[]
	}

	,  := os.Open()
	if  != nil {
		if  && os.IsNotExist() {
			return .compressAndOpenFSFile(, )
		}
		return nil, 
	}

	,  := .Stat()
	if  != nil {
		_ = .Close()
		return nil, fmt.Errorf("cannot obtain info for file %q: %w", , )
	}

	if .IsDir() {
		_ = .Close()
		if  {
			return nil, fmt.Errorf("directory with unexpected suffix found: %q. Suffix: %q",
				, .compressedFileSuffixes[])
		}
		return nil, errDirIndexRequired
	}

	if  {
		,  := os.Stat()
		if  != nil {
			_ = .Close()
			return nil, fmt.Errorf("cannot obtain info for original file %q: %w", , )
		}

		// Only re-create the compressed file if there was more than a second between the mod times.
		// On macOS the gzip seems to truncate the nanoseconds in the mod time causing the original file
		// to look newer than the gzipped file.
		if .ModTime().Sub(.ModTime()) >= time.Second {
			// The compressed file became stale. Re-create it.
			_ = .Close()
			_ = os.Remove()
			return .compressAndOpenFSFile(, )
		}
	}

	return .newFSFile(, , , )
}

func ( *fsHandler) ( *os.File,  os.FileInfo,  bool,  string) (*fsFile, error) {
	 := .Size()
	 := int()
	if  != int64() {
		_ = .Close()
		return nil, fmt.Errorf("too big file: %d bytes", )
	}

	// detect content-type
	 := fileExtension(.Name(), , .compressedFileSuffixes[])
	 := mime.TypeByExtension()
	if len() == 0 {
		,  := readFileHeader(, , )
		if  != nil {
			return nil, fmt.Errorf("cannot read header of the file %q: %w", .Name(), )
		}
		 = http.DetectContentType()
	}

	 := .ModTime()
	 := &fsFile{
		h:               ,
		f:               ,
		contentType:     ,
		contentLength:   ,
		compressed:      ,
		lastModified:    ,
		lastModifiedStr: AppendHTTPDate(nil, ),

		t: time.Now(),
	}
	return , nil
}

func readFileHeader( *os.File,  bool,  string) ([]byte, error) {
	 := io.Reader()
	var (
		 *brotli.Reader
		 *gzip.Reader
	)
	if  {
		var  error
		if  == "br" {
			if ,  = acquireBrotliReader();  != nil {
				return nil, 
			}
			 = 
		} else if  == "gzip" {
			if ,  = acquireGzipReader();  != nil {
				return nil, 
			}
			 = 
		}
	}

	 := &io.LimitedReader{
		R: ,
		N: 512,
	}
	,  := io.ReadAll()
	if ,  := .Seek(0, 0);  != nil {
		return nil, 
	}

	if  != nil {
		releaseBrotliReader()
	}

	if  != nil {
		releaseGzipReader()
	}

	return , 
}

// stripLeadingSlashes removes stripSlashes leading path segments from path.
// path must start with '/'; when fewer segments remain than requested,
// an empty slice is returned.
func stripLeadingSlashes(path []byte, stripSlashes int) []byte {
	for stripSlashes > 0 && len(path) > 0 {
		if path[0] != '/' {
			// developer sanity-check
			panic("BUG: path must start with slash")
		}
		n := bytes.IndexByte(path[1:], '/')
		if n < 0 {
			// Last segment — nothing remains after stripping it.
			path = path[:0]
			break
		}
		path = path[n+1:]
		stripSlashes--
	}
	return path
}

// stripTrailingSlashes removes all trailing '/' bytes from path.
func stripTrailingSlashes(path []byte) []byte {
	for len(path) > 0 && path[len(path)-1] == '/' {
		path = path[:len(path)-1]
	}
	return path
}

// fileExtension returns the extension of path (including the leading dot),
// first stripping compressedFileSuffix when the file holds compressed data.
// An empty string is returned when there is no dot in the name.
func fileExtension(path string, compressed bool, compressedFileSuffix string) string {
	if compressed && strings.HasSuffix(path, compressedFileSuffix) {
		path = path[:len(path)-len(compressedFileSuffix)]
	}
	n := strings.LastIndexByte(path, '.')
	if n < 0 {
		return ""
	}
	return path[n:]
}

// FileLastModified returns last modified time for the file.
func ( string) (time.Time, error) {
	,  := os.Open()
	if  != nil {
		return zeroTime, 
	}
	,  := .Stat()
	_ = .Close()
	if  != nil {
		return zeroTime, 
	}
	return fsModTime(.ModTime()), nil
}

func fsModTime( time.Time) time.Time {
	return .In(time.UTC).Truncate(time.Second)
}

// filesLockMap maps absolute file paths to their creation mutexes; entries
// are never removed, which is acceptable since compressed-file creation
// touches a bounded set of paths.
var filesLockMap sync.Map

// getFileLock returns the mutex guarding compressed-file creation for
// absPath, creating it on first use.
func getFileLock(absPath string) *sync.Mutex {
	v, _ := filesLockMap.LoadOrStore(absPath, &sync.Mutex{})
	filelock := v.(*sync.Mutex)
	return filelock
}