package fasthttp
import (
"bytes"
"errors"
"fmt"
"html"
"io"
"mime"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"github.com/andybalholm/brotli"
"github.com/klauspost/compress/gzip"
"github.com/valyala/bytebufferpool"
)
// ServeFileBytesUncompressed returns HTTP response containing file contents
// from the given path, with transparent compression disabled
// (see ServeFileUncompressed).
func ServeFileBytesUncompressed (ctx *RequestCtx , path []byte ) {
ServeFileUncompressed (ctx , b2s (path ))
}
// ServeFileUncompressed returns HTTP response containing file contents
// from the given path. Compression is disabled by removing the request's
// 'Accept-Encoding' header before delegating to ServeFile.
func ServeFileUncompressed (ctx *RequestCtx , path string ) {
ctx .Request .Header .DelBytes (strAcceptEncoding )
ServeFile (ctx , path )
}
// ServeFileBytes returns HTTP response containing (optionally compressed)
// file contents from the given path; []byte variant of ServeFile.
func ServeFileBytes (ctx *RequestCtx , path []byte ) {
ServeFile (ctx , b2s (path ))
}
// ServeFile returns an HTTP response containing (optionally compressed)
// contents of the file at the given path, served through a lazily
// initialized package-level FS handler. Relative paths are resolved
// against the current working directory; a trailing (back)slash on the
// input is preserved after resolution.
func ServeFile(ctx *RequestCtx, path string) {
	rootFSOnce.Do(func() {
		rootFSHandler = rootFS.NewRequestHandler()
	})

	if len(path) == 0 || !filepath.IsAbs(path) {
		// Remember whether the original path ended with a separator so
		// the suffix can be restored after filepath.Abs strips it.
		endsWithSlash := false
		if n := len(path); n > 0 {
			endsWithSlash = path[n-1] == '/' || path[n-1] == '\\'
		}

		var err error
		path = filepath.FromSlash(path)
		if path, err = filepath.Abs(path); err != nil {
			ctx.Logger().Printf("cannot resolve path %q to absolute file path: %v", path, err)
			ctx.Error("Internal Server Error", StatusInternalServerError)
			return
		}
		if endsWithSlash {
			path += "/"
		}
	}

	path = filepath.ToSlash(path)
	ctx.Request.SetRequestURI(path)
	rootFSHandler(ctx)
}
var (
// rootFSOnce guards one-time construction of rootFSHandler.
rootFSOnce sync .Once
// rootFS is the FS backing ServeFile and friends: it serves absolute
// paths from the filesystem root with compression and byte ranges enabled.
rootFS = &FS {
Root : "" ,
AllowEmptyRoot : true ,
GenerateIndexPages : true ,
Compress : true ,
CompressBrotli : true ,
AcceptByteRange : true ,
}
// rootFSHandler is the lazily built request handler for rootFS.
rootFSHandler RequestHandler
)
type PathRewriteFunc func (ctx *RequestCtx ) []byte
// NewVHostPathRewriter returns a path rewriter that prepends the request
// host to the path after stripping slashesCount leading path segments, so
// each virtual host is served from its own subdirectory. Empty or
// slash-containing hosts are mapped to strInvalidHost.
func NewVHostPathRewriter(slashesCount int) PathRewriteFunc {
	return func(ctx *RequestCtx) []byte {
		path := stripLeadingSlashes(ctx.Path(), slashesCount)

		host := ctx.Host()
		if bytes.IndexByte(host, '/') >= 0 {
			// A host containing '/' could be abused for path traversal;
			// treat it as invalid.
			host = nil
		}
		if len(host) == 0 {
			host = strInvalidHost
		}

		buf := bytebufferpool.Get()
		buf.B = append(buf.B, '/')
		buf.B = append(buf.B, host...)
		buf.B = append(buf.B, path...)
		ctx.URI().SetPathBytes(buf.B)
		bytebufferpool.Put(buf)

		return ctx.Path()
	}
}

// strInvalidHost is substituted for empty or malformed Host headers.
var strInvalidHost = []byte("invalid-host")
// NewPathSlashesStripper returns a path rewriter that strips slashesCount
// leading path segments, e.g. slashesCount=1 turns /foo/bar into /bar.
func NewPathSlashesStripper (slashesCount int ) PathRewriteFunc {
return func (ctx *RequestCtx ) []byte {
return stripLeadingSlashes (ctx .Path (), slashesCount )
}
}
// NewPathPrefixStripper returns a path rewriter that removes the first
// prefixSize bytes from the request path. Paths shorter than prefixSize
// are returned unchanged.
func NewPathPrefixStripper(prefixSize int) PathRewriteFunc {
	return func(ctx *RequestCtx) []byte {
		p := ctx.Path()
		if len(p) < prefixSize {
			return p
		}
		return p[prefixSize:]
	}
}
// FS holds settings for a request handler that serves static files from
// the local filesystem. Build the handler once via NewRequestHandler and
// reuse it; FS must not be copied after first use (noCopy).
type FS struct {
noCopy noCopy
// Root is the directory files are served from.
Root string
// AllowEmptyRoot permits an empty Root instead of defaulting to the
// current working directory (see normalizeRoot).
AllowEmptyRoot bool
// IndexNames lists file names tried when a directory is requested.
IndexNames []string
// GenerateIndexPages enables auto-generated HTML directory listings.
GenerateIndexPages bool
// Compress enables transparent gzip compression with on-disk caching
// of the compressed copies.
Compress bool
// CompressBrotli additionally enables brotli compression (preferred
// over gzip when the client accepts it).
CompressBrotli bool
// CompressRoot is where compressed copies are stored; defaults to Root.
CompressRoot string
// AcceptByteRange enables 'Range' request handling.
AcceptByteRange bool
// PathRewrite optionally transforms the request path before lookup.
PathRewrite PathRewriteFunc
// PathNotFound, if set, handles requests for missing files after the
// status code is set to 404; otherwise a plain 404 error is returned.
PathNotFound RequestHandler
// CacheDuration is how long an opened file stays in the internal
// cache; non-positive values fall back to FSHandlerCacheDuration.
CacheDuration time .Duration
// CompressedFileSuffix is the legacy single (gzip-only) suffix override.
CompressedFileSuffix string
// CompressedFileSuffixes overrides cached-copy suffixes per encoding
// ("gzip", "br"); invalid maps fall back to FSCompressedFileSuffixes.
CompressedFileSuffixes map [string ]string
// CleanStop, when closed, stops the background cache-cleaning goroutine.
CleanStop chan struct {}
once sync .Once
h RequestHandler
}
// FSCompressedFileSuffix is the default suffix appended to cached
// gzip-compressed copies of original files.
const FSCompressedFileSuffix = ".fasthttp.gz"
// FSCompressedFileSuffixes holds the default per-encoding suffixes for
// cached compressed file copies.
var FSCompressedFileSuffixes = map [string ]string {
"gzip" : ".fasthttp.gz" ,
"br" : ".fasthttp.br" ,
}
// FSHandlerCacheDuration is the default expiration for cached open files.
const FSHandlerCacheDuration = 10 * time .Second
// FSHandler returns a request handler serving static files from the given
// root directory, with "index.html" index pages, generated directory
// listings and byte-range support. When stripSlashes > 0 that many
// leading path segments are removed from the request path before lookup.
func FSHandler(root string, stripSlashes int) RequestHandler {
	var rewrite PathRewriteFunc
	if stripSlashes > 0 {
		rewrite = NewPathSlashesStripper(stripSlashes)
	}
	fs := &FS{
		Root:               root,
		IndexNames:         []string{"index.html"},
		GenerateIndexPages: true,
		AcceptByteRange:    true,
		PathRewrite:        rewrite,
	}
	return fs.NewRequestHandler()
}
// NewRequestHandler returns the handler serving files per fs settings.
// The handler is built exactly once; later calls (and later changes to
// fs fields) return the same handler.
func (fs *FS ) NewRequestHandler () RequestHandler {
fs .once .Do (fs .initRequestHandler )
return fs .h
}
// normalizeRoot makes root absolute (resolving relative roots against the
// current working directory, unless an empty root is explicitly allowed),
// converts it to the OS path separator and strips trailing separators.
func (fs *FS) normalizeRoot(root string) string {
	needsAbs := (len(root) == 0 && !fs.AllowEmptyRoot) ||
		(len(root) > 0 && !filepath.IsAbs(root))
	if needsAbs {
		cwd, err := os.Getwd()
		if err != nil {
			// Fall back to "." when the working directory is unavailable.
			cwd = "."
		}
		root = cwd + "/" + root
	}

	root = filepath.FromSlash(root)

	// Drop any trailing path separators.
	n := len(root)
	for n > 0 && root[n-1] == os.PathSeparator {
		n--
	}
	return root[:n]
}
// initRequestHandler builds the fsHandler from the FS settings and starts
// the background goroutine that periodically evicts expired cache
// entries. Invoked exactly once via fs.once.
func (fs *FS ) initRequestHandler () {
root := fs .normalizeRoot (fs .Root )
compressRoot := fs .CompressRoot
if len (compressRoot ) == 0 {
compressRoot = root
} else {
compressRoot = fs .normalizeRoot (compressRoot )
}
cacheDuration := fs .CacheDuration
if cacheDuration <= 0 {
cacheDuration = FSHandlerCacheDuration
}
// Fall back to the defaults when the configured suffix map is missing
// an encoding or uses the same suffix for both encodings.
compressedFileSuffixes := fs .CompressedFileSuffixes
if len (compressedFileSuffixes ["br" ]) == 0 || len (compressedFileSuffixes ["gzip" ]) == 0 ||
compressedFileSuffixes ["br" ] == compressedFileSuffixes ["gzip" ] {
compressedFileSuffixes = make (map [string ]string , len (FSCompressedFileSuffixes ))
for k , v := range FSCompressedFileSuffixes {
compressedFileSuffixes [k ] = v
}
}
// Legacy single-suffix setting overrides the gzip suffix only.
if len (fs .CompressedFileSuffix ) > 0 {
compressedFileSuffixes ["gzip" ] = fs .CompressedFileSuffix
compressedFileSuffixes ["br" ] = FSCompressedFileSuffixes ["br" ]
}
h := &fsHandler {
root : root ,
indexNames : fs .IndexNames ,
pathRewrite : fs .PathRewrite ,
generateIndexPages : fs .GenerateIndexPages ,
compress : fs .Compress ,
compressBrotli : fs .CompressBrotli ,
compressRoot : compressRoot ,
pathNotFound : fs .PathNotFound ,
acceptByteRange : fs .AcceptByteRange ,
cacheDuration : cacheDuration ,
compressedFileSuffixes : compressedFileSuffixes ,
cache : make (map [string ]*fsFile ),
cacheBrotli : make (map [string ]*fsFile ),
cacheGzip : make (map [string ]*fsFile ),
}
// Cache cleaner: runs every cacheDuration/2. With CleanStop set it can
// be terminated by closing that channel; otherwise it runs forever.
go func () {
var pendingFiles []*fsFile
clean := func () {
pendingFiles = h .cleanCache (pendingFiles )
}
if fs .CleanStop != nil {
t := time .NewTicker (cacheDuration / 2 )
for {
select {
case <- t .C :
clean ()
case _ , stillOpen := <- fs .CleanStop :
// A closed channel terminates the cleaner; other receives
// (explicit sends) are ignored.
if !stillOpen {
t .Stop ()
return
}
}
}
}
for {
time .Sleep (cacheDuration / 2 )
clean ()
}
}()
fs .h = h .handleRequest
}
// fsHandler is the runtime configuration plus the mutable open-file
// caches behind the handler returned by FS.NewRequestHandler.
type fsHandler struct {
root string
indexNames []string
pathRewrite PathRewriteFunc
pathNotFound RequestHandler
generateIndexPages bool
compress bool
compressBrotli bool
compressRoot string
acceptByteRange bool
cacheDuration time .Duration
compressedFileSuffixes map [string ]string
// cache, cacheBrotli and cacheGzip map request paths to open files for
// each served encoding; all three are guarded by cacheLock.
cache map [string ]*fsFile
cacheBrotli map [string ]*fsFile
cacheGzip map [string ]*fsFile
cacheLock sync .Mutex
// smallFileReaderPool recycles fsSmallFileReader objects.
smallFileReaderPool sync .Pool
}
// fsFile is a cached open file (or generated directory index) together
// with the metadata needed to serve it.
type fsFile struct {
h *fsHandler
f *os .File
// dirIndex holds a generated directory listing; when non-empty, f is nil
// and content is served from memory.
dirIndex []byte
contentType string
contentLength int
compressed bool
lastModified time .Time
lastModifiedStr []byte
// t records when the entry was cached; used for expiration.
t time .Time
// readersCount tracks active readers; guarded by h.cacheLock.
readersCount int
// bigFiles pools spare readers (each with its own descriptor) for
// concurrent big-file serving; guarded by bigFilesLock.
bigFiles []*bigFileReader
bigFilesLock sync .Mutex
}
// NewReader returns an io.Reader over ff's contents: a pooled small-file
// reader for small files and directory indexes, or a dedicated big-file
// reader otherwise. On big-file failure the readers counter (incremented
// by the caller) is rolled back here.
func (ff *fsFile) NewReader() (io.Reader, error) {
	if !ff.isBig() {
		return ff.smallFileReader()
	}
	r, err := ff.bigFileReader()
	if err != nil {
		ff.decReadersCount()
	}
	return r, err
}
// smallFileReader returns a pooled fsSmallFileReader positioned at the
// start of ff. A pooled reader carrying a non-zero startPos indicates a
// pool misuse bug and is reported as an error.
func (ff *fsFile ) smallFileReader () (io .Reader , error ) {
v := ff .h .smallFileReaderPool .Get ()
if v == nil {
v = &fsSmallFileReader {}
}
r := v .(*fsSmallFileReader )
r .ff = ff
r .endPos = ff .contentLength
if r .startPos > 0 {
return nil , errors .New ("bug: fsSmallFileReader with non-nil startPos found in the pool" )
}
return r , nil
}
// maxSmallFileSize is the size threshold (bytes) below which files are
// served via pooled ReadAt-based readers instead of dedicated descriptors.
const maxSmallFileSize = 2 * 4096
// isBig reports whether ff should be served via bigFileReader. Generated
// directory indexes are always served as small (in-memory) files.
func (ff *fsFile ) isBig () bool {
return ff .contentLength > maxSmallFileSize && len (ff .dirIndex ) == 0
}
// bigFileReader returns a reader with its own file descriptor for ff,
// reusing a pooled one when available and opening a fresh descriptor for
// the same file otherwise.
func (ff *fsFile) bigFileReader() (io.Reader, error) {
	if ff.f == nil {
		return nil, errors.New("bug: ff.f must be non-nil in bigFileReader")
	}

	// Try to pop a pooled reader first.
	var r io.Reader
	ff.bigFilesLock.Lock()
	if n := len(ff.bigFiles); n > 0 {
		r, ff.bigFiles = ff.bigFiles[n-1], ff.bigFiles[:n-1]
	}
	ff.bigFilesLock.Unlock()
	if r != nil {
		return r, nil
	}

	// Pool empty: open another descriptor for the same file.
	f, err := os.Open(ff.f.Name())
	if err != nil {
		return nil, fmt.Errorf("cannot open already opened file: %w", err)
	}
	return &bigFileReader{f: f, ff: ff, r: f}, nil
}
// Release closes ff's descriptor and, for big files, every pooled spare
// descriptor as well. It is a no-op for in-memory directory indexes.
func (ff *fsFile) Release() {
	if ff.f == nil {
		return
	}
	_ = ff.f.Close()
	if !ff.isBig() {
		return
	}
	ff.bigFilesLock.Lock()
	for _, r := range ff.bigFiles {
		_ = r.f.Close()
	}
	ff.bigFilesLock.Unlock()
}
// decReadersCount decrements ff's active-reader counter under the cache
// lock, clamping at zero to tolerate unbalanced calls.
func (ff *fsFile) decReadersCount() {
	h := ff.h
	h.cacheLock.Lock()
	if ff.readersCount--; ff.readersCount < 0 {
		ff.readersCount = 0
	}
	h.cacheLock.Unlock()
}
// bigFileReader serves a big file through a dedicated descriptor, with
// optional byte-range limiting via lr.
type bigFileReader struct {
f *os .File
ff *fsFile
// r is either f (whole file) or &lr (after UpdateByteRange).
r io .Reader
lr io .LimitedReader
}
// UpdateByteRange seeks the descriptor to startPos and limits subsequent
// reads to the inclusive range [startPos, endPos].
func (r *bigFileReader) UpdateByteRange(startPos, endPos int) error {
	if _, err := r.f.Seek(int64(startPos), io.SeekStart); err != nil {
		return err
	}
	r.lr.R = r.f
	r.lr.N = int64(endPos-startPos) + 1
	r.r = &r.lr
	return nil
}
// Read reads from the current view (whole file, or the limited range set
// by UpdateByteRange).
func (r *bigFileReader ) Read (p []byte ) (int , error ) {
return r .r .Read (p )
}
// WriteTo copies the remaining bytes to w, preferring the writer's
// io.ReaderFrom fast path when available.
func (r *bigFileReader) WriteTo(w io.Writer) (int64, error) {
	rf, ok := w.(io.ReaderFrom)
	if !ok {
		return copyZeroAlloc(w, r.r)
	}
	return rf.ReadFrom(r.r)
}
// Close returns the reader to its fsFile's pool when the descriptor can
// be rewound to the file start; otherwise the descriptor is closed. The
// fsFile's active-reader counter is decremented in every case.
func (r *bigFileReader ) Close () error {
// Reset the view back to the whole file before pooling.
r .r = r .f
n , err := r .f .Seek (0 , 0 )
if err == nil {
if n == 0 {
ff := r .ff
ff .bigFilesLock .Lock ()
ff .bigFiles = append (ff .bigFiles , r )
ff .bigFilesLock .Unlock ()
} else {
_ = r .f .Close ()
err = errors .New ("bug: File.Seek(0,0) returned (non-zero, nil)" )
}
} else {
_ = r .f .Close ()
}
r .ff .decReadersCount ()
return err
}
// fsSmallFileReader serves small files and directory indexes via ReadAt,
// allowing one *os.File to back many concurrent readers.
type fsSmallFileReader struct {
ff *fsFile
// startPos..endPos bound the remaining bytes; endPos is exclusive.
startPos int
endPos int
}
// Close releases the reader back to the pool after decrementing the
// owning fsFile's active-reader counter. State is zeroed so a pooled
// reader is always reusable.
func (r *fsSmallFileReader) Close() error {
	ff := r.ff
	ff.decReadersCount()
	*r = fsSmallFileReader{}
	ff.h.smallFileReaderPool.Put(r)
	return nil
}
// UpdateByteRange restricts reads to the inclusive range
// [startPos, endPos]; endPos is stored exclusively as endPos+1.
func (r *fsSmallFileReader ) UpdateByteRange (startPos , endPos int ) error {
r .startPos = startPos
r .endPos = endPos + 1
return nil
}
// Read fills p with the next bytes of the bounded view, reading from the
// in-memory directory index when there is no backing file.
func (r *fsSmallFileReader) Read(p []byte) (int, error) {
	remaining := r.endPos - r.startPos
	if remaining <= 0 {
		return 0, io.EOF
	}
	if len(p) > remaining {
		p = p[:remaining]
	}

	ff := r.ff
	if ff.f == nil {
		// Serve from the generated directory index held in memory.
		n := copy(p, ff.dirIndex[r.startPos:])
		r.startPos += n
		return n, nil
	}

	n, err := ff.f.ReadAt(p, int64(r.startPos))
	r.startPos += n
	return n, err
}
// WriteTo copies the remaining bytes to w. The buffered-copy path tracks
// its position in a local so r stays poolable; the io.ReaderFrom path
// instead delegates to Read, which does advance r.startPos.
func (r *fsSmallFileReader ) WriteTo (w io .Writer ) (int64 , error ) {
ff := r .ff
var n int
var err error
if ff .f == nil {
// In-memory directory index: a single Write suffices.
n , err = w .Write (ff .dirIndex [r .startPos :r .endPos ])
return int64 (n ), err
}
if rf , ok := w .(io .ReaderFrom ); ok {
return rf .ReadFrom (r )
}
// Buffered copy via ReadAt using a pooled scratch buffer.
curPos := r .startPos
bufv := copyBufPool .Get ()
buf := bufv .([]byte )
for err == nil {
tailLen := r .endPos - curPos
if tailLen <= 0 {
break
}
if len (buf ) > tailLen {
buf = buf [:tailLen ]
}
n , err = ff .f .ReadAt (buf , int64 (curPos ))
nw , errw := w .Write (buf [:n ])
curPos += nw
// A short write without an error is a bug in w.
if errw == nil && nw != n {
errw = errors .New ("bug: Write(p) returned (n, nil), where n != len(p)" )
}
// Read errors take precedence over write errors.
if err == nil {
err = errw
}
}
copyBufPool .Put (bufv )
// EOF just means the range was exhausted; not an error for WriteTo.
if err == io .EOF {
err = nil
}
return int64 (curPos - r .startPos ), err
}
// cleanCache evicts expired entries from all three encoding caches.
// Entries still being read are carried in pendingFiles until their
// readers finish; the updated pending list is returned. File handles are
// closed outside the lock to keep the critical section short.
func (h *fsHandler ) cleanCache (pendingFiles []*fsFile ) []*fsFile {
var filesToRelease []*fsFile
h .cacheLock .Lock ()
// Re-check previously pending files: release those no longer in use.
var remainingFiles []*fsFile
for _ , ff := range pendingFiles {
if ff .readersCount > 0 {
remainingFiles = append (remainingFiles , ff )
} else {
filesToRelease = append (filesToRelease , ff )
}
}
pendingFiles = remainingFiles
pendingFiles , filesToRelease = cleanCacheNolock (h .cache , pendingFiles , filesToRelease , h .cacheDuration )
pendingFiles , filesToRelease = cleanCacheNolock (h .cacheBrotli , pendingFiles , filesToRelease , h .cacheDuration )
pendingFiles , filesToRelease = cleanCacheNolock (h .cacheGzip , pendingFiles , filesToRelease , h .cacheDuration )
h .cacheLock .Unlock ()
for _ , ff := range filesToRelease {
ff .Release ()
}
return pendingFiles
}
// cleanCacheNolock removes entries older than cacheDuration from cache.
// Expired entries still in use are appended to pendingFiles; idle ones go
// to filesToRelease. The caller must hold the cache lock.
func cleanCacheNolock(cache map[string]*fsFile, pendingFiles, filesToRelease []*fsFile, cacheDuration time.Duration) ([]*fsFile, []*fsFile) {
	now := time.Now()
	for key, ff := range cache {
		if now.Sub(ff.t) <= cacheDuration {
			continue
		}
		if ff.readersCount > 0 {
			// Still being served; park it until readers finish.
			pendingFiles = append(pendingFiles, ff)
		} else {
			filesToRelease = append(filesToRelease, ff)
		}
		delete(cache, key)
	}
	return pendingFiles, filesToRelease
}
// pathToFilePath maps a request path to an on-disk path under h.root,
// converting slashes to the OS path separator.
func (h *fsHandler ) pathToFilePath (path string ) string {
return filepath .FromSlash (h .root + path )
}
// filePathToCompressed maps a path under h.root to the corresponding path
// under h.compressRoot. Paths outside h.root (or when both roots are the
// same) are returned unchanged.
func (h *fsHandler) filePathToCompressed(filePath string) string {
	if h.root == h.compressRoot || !strings.HasPrefix(filePath, h.root) {
		return filePath
	}
	rel := strings.TrimPrefix(filePath, h.root)
	return filepath.FromSlash(h.compressRoot + rel)
}
// handleRequest serves a single static-file request: it resolves the
// (possibly rewritten) request path, consults the per-encoding open-file
// caches, opens or compresses the file on a miss, and streams the
// response with optional byte-range support.
func (h *fsHandler ) handleRequest (ctx *RequestCtx ) {
var path []byte
if h .pathRewrite != nil {
path = h .pathRewrite (ctx )
} else {
path = ctx .Path ()
}
hasTrailingSlash := len (path ) > 0 && path [len (path )-1 ] == '/'
path = stripTrailingSlashes (path )
// Reject NUL bytes: they could smuggle a different path to the OS.
if n := bytes .IndexByte (path , 0 ); n >= 0 {
ctx .Logger ().Printf ("cannot serve path with nil byte at position %d: %q" , n , path )
ctx .Error ("Are you a hacker?" , StatusBadRequest )
return
}
// A custom rewriter may emit non-normalized paths, so '/../' could
// escape the root; refuse such paths outright.
if h .pathRewrite != nil {
if n := bytes .Index (path , strSlashDotDotSlash ); n >= 0 {
ctx .Logger ().Printf ("cannot serve path with '/../' at position %d due to security reasons: %q" , n , path )
ctx .Error ("Internal Server Error" , StatusInternalServerError )
return
}
}
// Pick the cache matching the negotiated encoding. Byte-range requests
// are never served compressed.
mustCompress := false
fileCache := h .cache
fileEncoding := ""
byteRange := ctx .Request .Header .peek (strRange )
if len (byteRange ) == 0 && h .compress {
if h .compressBrotli && ctx .Request .Header .HasAcceptEncodingBytes (strBr ) {
mustCompress = true
fileCache = h .cacheBrotli
fileEncoding = "br"
} else if ctx .Request .Header .HasAcceptEncodingBytes (strGzip ) {
mustCompress = true
fileCache = h .cacheGzip
fileEncoding = "gzip"
}
}
h .cacheLock .Lock ()
ff , ok := fileCache [string (path )]
if ok {
ff .readersCount ++
}
h .cacheLock .Unlock ()
if !ok {
// Cache miss: open (and possibly compress) the file.
pathStr := string (path )
filePath := h .pathToFilePath (pathStr )
var err error
ff , err = h .openFSFile (filePath , mustCompress , fileEncoding )
// Fall back to serving uncompressed when the compressed copy cannot
// be written next to the original.
if mustCompress && err == errNoCreatePermission {
ctx .Logger ().Printf ("insufficient permissions for saving compressed file for %q. Serving uncompressed file. " +
"Allow write access to the directory with this file in order to improve fasthttp performance" , filePath )
mustCompress = false
ff , err = h .openFSFile (filePath , mustCompress , fileEncoding )
}
if err == errDirIndexRequired {
// Directories are served via an index page; redirect first so
// the URL ends with '/'.
if !hasTrailingSlash {
ctx .RedirectBytes (append (path , '/' ), StatusFound )
return
}
ff , err = h .openIndexFile (ctx , filePath , mustCompress , fileEncoding )
if err != nil {
ctx .Logger ().Printf ("cannot open dir index %q: %v" , filePath , err )
ctx .Error ("Directory index is forbidden" , StatusForbidden )
return
}
} else if err != nil {
ctx .Logger ().Printf ("cannot open file %q: %v" , filePath , err )
if h .pathNotFound == nil {
ctx .Error ("Cannot open requested path" , StatusNotFound )
} else {
ctx .SetStatusCode (StatusNotFound )
h .pathNotFound (ctx )
}
return
}
// Publish the new entry unless a concurrent request won the race,
// in which case our duplicate is released and theirs is used.
h .cacheLock .Lock ()
ff1 , ok := fileCache [pathStr ]
if !ok {
fileCache [pathStr ] = ff
ff .readersCount ++
} else {
ff1 .readersCount ++
}
h .cacheLock .Unlock ()
if ok {
ff .Release ()
ff = ff1
}
}
if !ctx .IfModifiedSince (ff .lastModified ) {
ff .decReadersCount ()
ctx .NotModified ()
return
}
r , err := ff .NewReader ()
if err != nil {
ctx .Logger ().Printf ("cannot obtain file reader for path=%q: %v" , path , err )
ctx .Error ("Internal Server Error" , StatusInternalServerError )
return
}
hdr := &ctx .Response .Header
if ff .compressed {
if fileEncoding == "br" {
hdr .SetContentEncodingBytes (strBr )
} else if fileEncoding == "gzip" {
hdr .SetContentEncodingBytes (strGzip )
}
}
statusCode := StatusOK
contentLength := ff .contentLength
if h .acceptByteRange {
hdr .setNonSpecial (strAcceptRanges , strBytes )
if len (byteRange ) > 0 {
startPos , endPos , err := ParseByteRange (byteRange , contentLength )
if err != nil {
// The reader must be closed on every early return below.
_ = r .(io .Closer ).Close ()
ctx .Logger ().Printf ("cannot parse byte range %q for path=%q: %v" , byteRange , path , err )
ctx .Error ("Range Not Satisfiable" , StatusRequestedRangeNotSatisfiable )
return
}
if err = r .(byteRangeUpdater ).UpdateByteRange (startPos , endPos ); err != nil {
_ = r .(io .Closer ).Close ()
ctx .Logger ().Printf ("cannot seek byte range %q for path=%q: %v" , byteRange , path , err )
ctx .Error ("Internal Server Error" , StatusInternalServerError )
return
}
hdr .SetContentRange (startPos , endPos , contentLength )
contentLength = endPos - startPos + 1
statusCode = StatusPartialContent
}
}
hdr .setNonSpecial (strLastModified , ff .lastModifiedStr )
if !ctx .IsHead () {
ctx .SetBodyStream (r , contentLength )
} else {
// HEAD: advertise the length but send no body; close the reader now.
ctx .Response .ResetBody ()
ctx .Response .SkipBody = true
ctx .Response .Header .SetContentLength (contentLength )
if rc , ok := r .(io .Closer ); ok {
if err := rc .Close (); err != nil {
ctx .Logger ().Printf ("cannot close file reader: %v" , err )
ctx .Error ("Internal Server Error" , StatusInternalServerError )
return
}
}
}
hdr .noDefaultContentType = true
if len (hdr .ContentType ()) == 0 {
ctx .SetContentType (ff .contentType )
}
ctx .SetStatusCode (statusCode )
}
// byteRangeUpdater is implemented by file readers that can constrain
// subsequent reads to an inclusive byte range.
type byteRangeUpdater interface {
UpdateByteRange(startPos, endPos int ) error
}
// ParseByteRange parses a single 'bytes=start-end' Range header value
// against contentLength and returns the inclusive byte positions to
// serve. Supported forms: "bytes=A-B", "bytes=A-" (to the end) and
// "bytes=-N" (last N bytes). End positions beyond the content are clamped
// to contentLength-1.
func ParseByteRange(byteRange []byte, contentLength int) (startPos, endPos int, err error) {
	v := byteRange
	if !bytes.HasPrefix(v, strBytes) {
		return 0, 0, fmt.Errorf("unsupported range units: %q. Expecting %q", byteRange, strBytes)
	}
	v = v[len(strBytes):]
	if len(v) == 0 || v[0] != '=' {
		return 0, 0, fmt.Errorf("missing byte range in %q", byteRange)
	}
	v = v[1:]

	dash := bytes.IndexByte(v, '-')
	switch {
	case dash < 0:
		return 0, 0, fmt.Errorf("missing the end position of byte range in %q", byteRange)
	case dash == 0:
		// Suffix form "-N": serve the last N bytes.
		suffixLen, err := ParseUint(v[1:])
		if err != nil {
			return 0, 0, err
		}
		start := contentLength - suffixLen
		if start < 0 {
			start = 0
		}
		return start, contentLength - 1, nil
	}

	if startPos, err = ParseUint(v[:dash]); err != nil {
		return 0, 0, err
	}
	if startPos >= contentLength {
		return 0, 0, fmt.Errorf("the start position of byte range cannot exceed %d. byte range %q", contentLength-1, byteRange)
	}

	v = v[dash+1:]
	if len(v) == 0 {
		// Open-ended form "A-": serve through the last byte.
		return startPos, contentLength - 1, nil
	}
	if endPos, err = ParseUint(v); err != nil {
		return 0, 0, err
	}
	if endPos >= contentLength {
		endPos = contentLength - 1
	}
	if endPos < startPos {
		return 0, 0, fmt.Errorf("the start position of byte range cannot exceed the end position. byte range %q", byteRange)
	}
	return startPos, endPos, nil
}
// openIndexFile serves a directory request: it tries each configured
// index file name inside dirPath and, when none exists, falls back to a
// generated directory listing (if enabled).
func (h *fsHandler) openIndexFile(ctx *RequestCtx, dirPath string, mustCompress bool, fileEncoding string) (*fsFile, error) {
	for _, indexName := range h.indexNames {
		indexFilePath := dirPath + "/" + indexName
		ff, err := h.openFSFile(indexFilePath, mustCompress, fileEncoding)
		switch {
		case err == nil:
			return ff, nil
		case !os.IsNotExist(err):
			// A missing index is expected; anything else is fatal.
			return nil, fmt.Errorf("cannot open file %q: %w", indexFilePath, err)
		}
	}
	if !h.generateIndexPages {
		return nil, fmt.Errorf("cannot access directory without index page. Directory %q", dirPath)
	}
	return h.createDirIndex(ctx.URI(), dirPath, mustCompress, fileEncoding)
}
var (
// errDirIndexRequired signals that the requested path is a directory,
// so an index page must be served instead.
errDirIndexRequired = errors .New ("directory index required" )
// errNoCreatePermission signals that a compressed copy could not be
// created due to insufficient filesystem permissions.
errNoCreatePermission = errors .New ("no 'create file' permissions" )
)
// createDirIndex renders an HTML listing of dirPath (optionally
// compressed with fileEncoding) and wraps it in an in-memory fsFile.
func (h *fsHandler ) createDirIndex (base *URI , dirPath string , mustCompress bool , fileEncoding string ) (*fsFile , error ) {
w := &bytebufferpool .ByteBuffer {}
basePathEscaped := html .EscapeString (string (base .Path ()))
_, _ = fmt .Fprintf (w , "<html><head><title>%s</title><style>.dir { font-weight: bold }</style></head><body>" , basePathEscaped )
_, _ = fmt .Fprintf (w , "<h1>%s</h1>" , basePathEscaped )
_, _ = fmt .Fprintf (w , "<ul>" )
// Link to the parent directory unless this is the root path.
if len (basePathEscaped ) > 1 {
var parentURI URI
base .CopyTo (&parentURI )
parentURI .Update (string (base .Path ()) + "/.." )
parentPathEscaped := html .EscapeString (string (parentURI .Path ()))
_, _ = fmt .Fprintf (w , `<li><a href="%s" class="dir">..</a></li>` , parentPathEscaped )
}
f , err := os .Open (dirPath )
if err != nil {
return nil , err
}
fileinfos , err := f .Readdir (0 )
_ = f .Close ()
if err != nil {
return nil , err
}
fm := make (map [string ]os .FileInfo , len (fileinfos ))
filenames := make ([]string , 0 , len (fileinfos ))
// Hide cached compressed copies from the listing.
nestedContinue :
for _ , fi := range fileinfos {
name := fi .Name ()
for _ , cfs := range h .compressedFileSuffixes {
if strings .HasSuffix (name , cfs ) {
continue nestedContinue
}
}
fm [name ] = fi
filenames = append (filenames , name )
}
var u URI
base .CopyTo (&u )
u .Update (string (u .Path ()) + "/" )
// Emit one entry per file, sorted by name.
sort .Strings (filenames )
for _ , name := range filenames {
u .Update (name )
pathEscaped := html .EscapeString (string (u .Path ()))
fi := fm [name ]
auxStr := "dir"
className := "dir"
if !fi .IsDir () {
auxStr = fmt .Sprintf ("file, %d bytes" , fi .Size ())
className = "file"
}
_, _ = fmt .Fprintf (w , `<li><a href="%s" class="%s">%s</a>, %s, last modified %s</li>` ,
pathEscaped , className , html .EscapeString (name ), auxStr , fsModTime (fi .ModTime ()))
}
_, _ = fmt .Fprintf (w , "</ul></body></html>" )
// Compress the generated page when the client negotiated an encoding.
if mustCompress {
var zbuf bytebufferpool .ByteBuffer
if fileEncoding == "br" {
zbuf .B = AppendBrotliBytesLevel (zbuf .B , w .B , CompressDefaultCompression )
} else if fileEncoding == "gzip" {
zbuf .B = AppendGzipBytesLevel (zbuf .B , w .B , CompressDefaultCompression )
}
w = &zbuf
}
dirIndex := w .B
lastModified := time .Now ()
ff := &fsFile {
h : h ,
dirIndex : dirIndex ,
contentType : "text/html; charset=utf-8" ,
contentLength : len (dirIndex ),
compressed : mustCompress ,
lastModified : lastModified ,
lastModifiedStr : AppendHTTPDate (nil , lastModified ),
t : lastModified ,
}
return ff , nil
}
const (
// fsMinCompressRatio: files are cached compressed only when worthwhile;
// this ratio is passed to isFileCompressible as the threshold.
fsMinCompressRatio = 0.8
// fsMaxCompressibleFileSize caps on-demand compression at 8 MiB.
fsMaxCompressibleFileSize = 8 * 1024 * 1024
)
// compressAndOpenFSFile opens filePath and, when the file is worth
// compressing, creates a compressed copy (suffix per fileEncoding) under
// the compress root and returns an fsFile for it. Already-compressed,
// oversized or poorly-compressible files are served as-is.
func (h *fsHandler) compressAndOpenFSFile(filePath string, fileEncoding string) (*fsFile, error) {
	f, err := os.Open(filePath)
	if err != nil {
		return nil, err
	}
	fileInfo, err := f.Stat()
	if err != nil {
		_ = f.Close()
		return nil, fmt.Errorf("cannot obtain info for file %q: %w", filePath, err)
	}
	if fileInfo.IsDir() {
		_ = f.Close()
		return nil, errDirIndexRequired
	}

	// Skip compression when it would not pay off.
	if strings.HasSuffix(filePath, h.compressedFileSuffixes[fileEncoding]) ||
		fileInfo.Size() > fsMaxCompressibleFileSize ||
		!isFileCompressible(f, fsMinCompressRatio) {
		return h.newFSFile(f, fileInfo, false, "")
	}

	compressedFilePath := h.filePathToCompressed(filePath)
	if compressedFilePath != filePath {
		if err := os.MkdirAll(filepath.Dir(compressedFilePath), os.ModePerm); err != nil {
			// BUGFIX: close the source file before returning; it was
			// previously leaked on this error path.
			_ = f.Close()
			return nil, err
		}
	}
	compressedFilePath += h.compressedFileSuffixes[fileEncoding]

	absPath, err := filepath.Abs(compressedFilePath)
	if err != nil {
		_ = f.Close()
		return nil, fmt.Errorf("cannot determine absolute path for %q: %w", compressedFilePath, err)
	}

	// Serialize creation of the same compressed file across goroutines.
	flock := getFileLock(absPath)
	flock.Lock()
	ff, err := h.compressFileNolock(f, fileInfo, filePath, compressedFilePath, fileEncoding)
	flock.Unlock()
	return ff, err
}
// compressFileNolock compresses f into compressedFilePath (via a .tmp
// file that is atomically renamed into place) and returns an fsFile for
// the compressed copy. The per-path file lock must be held by the caller;
// f is always closed before returning.
func (h *fsHandler ) compressFileNolock (f *os .File , fileInfo os .FileInfo , filePath , compressedFilePath string , fileEncoding string ) (*fsFile , error ) {
// Another goroutine may have created the compressed file while this
// one was waiting for the lock.
if _ , err := os .Stat (compressedFilePath ); err == nil {
_ = f .Close ()
return h .newCompressedFSFile (compressedFilePath , fileEncoding )
}
tmpFilePath := compressedFilePath + ".tmp"
zf , err := os .Create (tmpFilePath )
if err != nil {
_ = f .Close ()
if !os .IsPermission (err ) {
return nil , fmt .Errorf ("cannot create temporary file %q: %w" , tmpFilePath , err )
}
// Permission failures get a sentinel so the caller can fall back to
// serving the uncompressed file.
return nil , errNoCreatePermission
}
if fileEncoding == "br" {
zw := acquireStacklessBrotliWriter (zf , CompressDefaultCompression )
_, err = copyZeroAlloc (zw , f )
if err1 := zw .Flush (); err == nil {
err = err1
}
releaseStacklessBrotliWriter (zw , CompressDefaultCompression )
} else if fileEncoding == "gzip" {
zw := acquireStacklessGzipWriter (zf , CompressDefaultCompression )
_, err = copyZeroAlloc (zw , f )
if err1 := zw .Flush (); err == nil {
err = err1
}
releaseStacklessGzipWriter (zw , CompressDefaultCompression )
}
_ = zf .Close ()
_ = f .Close ()
if err != nil {
return nil , fmt .Errorf ("error when compressing file %q to %q: %w" , filePath , tmpFilePath , err )
}
// Propagate the original modification time so openFSFile can detect
// stale compressed copies later.
if err = os .Chtimes (tmpFilePath , time .Now (), fileInfo .ModTime ()); err != nil {
return nil , fmt .Errorf ("cannot change modification time to %v for tmp file %q: %v" ,
fileInfo .ModTime (), tmpFilePath , err )
}
// Atomically publish the finished compressed file.
if err = os .Rename (tmpFilePath , compressedFilePath ); err != nil {
return nil , fmt .Errorf ("cannot move compressed file from %q to %q: %w" , tmpFilePath , compressedFilePath , err )
}
return h .newCompressedFSFile (compressedFilePath , fileEncoding )
}
// newCompressedFSFile opens the compressed file at filePath and wraps it
// in an fsFile flagged with the given encoding.
func (h *fsHandler) newCompressedFSFile(filePath string, fileEncoding string) (*fsFile, error) {
	f, err := os.Open(filePath)
	if err != nil {
		return nil, fmt.Errorf("cannot open compressed file %q: %w", filePath, err)
	}
	info, err := f.Stat()
	if err != nil {
		_ = f.Close()
		return nil, fmt.Errorf("cannot obtain info for compressed file %q: %w", filePath, err)
	}
	return h.newFSFile(f, info, true, fileEncoding)
}
// openFSFile opens filePath (with the per-encoding suffix appended when
// mustCompress is set) and wraps it in an fsFile. Missing compressed
// copies are created on demand; stale copies (older than the original by
// at least one second) are removed and regenerated.
func (h *fsHandler ) openFSFile (filePath string , mustCompress bool , fileEncoding string ) (*fsFile , error ) {
filePathOriginal := filePath
if mustCompress {
filePath += h .compressedFileSuffixes [fileEncoding ]
}
f , err := os .Open (filePath )
if err != nil {
if mustCompress && os .IsNotExist (err ) {
return h .compressAndOpenFSFile (filePathOriginal , fileEncoding )
}
return nil , err
}
fileInfo , err := f .Stat ()
if err != nil {
_ = f .Close ()
return nil , fmt .Errorf ("cannot obtain info for file %q: %w" , filePath , err )
}
if fileInfo .IsDir () {
_ = f .Close ()
if mustCompress {
return nil , fmt .Errorf ("directory with unexpected suffix found: %q. Suffix: %q" ,
filePath , h .compressedFileSuffixes [fileEncoding ])
}
return nil , errDirIndexRequired
}
if mustCompress {
fileInfoOriginal , err := os .Stat (filePathOriginal )
if err != nil {
_ = f .Close ()
return nil , fmt .Errorf ("cannot obtain info for original file %q: %w" , filePathOriginal , err )
}
// The compressed copy is stale if the original was modified later;
// the 1s slack tolerates coarse filesystem timestamps.
if fileInfoOriginal .ModTime ().Sub (fileInfo .ModTime ()) >= time .Second {
_ = f .Close ()
_ = os .Remove (filePath )
return h .compressAndOpenFSFile (filePathOriginal , fileEncoding )
}
}
return h .newFSFile (f , fileInfo , mustCompress , fileEncoding )
}
// newFSFile wraps an open file in an fsFile, determining the content type
// from the file extension and falling back to sniffing the first file
// bytes. Ownership of f transfers to the returned fsFile; on error f is
// closed here.
func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool, fileEncoding string) (*fsFile, error) {
	n := fileInfo.Size()
	contentLength := int(n)
	if n != int64(contentLength) {
		// Size overflows int on this platform.
		_ = f.Close()
		return nil, fmt.Errorf("too big file: %d bytes", n)
	}

	ext := fileExtension(fileInfo.Name(), compressed, h.compressedFileSuffixes[fileEncoding])
	contentType := mime.TypeByExtension(ext)
	if len(contentType) == 0 {
		data, err := readFileHeader(f, compressed, fileEncoding)
		if err != nil {
			// BUGFIX: close f before returning; it was previously leaked
			// on this error path.
			_ = f.Close()
			return nil, fmt.Errorf("cannot read header of the file %q: %w", f.Name(), err)
		}
		contentType = http.DetectContentType(data)
	}

	lastModified := fileInfo.ModTime()
	ff := &fsFile{
		h:               h,
		f:               f,
		contentType:     contentType,
		contentLength:   contentLength,
		compressed:      compressed,
		lastModified:    lastModified,
		lastModifiedStr: AppendHTTPDate(nil, lastModified),
		t:               time.Now(),
	}
	return ff, nil
}
// readFileHeader returns up to the first 512 bytes of f, transparently
// decompressing when the stored data is compressed, and rewinds f to the
// start afterwards.
func readFileHeader(f *os .File , compressed bool , fileEncoding string ) ([]byte , error ) {
r := io .Reader (f )
var (
br *brotli .Reader
zr *gzip .Reader
)
if compressed {
var err error
if fileEncoding == "br" {
if br , err = acquireBrotliReader (f ); err != nil {
return nil , err
}
r = br
} else if fileEncoding == "gzip" {
if zr , err = acquireGzipReader (f ); err != nil {
return nil , err
}
r = zr
}
}
// 512 bytes is the amount http.DetectContentType examines.
lr := &io .LimitedReader {
R : r ,
N : 512 ,
}
data , err := io .ReadAll (lr )
// Rewind so subsequent reads start at the beginning of the file.
// Note: the seek error is deliberately checked separately (shadowed err)
// and takes precedence over a read error.
if _ , err := f .Seek (0 , 0 ); err != nil {
return nil , err
}
if br != nil {
releaseBrotliReader (br )
}
if zr != nil {
releaseGzipReader (zr )
}
return data , err
}
// stripLeadingSlashes removes up to stripSlashes leading path segments
// from path (each segment starts with '/'). An empty slice is returned
// when fewer segments remain than requested. Panics if a non-empty path
// does not start with '/'.
func stripLeadingSlashes(path []byte, stripSlashes int) []byte {
	for ; stripSlashes > 0 && len(path) > 0; stripSlashes-- {
		if path[0] != '/' {
			panic("BUG: path must start with slash")
		}
		n := bytes.IndexByte(path[1:], '/')
		if n < 0 {
			// No next segment boundary: everything is consumed.
			return path[:0]
		}
		path = path[n+1:]
	}
	return path
}
// stripTrailingSlashes returns path without any trailing '/' bytes.
func stripTrailingSlashes(path []byte) []byte {
	n := len(path)
	for n > 0 && path[n-1] == '/' {
		n--
	}
	return path[:n]
}
// fileExtension returns the extension of path including the leading dot
// ("" when there is none). For compressed cache copies the
// compressedFileSuffix is stripped first so the original extension is
// reported.
func fileExtension(path string, compressed bool, compressedFileSuffix string) string {
	if compressed {
		path = strings.TrimSuffix(path, compressedFileSuffix)
	}
	if n := strings.LastIndexByte(path, '.'); n >= 0 {
		return path[n:]
	}
	return ""
}
// FileLastModified returns the last-modified time of the file at path,
// normalized to UTC with one-second granularity (see fsModTime).
func FileLastModified(path string) (time.Time, error) {
	f, err := os.Open(path)
	if err != nil {
		return zeroTime, err
	}
	info, err := f.Stat()
	_ = f.Close()
	if err != nil {
		return zeroTime, err
	}
	return fsModTime(info.ModTime()), nil
}
func fsModTime(t time .Time ) time .Time {
return t .In (time .UTC ).Truncate (time .Second )
}
// filesLockMap maps absolute file paths to the mutex serializing
// compressed-copy creation for that path.
var filesLockMap sync.Map

// getFileLock returns the mutex associated with absPath, creating it on
// first use; concurrent callers for the same path get the same lock.
func getFileLock(absPath string) *sync.Mutex {
	v, _ := filesLockMap.LoadOrStore(absPath, &sync.Mutex{})
	return v.(*sync.Mutex)
}
The pages are generated with Golds v0.6.7 (GOOS=linux, GOARCH=amd64).
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and can be submitted to the issue list.
Please follow @Go100and1 (reachable from the left QR code) to get the latest news of Golds.