Source File: testing.go
Belonging Package: testing
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package testing provides support for automated testing of Go packages.
// It is intended to be used in concert with the "go test" command, which automates
// execution of any function of the form
//
//	func TestXxx(*testing.T)
//
// where Xxx does not start with a lowercase letter. The function name
// serves to identify the test routine.
//
// Within these functions, use the Error, Fail or related methods to signal failure.
//
// To write a new test suite, create a file that
// contains the TestXxx functions as described here,
// and give that file a name ending in "_test.go".
// The file will be excluded from regular
// package builds but will be included when the "go test" command is run.
//
// The test file can be in the same package as the one being tested,
// or in a corresponding package with the suffix "_test".
//
// If the test file is in the same package, it may refer to unexported
// identifiers within the package, as in this example:
//
//	package abs
//
//	import "testing"
//
//	func TestAbs(t *testing.T) {
//		got := Abs(-1)
//		if got != 1 {
//			t.Errorf("Abs(-1) = %d; want 1", got)
//		}
//	}
//
// If the file is in a separate "_test" package, the package being tested
// must be imported explicitly and only its exported identifiers may be used.
// This is known as "black box" testing.
//
//	package abs_test
//
//	import (
//		"testing"
//
//		"path_to_pkg/abs"
//	)
//
//	func TestAbs(t *testing.T) {
//		got := abs.Abs(-1)
//		if got != 1 {
//			t.Errorf("Abs(-1) = %d; want 1", got)
//		}
//	}
//
// For more detail, run "go help test" and "go help testflag".
//
// # Benchmarks
//
// Functions of the form
//
//	func BenchmarkXxx(*testing.B)
//
// are considered benchmarks, and are executed by the "go test" command when
// its -bench flag is provided. Benchmarks are run sequentially.
//
// For a description of the testing flags, see
// https://golang.org/cmd/go/#hdr-Testing_flags.
//
// A sample benchmark function looks like this:
//
//	func BenchmarkRandInt(b *testing.B) {
//		for i := 0; i < b.N; i++ {
//			rand.Int()
//		}
//	}
//
// The benchmark function must run the target code b.N times.
// During benchmark execution, b.N is adjusted until the benchmark function lasts
// long enough to be timed reliably. The output
//
//	BenchmarkRandInt-8   	68453040	        17.8 ns/op
//
// means that the loop ran 68453040 times at a speed of 17.8 ns per loop.
//
// If a benchmark needs some expensive setup before running, the timer
// may be reset:
//
//	func BenchmarkBigLen(b *testing.B) {
//		big := NewBig()
//		b.ResetTimer()
//		for i := 0; i < b.N; i++ {
//			big.Len()
//		}
//	}
//
// If a benchmark needs to test performance in a parallel setting, it may use
// the RunParallel helper function; such benchmarks are intended to be used with
// the go test -cpu flag:
//
//	func BenchmarkTemplateParallel(b *testing.B) {
//		templ := template.Must(template.New("test").Parse("Hello, {{.}}!"))
//		b.RunParallel(func(pb *testing.PB) {
//			var buf bytes.Buffer
//			for pb.Next() {
//				buf.Reset()
//				templ.Execute(&buf, "World")
//			}
//		})
//	}
//
// A detailed specification of the benchmark results format is given
// in https://golang.org/design/14313-benchmark-format.
//
// There are standard tools for working with benchmark results at
// https://golang.org/x/perf/cmd.
// In particular, https://golang.org/x/perf/cmd/benchstat performs
// statistically robust A/B comparisons.
//
// # Examples
//
// The package also runs and verifies example code. Example functions may
// include a concluding line comment that begins with "Output:" and is compared with
// the standard output of the function when the tests are run. (The comparison
// ignores leading and trailing space.) These are examples of an example:
//
//	func ExampleHello() {
//		fmt.Println("hello")
//		// Output: hello
//	}
//
//	func ExampleSalutations() {
//		fmt.Println("hello, and")
//		fmt.Println("goodbye")
//		// Output:
//		// hello, and
//		// goodbye
//	}
//
// The comment prefix "Unordered output:" is like "Output:", but matches any
// line order:
//
//	func ExamplePerm() {
//		for _, value := range Perm(5) {
//			fmt.Println(value)
//		}
//		// Unordered output: 4
//		// 2
//		// 1
//		// 3
//		// 0
//	}
//
// Example functions without output comments are compiled but not executed.
//
// The naming convention to declare examples for the package, a function F, a type T and
// method M on type T are:
//
//	func Example() { ... }
//	func ExampleF() { ... }
//	func ExampleT() { ... }
//	func ExampleT_M() { ... }
//
// Multiple example functions for a package/type/function/method may be provided by
// appending a distinct suffix to the name. The suffix must start with a
// lower-case letter.
//
//	func Example_suffix() { ... }
//	func ExampleF_suffix() { ... }
//	func ExampleT_suffix() { ... }
//	func ExampleT_M_suffix() { ... }
//
// The entire test file is presented as the example when it contains a single
// example function, at least one other function, type, variable, or constant
// declaration, and no test or benchmark functions.
//
// # Fuzzing
//
// 'go test' and the testing package support fuzzing, a testing technique where
// a function is called with randomly generated inputs to find bugs not
// anticipated by unit tests.
//
// Functions of the form
//
//	func FuzzXxx(*testing.F)
//
// are considered fuzz tests.
//
// For example:
//
//	func FuzzHex(f *testing.F) {
//		for _, seed := range [][]byte{{}, {0}, {9}, {0xa}, {0xf}, {1, 2, 3, 4}} {
//			f.Add(seed)
//		}
//		f.Fuzz(func(t *testing.T, in []byte) {
//			enc := hex.EncodeToString(in)
//			out, err := hex.DecodeString(enc)
//			if err != nil {
//				t.Fatalf("%v: decode: %v", in, err)
//			}
//			if !bytes.Equal(in, out) {
//				t.Fatalf("%v: not equal after round trip: %v", in, out)
//			}
//		})
//	}
//
// A fuzz test maintains a seed corpus, or a set of inputs which are run by
// default, and can seed input generation. Seed inputs may be registered by
// calling (*F).Add or by storing files in the directory testdata/fuzz/<Name>
// (where <Name> is the name of the fuzz test) within the package containing
// the fuzz test. Seed inputs are optional, but the fuzzing engine may find
// bugs more efficiently when provided with a set of small seed inputs with good
// code coverage. These seed inputs can also serve as regression tests for bugs
// identified through fuzzing.
//
// The function passed to (*F).Fuzz within the fuzz test is considered the fuzz
// target. A fuzz target must accept a *T parameter, followed by one or more
// parameters for random inputs. The types of arguments passed to (*F).Add must
// be identical to the types of these parameters. The fuzz target may signal
// that it's found a problem the same way tests do: by calling T.Fail (or any
// method that calls it like T.Error or T.Fatal) or by panicking.
//
// When fuzzing is enabled (by setting the -fuzz flag to a regular expression
// that matches a specific fuzz test), the fuzz target is called with arguments
// generated by repeatedly making random changes to the seed inputs.
// On supported platforms, 'go test' compiles the test executable with fuzzing
// coverage instrumentation. The fuzzing engine uses that instrumentation to
// find and cache inputs that expand coverage, increasing the likelihood of
// finding bugs. If the fuzz target fails for a given input, the fuzzing engine
// writes the inputs that caused the failure to a file in the directory
// testdata/fuzz/<Name> within the package directory. This file later serves as
// a seed input. If the file can't be written at that location (for example,
// because the directory is read-only), the fuzzing engine writes the file to
// the fuzz cache directory within the build cache instead.
//
// When fuzzing is disabled, the fuzz target is called with the seed inputs
// registered with F.Add and seed inputs from testdata/fuzz/<Name>. In this
// mode, the fuzz test acts much like a regular test, with subtests started
// with F.Fuzz instead of T.Run.
//
// See https://go.dev/doc/fuzz for documentation about fuzzing.
//
// # Skipping
//
// Tests or benchmarks may be skipped at run time with a call to
// the Skip method of *T or *B:
//
//	func TestTimeConsuming(t *testing.T) {
//		if testing.Short() {
//			t.Skip("skipping test in short mode.")
//		}
//		...
//	}
//
// The Skip method of *T can be used in a fuzz target if the input is invalid,
// but should not be considered a failing input. For example:
//
//	func FuzzJSONMarshaling(f *testing.F) {
//		f.Fuzz(func(t *testing.T, b []byte) {
//			var v interface{}
//			if err := json.Unmarshal(b, &v); err != nil {
//				t.Skip()
//			}
//			if _, err := json.Marshal(v); err != nil {
//				t.Errorf("Marshal: %v", err)
//			}
//		})
//	}
//
// # Subtests and Sub-benchmarks
//
// The Run methods of T and B allow defining subtests and sub-benchmarks,
// without having to define separate functions for each. This enables uses
// like table-driven benchmarks and creating hierarchical tests.
// It also provides a way to share common setup and tear-down code:
//
//	func TestFoo(t *testing.T) {
//		// <setup code>
//		t.Run("A=1", func(t *testing.T) { ... })
//		t.Run("A=2", func(t *testing.T) { ... })
//		t.Run("B=1", func(t *testing.T) { ... })
//		// <tear-down code>
//	}
//
// Each subtest and sub-benchmark has a unique name: the combination of the name
// of the top-level test and the sequence of names passed to Run, separated by
// slashes, with an optional trailing sequence number for disambiguation.
//
// The argument to the -run, -bench, and -fuzz command-line flags is an unanchored regular
// expression that matches the test's name. For tests with multiple slash-separated
// elements, such as subtests, the argument is itself slash-separated, with
// expressions matching each name element in turn. Because it is unanchored, an
// empty expression matches any string.
// For example, using "matching" to mean "whose name contains":
//
//	go test -run ''        # Run all tests.
//	go test -run Foo       # Run top-level tests matching "Foo", such as "TestFooBar".
//	go test -run Foo/A=    # For top-level tests matching "Foo", run subtests matching "A=".
//	go test -run /A=1      # For all top-level tests, run subtests matching "A=1".
//	go test -fuzz FuzzFoo  # Fuzz the target matching "FuzzFoo"
//
// The -run argument can also be used to run a specific value in the seed
// corpus, for debugging. For example:
//
//	go test -run=FuzzFoo/9ddb952d9814
//
// The -fuzz and -run flags can both be set, in order to fuzz a target but
// skip the execution of all other tests.
//
// Subtests can also be used to control parallelism. A parent test will only
// complete once all of its subtests complete.
// In this example, all tests are
// run in parallel with each other, and only with each other, regardless of
// other top-level tests that may be defined:
//
//	func TestGroupedParallel(t *testing.T) {
//		for _, tc := range tests {
//			tc := tc // capture range variable
//			t.Run(tc.Name, func(t *testing.T) {
//				t.Parallel()
//				...
//			})
//		}
//	}
//
// Run does not return until parallel subtests have completed, providing a way
// to clean up after a group of parallel tests:
//
//	func TestTeardownParallel(t *testing.T) {
//		// This Run will not return until the parallel tests finish.
//		t.Run("group", func(t *testing.T) {
//			t.Run("Test1", parallelTest1)
//			t.Run("Test2", parallelTest2)
//			t.Run("Test3", parallelTest3)
//		})
//		// <tear-down code>
//	}
//
// # Main
//
// It is sometimes necessary for a test or benchmark program to do extra setup or teardown
// before or after it executes. It is also sometimes necessary to control
// which code runs on the main thread. To support these and other cases,
// if a test file contains a function:
//
//	func TestMain(m *testing.M)
//
// then the generated test will call TestMain(m) instead of running the tests or benchmarks
// directly. TestMain runs in the main goroutine and can do whatever setup
// and teardown is necessary around a call to m.Run. m.Run will return an exit
// code that may be passed to os.Exit. If TestMain returns, the test wrapper
// will pass the result of m.Run to os.Exit itself.
//
// When TestMain is called, flag.Parse has not been run. If TestMain depends on
// command-line flags, including those of the testing package, it should call
// flag.Parse explicitly. Command line flags are always parsed by the time test
// or benchmark functions run.
//
// A simple implementation of TestMain is:
//
//	func TestMain(m *testing.M) {
//		// call flag.Parse() here if TestMain uses flags
//		os.Exit(m.Run())
//	}
//
// TestMain is a low-level primitive and should not be necessary for casual
// testing needs, where ordinary test functions suffice.
package testing

import ()

var initRan bool

// Init registers testing flags. These flags are automatically registered by
// the "go test" command before running test functions, so Init is only needed
// when calling functions such as Benchmark without using "go test".
//
// Init has no effect if it was already called.
func Init() {
	if initRan {
		return
	}
	initRan = true
	// The short flag requests that tests run more quickly, but its functionality
	// is provided by test writers themselves. The testing package is just its
	// home. The all.bash installation script sets it to make installation more
	// efficient, but by default the flag is off so a plain "go test" will do a
	// full test of the package.
	short = flag.Bool("test.short", false, "run smaller test suite to save time")

	// The failfast flag requests that test execution stop after the first test failure.
	failFast = flag.Bool("test.failfast", false, "do not start new tests after the first test failure")

	// The directory in which to create profile files and the like.
When run from// "go test", the binary always runs in the source directory for the package;// this flag lets "go test" tell the binary to write the files in the directory where// the "go test" command is run.outputDir = flag.String("test.outputdir", "", "write profiles to `dir`")// Report as tests are run; default is silent for success.flag.Var(&chatty, "test.v", "verbose: print additional output")count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times")coverProfile = flag.String("test.coverprofile", "", "write a coverage profile to `file`")gocoverdir = flag.String("test.gocoverdir", "", "write coverage intermediate files to this directory")matchList = flag.String("test.list", "", "list tests, examples, and benchmarks matching `regexp` then exit")match = flag.String("test.run", "", "run only tests and examples matching `regexp`")skip = flag.String("test.skip", "", "do not list or run tests matching `regexp`")memProfile = flag.String("test.memprofile", "", "write an allocation profile to `file`")memProfileRate = flag.Int("test.memprofilerate", 0, "set memory allocation profiling `rate` (see runtime.MemProfileRate)")cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to `file`")blockProfile = flag.String("test.blockprofile", "", "write a goroutine blocking profile to `file`")blockProfileRate = flag.Int("test.blockprofilerate", 1, "set blocking profile `rate` (see runtime.SetBlockProfileRate)")mutexProfile = flag.String("test.mutexprofile", "", "write a mutex contention profile to the named file after execution")mutexProfileFraction = flag.Int("test.mutexprofilefraction", 1, "if >= 0, calls runtime.SetMutexProfileFraction()")panicOnExit0 = flag.Bool("test.paniconexit0", false, "panic on call to os.Exit(0)")traceFile = flag.String("test.trace", "", "write an execution trace to `file`")timeout = flag.Duration("test.timeout", 0, "panic test binary after duration `d` (default 0, timeout disabled)")cpuListStr = flag.String("test.cpu", "", "comma-separated `list` of cpu counts to run each test with")parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "run at most `n` tests in parallel")testlog = flag.String("test.testlogfile", "", "write test action log to `file` (for use only by cmd/go)")shuffle = flag.String("test.shuffle", "off", "randomize the execution order of tests and benchmarks")fullPath = flag.Bool("test.fullpath", false, "show full file names in error messages")initBenchmarkFlags()initFuzzFlags()}var (// Flags, registered during Init.short *boolfailFast *booloutputDir *stringchatty chattyFlagcount *uintcoverProfile *stringgocoverdir *stringmatchList *stringmatch *stringskip *stringmemProfile *stringmemProfileRate *intcpuProfile *stringblockProfile *stringblockProfileRate *intmutexProfile *stringmutexProfileFraction *intpanicOnExit0 *booltraceFile *stringtimeout *time.DurationcpuListStr *stringparallel *intshuffle *stringtestlog *stringfullPath *boolhaveExamples bool // are there examples?cpuList []inttestlogFile *os.FilenumFailed atomic.Uint32 // number of test failuresrunning sync.Map // map[string]time.Time of running, unpaused tests)type chattyFlag struct {on bool // -v is set in some formjson bool // -v=test2json is set, to make output better for test2json}func (*chattyFlag) () bool { return true }func ( *chattyFlag) ( string) error {switch {default:return fmt.Errorf("invalid flag -test.v=%s", )case "true", "test2json":.on = true.json = == "test2json"case "false":.on = false.json = false}return nil}func ( *chattyFlag) () string {if .json {return 
"test2json"}if .on {return "true"}return "false"}func ( *chattyFlag) () any {if .json {return "test2json"}return .on}const marker = byte(0x16) // ^V for framingfunc ( *chattyFlag) () string {if .json {return string(marker)}return ""}type chattyPrinter struct {w io.WriterlastNameMu sync.Mutex // guards lastNamelastName string // last printed test name in chatty modejson bool // -v=json output mode}func newChattyPrinter( io.Writer) *chattyPrinter {return &chattyPrinter{w: , json: chatty.json}}// prefix is like chatty.prefix but using p.json instead of chatty.json.// Using p.json allows tests to check the json behavior without modifying// the global variable. For convenience, we allow p == nil and treat// that as not in json mode (because it's not chatty at all).func ( *chattyPrinter) () string {if != nil && .json {return string(marker)}return ""}// Updatef prints a message about the status of the named test to w.//// The formatted message must include the test name itself.func ( *chattyPrinter) (, string, ...any) {.lastNameMu.Lock()defer .lastNameMu.Unlock()// Since the message already implies an association with a specific new test,// we don't need to check what the old test name was or log an extra NAME line// for it. (We're updating it anyway, and the current message already includes// the test name.).lastName =fmt.Fprintf(.w, .prefix()+, ...)}// Printf prints a message, generated by the named test, that does not// necessarily mention that tests's name itself.func ( *chattyPrinter) (, string, ...any) {.lastNameMu.Lock()defer .lastNameMu.Unlock()if .lastName == "" {.lastName =} else if .lastName != {fmt.Fprintf(.w, "%s=== NAME %s\n", .prefix(), ).lastName =}fmt.Fprintf(.w, , ...)}// The maximum number of stack frames to go through when skipping helper functions for// the purpose of decorating log messages.const maxStackLen = 50// common holds the elements common between T and B and// captures common methods such as Errorf.type common struct {mu sync.RWMutex // guards this group of fieldsoutput []byte // Output generated by test or benchmark.w io.Writer // For flushToParent.ran bool // Test or benchmark (or one of its subtests) was executed.failed bool // Test or benchmark has failed.skipped bool // Test or benchmark has been skipped.done bool // Test is finished and all subtests have completed.helperPCs map[uintptr]struct{} // functions to be skipped when writing file/line infohelperNames map[string]struct{} // helperPCs converted to function namescleanups []func() // optional functions to be called at the end of the testcleanupName string // Name of the cleanup function.cleanupPc []uintptr // The stack trace at the point where Cleanup was called.finished bool // Test function has completed.inFuzzFn bool // Whether the fuzz target, if this is one, is running.chatty *chattyPrinter // A copy of chattyPrinter, if the chatty flag is set.bench bool // Whether the current test is a benchmark.hasSub atomic.Bool // whether there are sub-benchmarks.cleanupStarted atomic.Bool // Registered cleanup callbacks have started to executeraceErrors int // Number of races detected during test.runner string // Function name of tRunner running the test.isParallel bool // Whether the test is parallel.parent *commonlevel int // Nesting depth of test or benchmark.creator []uintptr // If level > 0, the stack trace at the point where the parent called t.Run.name string // Name of test or benchmark.start time.Time // Time test or benchmark startedduration time.Durationbarrier chan bool // To signal parallel subtests 
they may start. Nil when T.Parallel is not present (B) or not usable (when fuzzing).signal chan bool // To signal a test is done.sub []*T // Queue of subtests to be run in parallel.tempDirMu sync.MutextempDir stringtempDirErr errortempDirSeq int32}// Short reports whether the -test.short flag is set.func () bool {if short == nil {panic("testing: Short called before Init")}// Catch code that calls this from TestMain without first calling flag.Parse.if !flag.Parsed() {panic("testing: Short called before Parse")}return *short}// testBinary is set by cmd/go to "1" if this is a binary built by "go test".// The value is set to "1" by a -X option to cmd/link. We assume that// because this is possible, the compiler will not optimize testBinary// into a constant on the basis that it is an unexported package-scope// variable that is never changed. If the compiler ever starts implementing// such an optimization, we will need some technique to mark this variable// as "changed by a cmd/link -X option".var testBinary = "0"// Testing reports whether the current code is being run in a test.// This will report true in programs created by "go test",// false in programs created by "go build".func () bool {return testBinary == "1"}// CoverMode reports what the test coverage mode is set to. The// values are "set", "count", or "atomic". The return value will be// empty if test coverage is not enabled.func () string {if goexperiment.CoverageRedesign {return cover2.mode}return cover.Mode}// Verbose reports whether the -test.v flag is set.func () bool {// Same as in Short.if !flag.Parsed() {panic("testing: Verbose called before Parse")}return chatty.on}func ( *common) ( string) {if .inFuzzFn {panic(fmt.Sprintf("testing: f.%s was called inside the fuzz target, use t.%s instead", , ))}}// frameSkip searches, starting after skip frames, for the first caller frame// in a function not marked as a helper and returns that frame.// The search stops if it finds a tRunner function that// was the entry point into the test and the test is not a subtest.// This function must be called with c.mu held.func ( *common) ( int) runtime.Frame {// If the search continues into the parent test, we'll have to hold// its mu temporarily. 
If we then return, we need to unlock it.:= falsedefer func() {if {.mu.Unlock()}}()var [maxStackLen]uintptr// Skip two extra frames to account for this function// and runtime.Callers itself.:= runtime.Callers(+2, [:])if == 0 {panic("testing: zero callers found")}:= runtime.CallersFrames([:])var , , runtime.Framefor := true; ; = {, = .Next()if .Function == "runtime.gopanic" {continue}if .Function == .cleanupName {= runtime.CallersFrames(.cleanupPc)continue}if .PC == 0 {=}if .Function == .runner {// We've gone up all the way to the tRunner calling// the test function (so the user must have// called tb.Helper from inside that test function).// If this is a top-level test, only skip up to the test function itself.// If we're in a subtest, continue searching in the parent test,// starting from the point of the call to Run which created this subtest.if .level > 1 {= runtime.CallersFrames(.creator):= .parent// We're no longer looking at the current c after this point,// so we should unlock its mu, unless it's the original receiver,// in which case our caller doesn't expect us to do that.if {.mu.Unlock()}=// Remember to unlock c.mu when we no longer need it, either// because we went up another nesting level, or because we// returned.= true.mu.Lock()continue}return}// If more helper PCs have been added since we last did the conversionif .helperNames == nil {.helperNames = make(map[string]struct{})for := range .helperPCs {.helperNames[pcToName()] = struct{}{}}}if , := .helperNames[.Function]; ! {// Found a frame that wasn't inside a helper function.return}}return}// decorate prefixes the string with the file and line of the call site// and inserts the final newline if needed and indentation spaces for formatting.// This function must be called with c.mu held.func ( *common) ( string, int) string {:= .frameSkip():= .File:= .Lineif != "" {if *fullPath {// If relative path, truncate file name at last file name separator.} else if := strings.LastIndex(, "/"); >= 0 {= [+1:]} else if = strings.LastIndex(, "\\"); >= 0 {= [+1:]}} else {= "???"}if == 0 {= 1}:= new(strings.Builder)// Every line is indented at least 4 spaces..WriteString(" ")fmt.Fprintf(, "%s:%d: ", , ):= strings.Split(, "\n")if := len(); > 1 && [-1] == "" {= [:-1]}for , := range {if > 0 {// Second and subsequent lines are indented an additional 4 spaces..WriteString("\n ")}.WriteString()}.WriteByte('\n')return .String()}// flushToParent writes c.output to the parent after first writing the header// with the given format and arguments.func ( *common) (, string, ...any) {:= .parent.mu.Lock()defer .mu.Unlock().mu.Lock()defer .mu.Unlock()if len(.output) > 0 {// Add the current c.output to the print,// and then arrange for the print to replace c.output.// (This displays the logged output after the --- FAIL line.)+= "%s"= append([:len():len()], .output).output = .output[:0]}if .chatty != nil && (.w == .chatty.w || .chatty.json) {// We're flushing to the actual output, so track that this output is// associated with a specific test (and, specifically, that the next output// is *not* associated with that test).//// Moreover, if c.output is non-empty it is important that this write be// atomic with respect to the output of other tests, so that we don't end up// with confusing '=== NAME' lines in the middle of our '--- PASS' block.// Neither humans nor cmd/test2json can parse those easily.// (See https://go.dev/issue/40771.)//// If test2json is used, we never flush to parent tests,// so that the json stream shows subtests as they finish.// (See 
https://go.dev/issue/29811.).chatty.Updatef(, , ...)} else {// We're flushing to the output buffer of the parent test, which will// itself follow a test-name header when it is finally flushed to stdout.fmt.Fprintf(.w, .chatty.prefix()+, ...)}}type indenter struct {c *common}func ( indenter) ( []byte) ( int, error) {= len()for len() > 0 {:= bytes.IndexByte(, '\n')if == -1 {= len()} else {++}// An indent of 4 spaces will neatly align the dashes with the status// indicator of the parent.:= [:]if [0] == marker {.c.output = append(.c.output, marker)= [1:]}const = " ".c.output = append(.c.output, ...).c.output = append(.c.output, ...)= [:]}return}// fmtDuration returns a string representing d in the form "87.00s".func fmtDuration( time.Duration) string {return fmt.Sprintf("%.2fs", .Seconds())}// TB is the interface common to T, B, and F.type TB interface {Cleanup(func())Error(args ...any)Errorf(format string, args ...any)Fail()FailNow()Failed() boolFatal(args ...any)Fatalf(format string, args ...any)Helper()Log(args ...any)Logf(format string, args ...any)Name() stringSetenv(key, value string)Skip(args ...any)SkipNow()Skipf(format string, args ...any)Skipped() boolTempDir() string// A private method to prevent users implementing the// interface and so future additions to it will not// violate Go 1 compatibility.private()}var _ TB = (*T)(nil)var _ TB = (*B)(nil)// T is a type passed to Test functions to manage test state and support formatted test logs.//// A test ends when its Test function returns or calls any of the methods// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods, as well as// the Parallel method, must be called only from the goroutine running the// Test function.//// The other reporting methods, such as the variations of Log and Error,// may be called simultaneously from multiple goroutines.type T struct {commonisEnvSet boolcontext *testContext // For running tests and subtests.}func ( *common) () {}// Name returns the name of the running (sub-) test or benchmark.//// The name will include the name of the test along with the names of// any nested sub-tests. If two sibling sub-tests have the same name,// Name will append a suffix to guarantee the returned name is unique.func ( *common) () string {return .name}func ( *common) () {if .parent != nil {.parent.()}.mu.Lock()defer .mu.Unlock().ran = true}// Fail marks the function as having failed but continues execution.func ( *common) () {if .parent != nil {.parent.()}.mu.Lock()defer .mu.Unlock()// c.done needs to be locked to synchronize checks to c.done in parent tests.if .done {panic("Fail in goroutine after " + .name + " has completed")}.failed = true}// Failed reports whether the function has failed.func ( *common) () bool {.mu.RLock():= .failed.mu.RUnlock()return || .raceErrors+race.Errors() > 0}// FailNow marks the function as having failed and stops its execution// by calling runtime.Goexit (which then runs all deferred calls in the// current goroutine).// Execution will continue at the next test or benchmark.// FailNow must be called from the goroutine running the// test or benchmark function, not from other goroutines// created during the test. 
Calling FailNow does not stop// those other goroutines.func ( *common) () {.checkFuzzFn("FailNow").Fail()// Calling runtime.Goexit will exit the goroutine, which// will run the deferred functions in this goroutine,// which will eventually run the deferred lines in tRunner,// which will signal to the test loop that this test is done.//// A previous version of this code said://// c.duration = ...// c.signal <- c.self// runtime.Goexit()//// This previous version duplicated code (those lines are in// tRunner no matter what), but worse the goroutine teardown// implicit in runtime.Goexit was not guaranteed to complete// before the test exited. If a test deferred an important cleanup// function (like removing temporary files), there was no guarantee// it would run on a test failure. Because we send on c.signal during// a top-of-stack deferred function now, we know that the send// only happens after any other stacked defers have completed..mu.Lock().finished = true.mu.Unlock()runtime.Goexit()}// log generates the output. It's always at the same stack depth.func ( *common) ( string) {.logDepth(, 3) // logDepth + log + public function}// logDepth generates the output at an arbitrary stack depth.func ( *common) ( string, int) {.mu.Lock()defer .mu.Unlock()if .done {// This test has already finished. Try and log this message// with our parent. If we don't have a parent, panic.for := .parent; != nil; = .parent {.mu.Lock()defer .mu.Unlock()if !.done {.output = append(.output, .decorate(, +1)...)return}}panic("Log in goroutine after " + .name + " has completed: " + )} else {if .chatty != nil {if .bench {// Benchmarks don't print === CONT, so we should skip the test// printer and just print straight to stdout.fmt.Print(.decorate(, +1))} else {.chatty.Printf(.name, "%s", .decorate(, +1))}return}.output = append(.output, .decorate(, +1)...)}}// Log formats its arguments using default formatting, analogous to Println,// and records the text in the error log. For tests, the text will be printed only if// the test fails or the -test.v flag is set. For benchmarks, the text is always// printed to avoid having performance depend on the value of the -test.v flag.func ( *common) ( ...any) {.checkFuzzFn("Log").log(fmt.Sprintln(...))}// Logf formats its arguments according to the format, analogous to Printf, and// records the text in the error log. A final newline is added if not provided. For// tests, the text will be printed only if the test fails or the -test.v flag is// set. 
For benchmarks, the text is always printed to avoid having performance// depend on the value of the -test.v flag.func ( *common) ( string, ...any) {.checkFuzzFn("Logf").log(fmt.Sprintf(, ...))}// Error is equivalent to Log followed by Fail.func ( *common) ( ...any) {.checkFuzzFn("Error").log(fmt.Sprintln(...)).Fail()}// Errorf is equivalent to Logf followed by Fail.func ( *common) ( string, ...any) {.checkFuzzFn("Errorf").log(fmt.Sprintf(, ...)).Fail()}// Fatal is equivalent to Log followed by FailNow.func ( *common) ( ...any) {.checkFuzzFn("Fatal").log(fmt.Sprintln(...)).FailNow()}// Fatalf is equivalent to Logf followed by FailNow.func ( *common) ( string, ...any) {.checkFuzzFn("Fatalf").log(fmt.Sprintf(, ...)).FailNow()}// Skip is equivalent to Log followed by SkipNow.func ( *common) ( ...any) {.checkFuzzFn("Skip").log(fmt.Sprintln(...)).SkipNow()}// Skipf is equivalent to Logf followed by SkipNow.func ( *common) ( string, ...any) {.checkFuzzFn("Skipf").log(fmt.Sprintf(, ...)).SkipNow()}// SkipNow marks the test as having been skipped and stops its execution// by calling runtime.Goexit.// If a test fails (see Error, Errorf, Fail) and is then skipped,// it is still considered to have failed.// Execution will continue at the next test or benchmark. See also FailNow.// SkipNow must be called from the goroutine running the test, not from// other goroutines created during the test. Calling SkipNow does not stop// those other goroutines.func ( *common) () {.checkFuzzFn("SkipNow").mu.Lock().skipped = true.finished = true.mu.Unlock()runtime.Goexit()}// Skipped reports whether the test was skipped.func ( *common) () bool {.mu.RLock()defer .mu.RUnlock()return .skipped}// Helper marks the calling function as a test helper function.// When printing file and line information, that function will be skipped.// Helper may be called simultaneously from multiple goroutines.func ( *common) () {.mu.Lock()defer .mu.Unlock()if .helperPCs == nil {.helperPCs = make(map[uintptr]struct{})}// repeating code from callerName here to save walking a stack framevar [1]uintptr:= runtime.Callers(2, [:]) // skip runtime.Callers + Helperif == 0 {panic("testing: zero callers found")}if , := .helperPCs[[0]]; ! {.helperPCs[[0]] = struct{}{}.helperNames = nil // map will be recreated next time it is needed}}// Cleanup registers a function to be called when the test (or subtest) and all its// subtests complete. Cleanup functions will be called in last added,// first called order.func ( *common) ( func()) {.checkFuzzFn("Cleanup")var [maxStackLen]uintptr// Skip two extra frames to account for this function and runtime.Callers itself.:= runtime.Callers(2, [:]):= [:]:= func() {defer func() {.mu.Lock()defer .mu.Unlock().cleanupName = "".cleanupPc = nil}():= callerName(0).mu.Lock().cleanupName =.cleanupPc =.mu.Unlock()()}.mu.Lock()defer .mu.Unlock().cleanups = append(.cleanups, )}// TempDir returns a temporary directory for the test to use.// The directory is automatically removed by Cleanup when the test and// all its subtests complete.// Each subsequent call to t.TempDir returns a unique directory;// if the directory creation fails, TempDir terminates the test by calling Fatal.func ( *common) () string {.checkFuzzFn("TempDir")// Use a single parent directory for all the temporary directories// created by a test, each numbered sequentially..tempDirMu.Lock()var boolif .tempDir == "" { // Usually the case with js/wasm= true} else {, := os.Stat(.tempDir)= os.IsNotExist()if != nil && ! 
{.Fatalf("TempDir: %v", )}}if {.Helper()// Drop unusual characters (such as path separators or// characters interacting with globs) from the directory name to// avoid surprising os.MkdirTemp behavior.:= func( rune) rune {if < utf8.RuneSelf {const = "!#$%&()+,-.=@^_{}~ "if '0' <= && <= '9' ||'a' <= && <= 'z' ||'A' <= && <= 'Z' {return}if strings.ContainsRune(, ) {return}} else if unicode.IsLetter() || unicode.IsNumber() {return}return -1}:= strings.Map(, .Name()).tempDir, .tempDirErr = os.MkdirTemp("", )if .tempDirErr == nil {.Cleanup(func() {if := removeAll(.tempDir); != nil {.Errorf("TempDir RemoveAll cleanup: %v", )}})}}if .tempDirErr == nil {.tempDirSeq++}:= .tempDirSeq.tempDirMu.Unlock()if .tempDirErr != nil {.Fatalf("TempDir: %v", .tempDirErr)}:= fmt.Sprintf("%s%c%03d", .tempDir, os.PathSeparator, )if := os.Mkdir(, 0777); != nil {.Fatalf("TempDir: %v", )}return}// removeAll is like os.RemoveAll, but retries Windows "Access is denied."// errors up to an arbitrary timeout.//// Those errors have been known to occur spuriously on at least the// windows-amd64-2012 builder (https://go.dev/issue/50051), and can only occur// legitimately if the test leaves behind a temp file that either is still open// or the test otherwise lacks permission to delete. In the case of legitimate// failures, a failing test may take a bit longer to fail, but once the test is// fixed the extra latency will go away.func removeAll( string) error {const = 2 * time.Secondvar (time.Time= 1 * time.Millisecond)for {:= os.RemoveAll()if !isWindowsRetryable() {return}if .IsZero() {= time.Now()} else if := time.Since() + ; >= {return}time.Sleep()+= time.Duration(rand.Int63n(int64()))}}// Setenv calls os.Setenv(key, value) and uses Cleanup to// restore the environment variable to its original value// after the test.//// Because Setenv affects the whole process, it cannot be used// in parallel tests or tests with parallel ancestors.func ( *common) (, string) {.checkFuzzFn("Setenv"), := os.LookupEnv()if := os.Setenv(, ); != nil {.Fatalf("cannot set environment variable: %v", )}if {.Cleanup(func() {os.Setenv(, )})} else {.Cleanup(func() {os.Unsetenv()})}}// panicHanding is an argument to runCleanup.type panicHandling intconst (normalPanic panicHandling = iotarecoverAndReturnPanic)// runCleanup is called at the end of the test.// If catchPanic is true, this will catch panics, and return the recovered// value if any.func ( *common) ( panicHandling) ( any) {.cleanupStarted.Store(true)defer .cleanupStarted.Store(false)if == recoverAndReturnPanic {defer func() {= recover()}()}// Make sure that if a cleanup function panics,// we still run the remaining cleanup functions.defer func() {.mu.Lock():= len(.cleanups) > 0.mu.Unlock()if {.(normalPanic)}}()for {var func().mu.Lock()if len(.cleanups) > 0 {:= len(.cleanups) - 1= .cleanups[].cleanups = .cleanups[:]}.mu.Unlock()if == nil {return nil}()}}// callerName gives the function name (qualified with a package path)// for the caller after skip frames (where 0 means the current function).func callerName( int) string {var [1]uintptr:= runtime.Callers(+2, [:]) // skip + runtime.Callers + callerNameif == 0 {panic("testing: zero callers found")}return pcToName([0])}func pcToName( uintptr) string {:= []uintptr{}:= runtime.CallersFrames(), := .Next()return .Function}// Parallel signals that this test is to be run in parallel with (and only with)// other parallel tests. 
When a test is run multiple times due to use of// -test.count or -test.cpu, multiple instances of a single test never run in// parallel with each other.func ( *T) () {if .isParallel {panic("testing: t.Parallel called multiple times")}if .isEnvSet {panic("testing: t.Parallel called after t.Setenv; cannot set environment variables in parallel tests")}.isParallel = trueif .parent.barrier == nil {// T.Parallel has no effect when fuzzing.// Multiple processes may run in parallel, but only one input can run at a// time per process so we can attribute crashes to specific inputs.return}// We don't want to include the time we spend waiting for serial tests// in the test duration. Record the elapsed time thus far and reset the// timer afterwards..duration += time.Since(.start)// Add to the list of tests to be released by the parent..parent.sub = append(.parent.sub, ).raceErrors += race.Errors()if .chatty != nil {.chatty.Updatef(.name, "=== PAUSE %s\n", .name)}running.Delete(.name).signal <- true // Release calling test.<-.parent.barrier // Wait for the parent test to complete..context.waitParallel()if .chatty != nil {.chatty.Updatef(.name, "=== CONT %s\n", .name)}running.Store(.name, time.Now()).start = time.Now().raceErrors += -race.Errors()}// Setenv calls os.Setenv(key, value) and uses Cleanup to// restore the environment variable to its original value// after the test.//// Because Setenv affects the whole process, it cannot be used// in parallel tests or tests with parallel ancestors.func ( *T) (, string) {// Non-parallel subtests that have parallel ancestors may still// run in parallel with other tests: they are only non-parallel// with respect to the other subtests of the same parent.// Since SetEnv affects the whole process, we need to disallow it// if the current test or any parent is parallel.:= falsefor := &.common; != nil; = .parent {if .isParallel {= truebreak}}if {panic("testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests")}.isEnvSet = true.common.Setenv(, )}// InternalTest is an internal type but exported because it is cross-package;// it is part of the implementation of the "go test" command.type InternalTest struct {Name stringF func(*T)}var errNilPanicOrGoexit = errors.New("test executed panic(nil) or runtime.Goexit")func tRunner( *T, func( *T)) {.runner = callerName(0)// When this goroutine is done, either because fn(t)// returned normally or because a test failure triggered// a call to runtime.Goexit, record the duration and send// a signal saying that the test is done.defer func() {if .Failed() {numFailed.Add(1)}if .raceErrors+race.Errors() > 0 {.Errorf("race detected during execution of test")}// Check if the test panicked or Goexited inappropriately.//// If this happens in a normal test, print output but continue panicking.// tRunner is called in its own goroutine, so this terminates the process.//// If this happens while fuzzing, recover from the panic and treat it like a// normal failure. It's important that the process keeps running in order to// find short inputs that cause panics.:= recover():= true.mu.RLock():= .finished.mu.RUnlock()if ! 
&& == nil {= errNilPanicOrGoexitfor := .parent; != nil; = .parent {.mu.RLock()= .finished.mu.RUnlock()if {if !.isParallel {.Errorf("%v: subtest may have called FailNow on a parent test", )= nil}= falsebreak}}}if != nil && .context.isFuzzing {:= "panic: "if == errNilPanicOrGoexit {= ""}.Errorf("%s%s\n%s\n", , , string(debug.Stack())).mu.Lock().finished = true.mu.Unlock()= nil}// Use a deferred call to ensure that we report that the test is// complete even if a cleanup function calls t.FailNow. See issue 41355.:= falsedefer func() {// Only report that the test is complete if it doesn't panic,// as otherwise the test binary can exit before the panic is// reported to the user. See issue 41479.if {return}if != nil {panic()}running.Delete(.name).signal <-}():= func( any) {.Fail()if := .runCleanup(recoverAndReturnPanic); != nil {.Logf("cleanup panicked with %v", )}// Flush the output log up to the root before dying.for := &.common; .parent != nil; = .parent {.mu.Lock().duration += time.Since(.start):= .duration.mu.Unlock().flushToParent(.name, "--- FAIL: %s (%s)\n", .name, fmtDuration())if := .parent.runCleanup(recoverAndReturnPanic); != nil {fmt.Fprintf(.parent.w, "cleanup panicked with %v", )}}= truepanic()}if != nil {()}.duration += time.Since(.start)if len(.sub) > 0 {// Run parallel subtests.// Decrease the running count for this test..context.release()// Release the parallel subtests.close(.barrier)// Wait for subtests to complete.for , := range .sub {<-.signal}:= time.Now():= .runCleanup(recoverAndReturnPanic).duration += time.Since()if != nil {()}if !.isParallel {// Reacquire the count for sequential tests. See comment in Run..context.waitParallel()}} else if .isParallel {// Only release the count for this test if it was run as a parallel// test. See comment in Run method..context.release()}.report() // Report after all subtests have finished.// Do not lock t.done to allow race detector to detect race in case// the user does not appropriately synchronize a goroutine..done = trueif .parent != nil && !.hasSub.Load() {.setRan()}}()defer func() {if len(.sub) == 0 {.runCleanup(normalPanic)}}().start = time.Now().raceErrors = -race.Errors()()// code beyond here will not be executed when FailNow is invoked.mu.Lock().finished = true.mu.Unlock()}// Run runs f as a subtest of t called name. It runs f in a separate goroutine// and blocks until f returns or calls t.Parallel to become a parallel test.// Run reports whether f succeeded (or at least did not fail before calling t.Parallel).//// Run may be called simultaneously from multiple goroutines, but all such calls// must return before the outer test function for t returns.func ( *T) ( string, func( *T)) bool {if .cleanupStarted.Load() {panic("testing: t.Run called during t.Cleanup")}.hasSub.Store(true), , := .context.match.fullName(&.common, )if ! 
|| shouldFailFast() {return true}// Record the stack trace at the point of this call so that if the subtest// function - which runs in a separate stack - is marked as a helper, we can// continue walking the stack into the parent test.var [maxStackLen]uintptr:= runtime.Callers(2, [:])= &T{common: common{barrier: make(chan bool),signal: make(chan bool, 1),name: ,parent: &.common,level: .level + 1,creator: [:],chatty: .chatty,},context: .context,}.w = indenter{&.common}if .chatty != nil {.chatty.Updatef(.name, "=== RUN %s\n", .name)}running.Store(.name, time.Now())// Instead of reducing the running count of this test before calling the// tRunner and increasing it afterwards, we rely on tRunner keeping the// count correct. This ensures that a sequence of sequential tests runs// without being preempted, even when their parent is a parallel test. This// may especially reduce surprises if *parallel == 1.go tRunner(, )if !<-.signal {// At this point, it is likely that FailNow was called on one of the// parent tests by one of the subtests. Continue aborting up the chain.runtime.Goexit()}if .chatty != nil && .chatty.json {.chatty.Updatef(.parent.name, "=== NAME %s\n", .parent.name)}return !.failed}// Deadline reports the time at which the test binary will have// exceeded the timeout specified by the -timeout flag.//// The ok result is false if the -timeout flag indicates “no timeout” (0).func ( *T) () ( time.Time, bool) {= .context.deadlinereturn , !.IsZero()}// testContext holds all fields that are common to all tests. This includes// synchronization primitives to run at most *parallel tests.type testContext struct {match *matcherdeadline time.Time// isFuzzing is true in the context used when generating random inputs// for fuzz targets. isFuzzing is false when running normal tests and// when running fuzz tests as unit tests (without -fuzz or when -fuzz// does not match).isFuzzing boolmu sync.Mutex// Channel used to signal tests that are ready to be run in parallel.startParallel chan bool// running is the number of tests currently running in parallel.// This does not include tests that are waiting for subtests to complete.running int// numWaiting is the number tests waiting to be run in parallel.numWaiting int// maxParallel is a copy of the parallel flag.maxParallel int}func newTestContext( int, *matcher) *testContext {return &testContext{match: ,startParallel: make(chan bool),maxParallel: ,running: 1, // Set the count to 1 for the main (sequential) test.}}func ( *testContext) () {.mu.Lock()if .running < .maxParallel {.running++.mu.Unlock()return}.numWaiting++.mu.Unlock()<-.startParallel}func ( *testContext) () {.mu.Lock()if .numWaiting == 0 {.running--.mu.Unlock()return}.numWaiting--.mu.Unlock().startParallel <- true // Pick a waiting test to be run.}// No one should be using func Main anymore.// See the doc comment on func Main and use MainStart instead.var errMain = errors.New("testing: unexpected use of func Main")type matchStringOnly func(pat, str string) (bool, error)func ( matchStringOnly) (, string) (bool, error) { return (, ) }func ( matchStringOnly) ( io.Writer) error { return errMain }func ( matchStringOnly) () {}func ( matchStringOnly) (string, io.Writer, int) error { return errMain }func ( matchStringOnly) () string { return "" }func ( matchStringOnly) (io.Writer) {}func ( matchStringOnly) () error { return errMain }func ( matchStringOnly) (bool) {}func ( matchStringOnly) (time.Duration, int64, time.Duration, int64, int, []corpusEntry, []reflect.Type, string, string) error {return 
errMain}func ( matchStringOnly) (func(corpusEntry) error) error { return errMain }func ( matchStringOnly) (string, []reflect.Type) ([]corpusEntry, error) {return nil, errMain}func ( matchStringOnly) ([]any, []reflect.Type) error { return nil }func ( matchStringOnly) () {}func ( matchStringOnly) () {}// Main is an internal function, part of the implementation of the "go test" command.// It was exported because it is cross-package and predates "internal" packages.// It is no longer used by "go test" but preserved, as much as possible, for other// systems that simulate "go test" using Main, but Main sometimes cannot be updated as// new functionality is added to the testing package.// Systems simulating "go test" should be updated to use MainStart.func ( func(, string) (bool, error), []InternalTest, []InternalBenchmark, []InternalExample) {os.Exit(MainStart(matchStringOnly(), , , nil, ).Run())}// M is a type passed to a TestMain function to run the actual tests.type M struct {deps testDepstests []InternalTestbenchmarks []InternalBenchmarkfuzzTargets []InternalFuzzTargetexamples []InternalExampletimer *time.TimerafterOnce sync.OncenumRun int// value to pass to os.Exit, the outer test func main// harness calls os.Exit with this code. See #34129.exitCode int}// testDeps is an internal interface of functionality that is// passed into this package by a test's generated main package.// The canonical implementation of this interface is// testing/internal/testdeps's TestDeps.type testDeps interface {ImportPath() stringMatchString(pat, str string) (bool, error)SetPanicOnExit0(bool)StartCPUProfile(io.Writer) errorStopCPUProfile()StartTestLog(io.Writer)StopTestLog() errorWriteProfileTo(string, io.Writer, int) errorCoordinateFuzzing(time.Duration, int64, time.Duration, int64, int, []corpusEntry, []reflect.Type, string, string) errorRunFuzzWorker(func(corpusEntry) error) errorReadCorpus(string, []reflect.Type) ([]corpusEntry, error)CheckCorpus([]any, []reflect.Type) errorResetCoverage()SnapshotCoverage()}// MainStart is meant for use by tests generated by 'go test'.// It is not meant to be called directly and is not subject to the Go 1 compatibility document.// It may change signature from release to release.func ( testDeps, []InternalTest, []InternalBenchmark, []InternalFuzzTarget, []InternalExample) *M {Init()return &M{deps: ,tests: ,benchmarks: ,fuzzTargets: ,examples: ,}}var testingTesting boolvar realStderr *os.File// Run runs the tests. It returns an exit code to pass to os.Exit.func ( *M) () ( int) {defer func() {= .exitCode}()// Count the number of calls to m.Run.// We only ever expected 1, but we didn't enforce that,// and now there are tests in the wild that call m.Run multiple times.// Sigh. go.dev/issue/23129..numRun++// TestMain may have already called flag.Parse.if !flag.Parsed() {flag.Parse()}if chatty.json {// With -v=json, stdout and stderr are pointing to the same pipe,// which is leading into test2json. In general, operating systems// do a good job of ensuring that writes to the same pipe through// different file descriptors are delivered whole, so that writing// AAA to stdout and BBB to stderr simultaneously produces// AAABBB or BBBAAA on the pipe, not something like AABBBA.// However, the exception to this is when the pipe fills: in that// case, Go's use of non-blocking I/O means that writing AAA// or BBB might be split across multiple system calls, making it// entirely possible to get output like AABBBA. 
The same problem// happens inside the operating system kernel if we switch to// blocking I/O on the pipe. This interleaved output can do things// like print unrelated messages in the middle of a TestFoo line,// which confuses test2json. Setting os.Stderr = os.Stdout will make// them share a single pfd, which will hold a lock for each program// write, preventing any interleaving.//// It might be nice to set Stderr = Stdout always, or perhaps if// we can tell they are the same file, but for now -v=json is// a very clear signal. Making the two files the same may cause// surprises if programs close os.Stdout but expect to be able// to continue to write to os.Stderr, but it's hard to see why a// test would think it could take over global state that way.//// This fix only helps programs where the output is coming directly// from Go code. It does not help programs in which a subprocess is// writing to stderr or stdout at the same time that a Go test is writing output.// It also does not help when the output is coming from the runtime,// such as when using the print/println functions, since that code writes// directly to fd 2 without any locking.// We keep realStderr around to prevent fd 2 from being closed.//// See go.dev/issue/33419.realStderr = os.Stderros.Stderr = os.Stdout}if *parallel < 1 {fmt.Fprintln(os.Stderr, "testing: -parallel can only be given a positive integer")flag.Usage().exitCode = 2return}if *matchFuzz != "" && *fuzzCacheDir == "" {fmt.Fprintln(os.Stderr, "testing: -test.fuzzcachedir must be set if -test.fuzz is set")flag.Usage().exitCode = 2return}if *matchList != "" {listTests(.deps.MatchString, .tests, .benchmarks, .fuzzTargets, .examples).exitCode = 0return}if *shuffle != "off" {var int64var errorif *shuffle == "on" {= time.Now().UnixNano()} else {, = strconv.ParseInt(*shuffle, 10, 64)if != nil {fmt.Fprintln(os.Stderr, `testing: -shuffle should be "off", "on", or a valid integer:`, ).exitCode = 2return}}fmt.Println("-test.shuffle", ):= rand.New(rand.NewSource()).Shuffle(len(.tests), func(, int) { .tests[], .tests[] = .tests[], .tests[] }).Shuffle(len(.benchmarks), func(, int) { .benchmarks[], .benchmarks[] = .benchmarks[], .benchmarks[] })}parseCpuList().before()defer .after()// Run tests, examples, and benchmarks unless this is a fuzz worker process.// Workers start after this is done by their parent process, and they should// not repeat this work.if !*isFuzzWorker {:= .startAlarm()haveExamples = len(.examples) > 0, := runTests(.deps.MatchString, .tests, ), := runFuzzTests(.deps, .fuzzTargets, ), := runExamples(.deps.MatchString, .examples).stopAlarm()if ! && ! && ! && *matchBenchmarks == "" && *matchFuzz == "" {fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")if testingTesting && *match != "^$" {// If this happens during testing of package testing it could be that// package testing's own logic for when to run a test is broken,// in which case every test will run nothing and succeed,// with no obvious way to detect this problem (since no tests are running).// So make 'no tests to run' a hard failure when testing package testing itself.fmt.Print(chatty.prefix(), "FAIL: package testing must run tests\n")= false}}if ! || ! || ! || !runBenchmarks(.deps.ImportPath(), .deps.MatchString, .benchmarks) || race.Errors() > 0 {fmt.Print(chatty.prefix(), "FAIL\n").exitCode = 1return}}:= runFuzzing(.deps, .fuzzTargets)if ! 
{fmt.Print(chatty.prefix(), "FAIL\n")if *isFuzzWorker {.exitCode = fuzzWorkerExitCode} else {.exitCode = 1}return}.exitCode = 0if !*isFuzzWorker {fmt.Print(chatty.prefix(), "PASS\n")}return}func ( *T) () {if .parent == nil {return}:= fmtDuration(.duration):= "--- %s: %s (%s)\n"if .Failed() {.flushToParent(.name, , "FAIL", .name, )} else if .chatty != nil {if .Skipped() {.flushToParent(.name, , "SKIP", .name, )} else {.flushToParent(.name, , "PASS", .name, )}}}func listTests( func(, string) (bool, error), []InternalTest, []InternalBenchmark, []InternalFuzzTarget, []InternalExample) {if , := (*matchList, "non-empty"); != nil {fmt.Fprintf(os.Stderr, "testing: invalid regexp in -test.list (%q): %s\n", *matchList, )os.Exit(1)}for , := range {if , := (*matchList, .Name); {fmt.Println(.Name)}}for , := range {if , := (*matchList, .Name); {fmt.Println(.Name)}}for , := range {if , := (*matchList, .Name); {fmt.Println(.Name)}}for , := range {if , := (*matchList, .Name); {fmt.Println(.Name)}}}// RunTests is an internal function but exported because it is cross-package;// it is part of the implementation of the "go test" command.func ( func(, string) (bool, error), []InternalTest) ( bool) {var time.Timeif *timeout > 0 {= time.Now().Add(*timeout)}, := runTests(, , )if ! && !haveExamples {fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")}return}func runTests( func(, string) (bool, error), []InternalTest, time.Time) (, bool) {= truefor , := range cpuList {runtime.GOMAXPROCS()for := uint(0); < *count; ++ {if shouldFailFast() {break}if > 0 && ! {// There were no tests to run on the first// iteration. This won't change, so no reason// to keep trying.break}:= newTestContext(*parallel, newMatcher(, *match, "-test.run", *skip)).deadline =:= &T{common: common{signal: make(chan bool, 1),barrier: make(chan bool),w: os.Stdout,},context: ,}if Verbose() {.chatty = newChattyPrinter(.w)}tRunner(, func( *T) {for , := range {.Run(.Name, .F)}})select {case <-.signal:default:panic("internal error: tRunner exited without sending on t.signal")}= && !.Failed()= || .ran}}return ,}// before runs before all testing.func ( *M) () {if *memProfileRate > 0 {runtime.MemProfileRate = *memProfileRate}if *cpuProfile != "" {, := os.Create(toOutputDir(*cpuProfile))if != nil {fmt.Fprintf(os.Stderr, "testing: %s\n", )return}if := .deps.StartCPUProfile(); != nil {fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", ).Close()return}// Could save f so after can call f.Close; not worth the effort.}if *traceFile != "" {, := os.Create(toOutputDir(*traceFile))if != nil {fmt.Fprintf(os.Stderr, "testing: %s\n", )return}if := trace.Start(); != nil {fmt.Fprintf(os.Stderr, "testing: can't start tracing: %s\n", ).Close()return}// Could save f so after can call f.Close; not worth the effort.}if *blockProfile != "" && *blockProfileRate >= 0 {runtime.SetBlockProfileRate(*blockProfileRate)}if *mutexProfile != "" && *mutexProfileFraction >= 0 {runtime.SetMutexProfileFraction(*mutexProfileFraction)}if *coverProfile != "" && CoverMode() == "" {fmt.Fprintf(os.Stderr, "testing: cannot use -test.coverprofile because test binary was not built with coverage enabled\n")os.Exit(2)}if *gocoverdir != "" && CoverMode() == "" {fmt.Fprintf(os.Stderr, "testing: cannot use -test.gocoverdir because test binary was not built with coverage enabled\n")os.Exit(2)}if *testlog != "" {// Note: Not using toOutputDir.// This file is for use by cmd/go, not users.var *os.Filevar errorif .numRun == 1 {, = os.Create(*testlog)} else {, = os.OpenFile(*testlog, 
os.O_WRONLY, 0)if == nil {.Seek(0, io.SeekEnd)}}if != nil {fmt.Fprintf(os.Stderr, "testing: %s\n", )os.Exit(2)}.deps.StartTestLog()testlogFile =}if *panicOnExit0 {.deps.SetPanicOnExit0(true)}}// after runs after all testing.func ( *M) () {.afterOnce.Do(func() {.writeProfiles()})// Restore PanicOnExit0 after every run, because we set it to true before// every run. Otherwise, if m.Run is called multiple times the behavior of// os.Exit(0) will not be restored after the second run.if *panicOnExit0 {.deps.SetPanicOnExit0(false)}}func ( *M) () {if *testlog != "" {if := .deps.StopTestLog(); != nil {fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *testlog, )os.Exit(2)}if := testlogFile.Close(); != nil {fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *testlog, )os.Exit(2)}}if *cpuProfile != "" {.deps.StopCPUProfile() // flushes profile to disk}if *traceFile != "" {trace.Stop() // flushes trace to disk}if *memProfile != "" {, := os.Create(toOutputDir(*memProfile))if != nil {fmt.Fprintf(os.Stderr, "testing: %s\n", )os.Exit(2)}runtime.GC() // materialize all statisticsif = .deps.WriteProfileTo("allocs", , 0); != nil {fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *memProfile, )os.Exit(2)}.Close()}if *blockProfile != "" && *blockProfileRate >= 0 {, := os.Create(toOutputDir(*blockProfile))if != nil {fmt.Fprintf(os.Stderr, "testing: %s\n", )os.Exit(2)}if = .deps.WriteProfileTo("block", , 0); != nil {fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, )os.Exit(2)}.Close()}if *mutexProfile != "" && *mutexProfileFraction >= 0 {, := os.Create(toOutputDir(*mutexProfile))if != nil {fmt.Fprintf(os.Stderr, "testing: %s\n", )os.Exit(2)}if = .deps.WriteProfileTo("mutex", , 0); != nil {fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *mutexProfile, )os.Exit(2)}.Close()}if CoverMode() != "" {coverReport()}}// toOutputDir returns the file name relocated, if required, to outputDir.// Simple implementation to avoid pulling in path/filepath.func toOutputDir( string) string {if *outputDir == "" || == "" {return}// On Windows, it's clumsy, but we can be almost always correct// by just looking for a drive letter and a colon.// Absolute paths always have a drive letter (ignoring UNC).// Problem: if path == "C:A" and outputdir == "C:\Go" it's unclear// what to do, but even then path/filepath doesn't help.// TODO: Worth doing better? 
Probably not, because we're here only// under the management of go test.if runtime.GOOS == "windows" && len() >= 2 {, := [0], [1]if ('a' <= && <= 'z' || 'A' <= && <= 'Z') && == ':' {// If path starts with a drive letter we're stuck with it regardless.return}}if os.IsPathSeparator([0]) {return}return fmt.Sprintf("%s%c%s", *outputDir, os.PathSeparator, )}// startAlarm starts an alarm if requested.func ( *M) () time.Time {if *timeout <= 0 {return time.Time{}}:= time.Now().Add(*timeout).timer = time.AfterFunc(*timeout, func() {.after()debug.SetTraceback("all"):= ""if := runningList(); len() > 0 {var strings.Builder.WriteString("\nrunning tests:")for , := range {.WriteString("\n\t").WriteString()}= .String()}panic(fmt.Sprintf("test timed out after %v%s", *timeout, ))})return}// runningList returns the list of running tests.func runningList() []string {var []stringrunning.Range(func(, any) bool {= append(, fmt.Sprintf("%s (%v)", .(string), time.Since(.(time.Time)).Round(time.Second)))return true})sort.Strings()return}// stopAlarm turns off the alarm.func ( *M) () {if *timeout > 0 {.timer.Stop()}}func parseCpuList() {for , := range strings.Split(*cpuListStr, ",") {= strings.TrimSpace()if == "" {continue}, := strconv.Atoi()if != nil || <= 0 {fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu\n", )os.Exit(1)}cpuList = append(cpuList, )}if cpuList == nil {cpuList = append(cpuList, runtime.GOMAXPROCS(-1))}}func shouldFailFast() bool {return *failFast && numFailed.Load() > 0}
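The Init function registered above exists mainly for programs that call testing helpers such as Benchmark outside of "go test". The following is a minimal, illustrative sketch (not part of testing.go) of that use; the program and the benchmarked loop are hypothetical:

package main

import (
	"fmt"
	"strconv"
	"testing"
)

func main() {
	// Register the -test.* flags manually; under "go test" this happens automatically.
	testing.Init()
	res := testing.Benchmark(func(b *testing.B) {
		var s string
		for i := 0; i < b.N; i++ {
			s = strconv.Itoa(i)
		}
		_ = s
	})
	fmt.Println("iterations:", res.N)
	fmt.Println(res) // prints the usual "N iterations, ns/op" summary
}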
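As a companion to the TB interface, Helper, Cleanup, and TempDir implementations shown in the listing, here is a hypothetical test file (not part of testing.go; the writeTemp helper and TestWriteTemp name are made up) sketching how they are typically used together:

package example_test

import (
	"os"
	"path/filepath"
	"testing"
)

// writeTemp works for both tests and benchmarks because it accepts testing.TB.
// tb.Helper() makes failure messages point at the caller, and the directory
// returned by TempDir is removed automatically by the Cleanup machinery.
func writeTemp(tb testing.TB, data string) string {
	tb.Helper()
	path := filepath.Join(tb.TempDir(), "data.txt")
	if err := os.WriteFile(path, []byte(data), 0o600); err != nil {
		tb.Fatalf("writeTemp: %v", err)
	}
	return path
}

func TestWriteTemp(t *testing.T) {
	path := writeTemp(t, "hello")
	got, err := os.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	if string(got) != "hello" {
		t.Errorf("read %q, want %q", got, "hello")
	}
}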
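Similarly, the Setenv and Short implementations above can be exercised with a short, hypothetical test (not part of testing.go; the variable name EXAMPLE_MODE is made up). Setenv restores the variable via Cleanup when the test ends and cannot be combined with t.Parallel:

package example_test

import (
	"os"
	"testing"
)

func TestConfigFromEnv(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping environment test in -short mode")
	}
	t.Setenv("EXAMPLE_MODE", "debug") // restored automatically after the test
	if got := os.Getenv("EXAMPLE_MODE"); got != "debug" {
		t.Fatalf("EXAMPLE_MODE = %q, want %q", got, "debug")
	}
}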