Commit 11309337 by Ian Lance Taylor

libgo: update to Go 1.10.2 release

    
    Reviewed-on: https://go-review.googlesource.com/115196

From-SVN: r261041
parent 8b0b334a
9731580e76c065b76e3a103356bb8920da05a685
79eca4fd642724d89e9bec8f79889451f6632a46
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
bf86aec25972f3a100c3aa58a6abcbcc35bdea49
71bdbf431b79dff61944f22c25c7e085ccfc25d5
The first line of this file holds the git revision number of the
last merge done from the master library sources.
@@ -122,6 +122,7 @@ net/http/httptest
net/http/httptrace
net/http/httputil
net/http/internal
net/http/pprof
net/internal/socktest
net/mail
net/rpc
...
@@ -366,7 +366,7 @@ parseExtras:
epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
modified = time.Unix(epoch.Unix()+secs, nsecs)
}
case unixExtraID:
case unixExtraID, infoZipUnixExtraID:
if len(fieldBuf) < 8 {
continue parseExtras
}
@@ -379,12 +379,6 @@ parseExtras:
}
ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
modified = time.Unix(ts, 0)
case infoZipUnixExtraID:
if len(fieldBuf) < 4 {
continue parseExtras
}
ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
modified = time.Unix(ts, 0)
}
}
...
@@ -414,7 +414,7 @@ var tests = []ZipTest{
Name: "test.txt",
Content: []byte{},
Size: 1<<32 - 1,
Modified: time.Date(2017, 10, 31, 21, 17, 27, 0, timeZone(-7*time.Hour)),
Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
Mode: 0644,
},
},
...
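The two archive/zip hunks above fold the Info-ZIP Unix extra field into the same timestamp handling as the classic Unix field and adjust the expected Modified value in the test table. A minimal sketch of observing the parsed timestamps through the public API; the archive name is only a placeholder, not part of this patch:

package main

import (
    "archive/zip"
    "fmt"
    "log"
)

func main() {
    // "example.zip" is a stand-in; any archive carrying Unix or Info-ZIP
    // extra fields exercises the parseExtras code path changed above.
    r, err := zip.OpenReader("example.zip")
    if err != nil {
        log.Fatal(err)
    }
    defer r.Close()
    for _, f := range r.File {
        // FileHeader.Modified is populated from the extra fields when present,
        // falling back to the legacy MS-DOS timestamp otherwise.
        fmt.Println(f.Name, f.Modified)
    }
}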
@@ -3265,6 +3265,20 @@ func TestGoVetWithOnlyTestFiles(t *testing.T) {
tg.run("vet", "p")
}
// Issue 24193.
func TestVetWithOnlyCgoFiles(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/p/p.go", "package p; import \"C\"; func F() {}")
tg.setenv("GOPATH", tg.path("."))
tg.run("vet", "p")
}
// Issue 9767, 19769.
func TestGoGetDotSlashDownload(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
@@ -5099,6 +5113,28 @@ func TestCacheOutput(t *testing.T) {
}
}
func TestCacheListStale(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.path("cache"))
tg.tempFile("gopath/src/p/p.go", "package p; import _ \"q\"; func F(){}\n")
tg.tempFile("gopath/src/q/q.go", "package q; func F(){}\n")
tg.tempFile("gopath/src/m/m.go", "package main; import _ \"q\"; func main(){}\n")
tg.setenv("GOPATH", tg.path("gopath"))
tg.run("install", "p", "m")
tg.run("list", "-f={{.ImportPath}} {{.Stale}}", "m", "q", "p")
tg.grepStdout("^m false", "m should not be stale")
tg.grepStdout("^q true", "q should be stale")
tg.grepStdout("^p false", "p should not be stale")
}
func TestCacheCoverage(t *testing.T) {
tooSlow(t)
@@ -5792,6 +5828,22 @@ func TestAtomicCoverpkgAll(t *testing.T) {
}
}
// Issue 23882.
func TestCoverpkgAllRuntime(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/x.go", `package x; import _ "runtime"; func F() {}`)
tg.tempFile("src/x/x_test.go", `package x; import "testing"; func TestF(t *testing.T) { F() }`)
tg.setenv("GOPATH", tg.path("."))
tg.run("test", "-coverpkg=all", "x")
if canRace {
tg.run("test", "-coverpkg=all", "-race", "x")
}
}
func TestBadCommandLines(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -5949,3 +6001,36 @@ func TestBadCgoDirectives(t *testing.T) {
tg.run("build", "-n", "x")
tg.grepStderr("-D@foo", "did not find -D@foo in commands")
}
func TestTwoPkgConfigs(t *testing.T) {
if !canCgo {
t.Skip("no cgo")
}
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
t.Skipf("no shell scripts on %s", runtime.GOOS)
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/a.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
tg.tempFile("src/x/b.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
tg.tempFile("pkg-config.sh", `#!/bin/sh
echo $* >>`+tg.path("pkg-config.out"))
tg.must(os.Chmod(tg.path("pkg-config.sh"), 0755))
tg.setenv("GOPATH", tg.path("."))
tg.setenv("PKG_CONFIG", tg.path("pkg-config.sh"))
tg.run("build", "x")
out, err := ioutil.ReadFile(tg.path("pkg-config.out"))
tg.must(err)
out = bytes.TrimSpace(out)
want := "--cflags --static --static -- a a\n--libs --static --static -- a a"
if !bytes.Equal(out, []byte(want)) {
t.Errorf("got %q want %q", out, want)
}
}
@@ -809,8 +809,8 @@ func repoRootForImportDynamic(importPath string, security web.SecurityMode) (*re
}
}
if !strings.Contains(mmi.RepoRoot, "://") {
return nil, fmt.Errorf("%s: invalid repo root %q; no scheme", urlStr, mmi.RepoRoot)
if err := validateRepoRootScheme(mmi.RepoRoot); err != nil {
return nil, fmt.Errorf("%s: invalid repo root %q: %v", urlStr, mmi.RepoRoot, err)
}
rr := &repoRoot{
vcs: vcsByCmd(mmi.VCS),
@@ -824,6 +824,36 @@ func repoRootForImportDynamic(importPath string, security web.SecurityMode) (*re
return rr, nil
}
// validateRepoRootScheme returns an error if repoRoot does not seem
// to have a valid URL scheme. At this point we permit things that
// aren't valid URLs, although later, if not using -insecure, we will
// restrict repoRoots to be valid URLs. This is only because we've
// historically permitted them, and people may depend on that.
func validateRepoRootScheme(repoRoot string) error {
end := strings.Index(repoRoot, "://")
if end <= 0 {
return errors.New("no scheme")
}
// RFC 3986 section 3.1.
for i := 0; i < end; i++ {
c := repoRoot[i]
switch {
case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
// OK.
case '0' <= c && c <= '9' || c == '+' || c == '-' || c == '.':
// OK except at start.
if i == 0 {
return errors.New("invalid scheme")
}
default:
return errors.New("invalid scheme")
}
}
return nil
}
var fetchGroup singleflight.Group
var (
fetchCacheMu sync.Mutex
...
@@ -408,3 +408,46 @@ func TestMatchGoImport(t *testing.T) {
}
}
}
func TestValidateRepoRootScheme(t *testing.T) {
tests := []struct {
root string
err string
}{
{
root: "",
err: "no scheme",
},
{
root: "http://",
err: "",
},
{
root: "a://",
err: "",
},
{
root: "a#://",
err: "invalid scheme",
},
{
root: "-config://",
err: "invalid scheme",
},
}
for _, test := range tests {
err := validateRepoRootScheme(test.root)
if err == nil {
if test.err != "" {
t.Errorf("validateRepoRootScheme(%q) = nil, want %q", test.root, test.err)
}
} else if test.err == "" {
if err != nil {
t.Errorf("validateRepoRootScheme(%q) = %q, want nil", test.root, test.err)
}
} else if err.Error() != test.err {
t.Errorf("validateRepoRootScheme(%q) = %q, want %q", test.root, err, test.err)
}
}
}
@@ -673,6 +673,14 @@ func runTest(cmd *base.Command, args []string) {
continue
}
// If using the race detector, silently ignore
// attempts to run coverage on the runtime
// packages. It will cause the race detector
// to be invoked before it has been initialized.
if cfg.BuildRace && p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) {
continue
}
if haveMatch {
testCoverPkgs = append(testCoverPkgs, p)
}
...
@@ -62,11 +62,11 @@ func runVet(cmd *base.Command, args []string) {
base.Errorf("%v", err)
continue
}
if len(ptest.GoFiles) == 0 && pxtest == nil {
if len(ptest.GoFiles) == 0 && len(ptest.CgoFiles) == 0 && pxtest == nil {
base.Errorf("go vet %s: no Go files in %s", p.ImportPath, p.Dir)
continue
}
if len(ptest.GoFiles) > 0 {
if len(ptest.GoFiles) > 0 || len(ptest.CgoFiles) > 0 {
root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, ptest))
}
if pxtest != nil {
...
@@ -461,15 +461,7 @@ func (b *Builder) useCache(a *Action, p *load.Package, actionHash cache.ActionID
// If so, it's up to date and we can reuse it instead of rebuilding it.
var buildID string
if target != "" && !cfg.BuildA {
var err error
buildID, err = buildid.ReadFile(target)
if err != nil && b.ComputeStaleOnly {
if p != nil && !p.Stale {
p.Stale = true
p.StaleReason = "target missing"
}
return true
}
buildID, _ = buildid.ReadFile(target)
if strings.HasPrefix(buildID, actionID+buildIDSeparator) {
a.buildID = buildID
a.built = target
@@ -546,7 +538,10 @@ func (b *Builder) useCache(a *Action, p *load.Package, actionHash cache.ActionID
}
}
}
return true
// Fall through to update a.buildID from the build artifact cache,
// which will affect the computation of buildIDs for targets
// higher up in the dependency graph.
}
// Check the build artifact cache.
@@ -574,6 +569,10 @@ func (b *Builder) useCache(a *Action, p *load.Package, actionHash cache.ActionID
a.built = file
a.Target = "DO NOT USE - using cache"
a.buildID = buildID
if p := a.Package; p != nil {
// Clearer than explaining that something else is stale.
p.StaleReason = "not installed but available in build cache"
}
return true
}
...
@@ -584,6 +583,10 @@ func (b *Builder) useCache(a *Action, p *load.Package, actionHash cache.ActionID
a.output = []byte{}
}
if b.ComputeStaleOnly {
return true
}
return false
}
...
@@ -956,11 +956,19 @@ func splitPkgConfigOutput(out []byte) []string {
// Calls pkg-config if needed and returns the cflags/ldflags needed to build the package.
func (b *Builder) getPkgConfigFlags(p *load.Package) (cflags, ldflags []string, err error) {
if pkgs := p.CgoPkgConfig; len(pkgs) > 0 {
var pcflags []string
for len(pkgs) > 0 && strings.HasPrefix(pkgs[0], "--") {
pcflags = append(pcflags, pkgs[0])
pkgs = pkgs[1:]
}
if pcargs := p.CgoPkgConfig; len(pcargs) > 0 {
// pkg-config permits arguments to appear anywhere in
// the command line. Move them all to the front, before --.
var pcflags []string
var pkgs []string
for _, pcarg := range pcargs {
if pcarg == "--" {
// We're going to add our own "--" argument.
} else if strings.HasPrefix(pcarg, "--") {
pcflags = append(pcflags, pcarg)
} else {
pkgs = append(pkgs, pcarg)
}
}
for _, pkg := range pkgs {
if !load.SafeArg(pkg) {
@@ -1107,7 +1115,7 @@ func BuildInstallFunc(b *Builder, a *Action) (err error) {
// We want to hide that awful detail as much as possible, so don't
// advertise it by touching the mtimes (usually the libraries are up
// to date).
if !a.buggyInstall {
if !a.buggyInstall && !b.ComputeStaleOnly {
now := time.Now()
os.Chtimes(a.Target, now, now)
}
...
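As a standalone sketch of the reordering logic introduced in getPkgConfigFlags above (not the cmd/go code itself): every argument beginning with "--" is pulled to the front, bare names are treated as package names, and a user-supplied "--" is dropped because cmd/go appends its own separator.

package main

import (
    "fmt"
    "strings"
)

// splitPcArgs mirrors the behavior described above for a #cgo pkg-config line.
func splitPcArgs(pcargs []string) (pcflags, pkgs []string) {
    for _, pcarg := range pcargs {
        switch {
        case pcarg == "--":
            // dropped: the caller adds its own "--" before the package names
        case strings.HasPrefix(pcarg, "--"):
            pcflags = append(pcflags, pcarg)
        default:
            pkgs = append(pkgs, pcarg)
        }
    }
    return pcflags, pkgs
}

func main() {
    pcflags, pkgs := splitPcArgs([]string{"--static", "a", "b"})
    args := append([]string{"--cflags"}, pcflags...)
    args = append(args, "--")
    args = append(args, pkgs...)
    // Prints: pkg-config --cflags --static -- a b
    fmt.Println("pkg-config", strings.Join(args, " "))
}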
@@ -46,12 +46,19 @@ var validCompilerFlags = []*regexp.Regexp{
re(`-O([^@\-].*)`),
re(`-W`),
re(`-W([^@,]+)`), // -Wall but not -Wa,-foo.
re(`-Wa,-mbig-obj`),
re(`-ansi`),
re(`-f(no-)?blocks`),
re(`-f(no-)?common`),
re(`-f(no-)?constant-cfstrings`),
re(`-fdiagnostics-show-note-include-stack`),
re(`-f(no-)?exceptions`),
re(`-f(no-)?inline-functions`),
re(`-finput-charset=([^@\-].*)`),
re(`-f(no-)?fat-lto-objects`),
re(`-f(no-)?lto`),
re(`-fmacro-backtrace-limit=(.+)`),
re(`-fmessage-length=(.+)`),
re(`-f(no-)?modules`),
re(`-f(no-)?objc-arc`),
re(`-f(no-)?omit-frame-pointer`),
@@ -62,27 +69,42 @@ var validCompilerFlags = []*regexp.Regexp{
re(`-f(no-)?split-stack`),
re(`-f(no-)?stack-(.+)`),
re(`-f(no-)?strict-aliasing`),
re(`-f(un)signed-char`),
re(`-f(no-)?use-linker-plugin`), // safe if -B is not used; we don't permit -B
re(`-fsanitize=(.+)`),
re(`-ftemplate-depth-(.+)`),
re(`-fvisibility=(.+)`),
re(`-g([^@\-].*)?`),
re(`-m32`),
re(`-m64`),
re(`-m(arch|cpu|fpu|tune)=([^@\-].*)`),
re(`-m(no-)?avx[0-9a-z.]*`),
re(`-m(no-)?ms-bitfields`),
re(`-m(no-)?stack-(.+)`),
re(`-mmacosx-(.+)`),
re(`-mios-simulator-version-min=(.+)`),
re(`-miphoneos-version-min=(.+)`),
re(`-mnop-fun-dllimport`),
re(`-m(no-)?sse[0-9.]*`),
re(`-mwindows`),
re(`-pedantic(-errors)?`),
re(`-pipe`),
re(`-pthread`),
re(`-?-std=([^@\-].*)`),
re(`-?-stdlib=([^@\-].*)`),
re(`-w`),
re(`-x([^@\-].*)`),
}
var validCompilerFlagsWithNextArg = []string{
"-arch",
"-D",
"-I",
"-isystem",
"-framework",
"-isysroot",
"-isystem",
"--sysroot",
"-target",
"-x",
}
@@ -90,43 +112,65 @@ var validLinkerFlags = []*regexp.Regexp{
re(`-F([^@\-].*)`),
re(`-l([^@\-].*)`),
re(`-L([^@\-].*)`),
re(`-O`),
re(`-O([^@\-].*)`),
re(`-f(no-)?(pic|PIC|pie|PIE)`),
re(`-fsanitize=([^@\-].*)`),
re(`-g([^@\-].*)?`),
re(`-m(arch|cpu|fpu|tune)=([^@\-].*)`),
re(`-mmacosx-(.+)`),
re(`-mios-simulator-version-min=(.+)`),
re(`-miphoneos-version-min=(.+)`),
re(`-mwindows`),
re(`-(pic|PIC|pie|PIE)`),
re(`-pthread`),
re(`-shared`),
re(`-?-static([-a-z0-9+]*)`),
re(`-?-stdlib=([^@\-].*)`),
// Note that any wildcards in -Wl need to exclude comma,
// since -Wl splits its argument at commas and passes
// them all to the linker uninterpreted. Allowing comma
// in a wildcard would allow tunnelling arbitrary additional
// linker arguments through one of these.
re(`-Wl,--(no-)?allow-multiple-definition`),
re(`-Wl,--(no-)?as-needed`),
re(`-Wl,-Bdynamic`),
re(`-Wl,-Bstatic`),
re(`-Wl,-d[ny]`),
re(`-Wl,--disable-new-dtags`),
re(`-Wl,--enable-new-dtags`),
re(`-Wl,--end-group`),
re(`-Wl,-framework,[^,@\-][^,]+`),
re(`-Wl,-headerpad_max_install_names`),
re(`-Wl,--no-undefined`),
re(`-Wl,-rpath,([^,@\-][^,]+)`),
re(`-Wl,-rpath[=,]([^,@\-][^,]+)`),
re(`-Wl,-search_paths_first`),
re(`-Wl,-sectcreate,([^,@\-][^,]+),([^,@\-][^,]+),([^,@\-][^,]+)`),
re(`-Wl,--start-group`),
re(`-Wl,-?-static`),
re(`-Wl,--subsystem,(native|windows|console|posix|xbox)`),
re(`-Wl,-undefined[=,]([^,@\-][^,]+)`),
re(`-Wl,-?-unresolved-symbols=[^,]+`),
re(`-Wl,--(no-)?warn-([^,]+)`),
re(`-Wl,-z,(no)?execstack`),
re(`-Wl,-z,relro`),
re(`[a-zA-Z0-9_/].*\.(a|o|obj|dll|dylib|so)`), // direct linker inputs: x.o or libfoo.so (but not -foo.o or @foo.o)
}
var validLinkerFlagsWithNextArg = []string{
"-arch",
"-F",
"-l",
"-L",
"-framework",
"-isysroot",
"--sysroot",
"-target",
"-Wl,-framework",
"-Wl,-rpath",
"-Wl,-undefined",
}
func checkCompilerFlags(name, source string, list []string) error {
...
@@ -140,9 +140,6 @@ var goodLinkerFlags = [][]string{
var badLinkerFlags = [][]string{
{"-DFOO"},
{"-Dfoo=bar"},
{"-O"},
{"-O2"},
{"-Osmall"},
{"-W"},
{"-Wall"},
{"-fobjc-arc"},
@@ -155,7 +152,6 @@ var badLinkerFlags = [][]string{
{"-fno-stack-xxx"},
{"-mstack-overflow"},
{"-mno-stack-overflow"},
{"-mmacosx-version"},
{"-mnop-fun-dllimport"},
{"-std=c99"},
{"-xc"},
...
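The widened whitelists above translate directly into which #cgo directives go build will accept. A hedged illustration of flags that now pass the check; the specific flag values and the package name are mine, chosen so the sketch stays buildable on a typical Linux toolchain, and nothing about them comes from the patch itself:

// Package flagsketch only demonstrates #cgo directives that the expanded
// compiler and linker whitelists above accept; the flag values themselves
// are illustrative.
package flagsketch

// #cgo CFLAGS: -m64 -ansi -fvisibility=hidden -fmessage-length=0
// #cgo LDFLAGS: -lm -Wl,-z,relro -Wl,-rpath=/usr/local/lib
// #include <math.h>
import "C"

// Sqrt2 calls into libm so the cgo import is actually used.
func Sqrt2() float64 { return float64(C.sqrt(2)) }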
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package objabi
// A FuncID identifies particular functions that need to be treated
// specially by the runtime.
// Note that in some situations involving plugins, there may be multiple
// copies of a particular special runtime function.
// Note: this list must match the list in runtime/symtab.go.
type FuncID uint32
const (
FuncID_normal FuncID = iota // not a special function
FuncID_goexit
FuncID_jmpdefer
FuncID_mcall
FuncID_morestack
FuncID_mstart
FuncID_rt0_go
FuncID_asmcgocall
FuncID_sigpanic
FuncID_runfinq
FuncID_bgsweep
FuncID_forcegchelper
FuncID_timerproc
FuncID_gcBgMarkWorker
FuncID_systemstack_switch
FuncID_systemstack
FuncID_cgocallback_gofunc
FuncID_gogo
FuncID_externalthreadhandler
)
@@ -11,6 +11,7 @@ import (
"crypto/rand"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"encoding/pem"
"fmt"
"io/ioutil"
@@ -42,6 +43,7 @@ type nameConstraintsTest struct {
roots []constraintsSpec
intermediates [][]constraintsSpec
leaf leafSpec
requestedEKUs []ExtKeyUsage
expectedError string
noOpenSSL bool
}
@@ -1444,6 +1446,118 @@ var nameConstraintsTests = []nameConstraintsTest{
},
expectedError: "\"https://example.com/test\" is excluded",
},
// #75: While serverAuth in a CA certificate permits clientAuth in a leaf,
// serverAuth in a leaf shouldn't permit clientAuth when requested in
// VerifyOptions.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"dns:example.com"},
ekus: []string{"serverAuth"},
},
requestedEKUs: []ExtKeyUsage{ExtKeyUsageClientAuth},
expectedError: "incompatible key usage",
},
// #76: However, MSSGC in a leaf should match a request for serverAuth.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"dns:example.com"},
ekus: []string{"msSGC"},
},
requestedEKUs: []ExtKeyUsage{ExtKeyUsageServerAuth},
},
// An invalid DNS SAN should be detected only at validation time so
// that we can process CA certificates in the wild that have invalid SANs.
// See https://github.com/golang/go/issues/23995
// #77: an invalid DNS or mail SAN will not be detected if name constraint
// checking is not triggered.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"dns:this is invalid", "email:this @ is invalid"},
},
},
// #78: an invalid DNS SAN will be detected if any name constraint checking
// is triggered.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{
bad: []string{"uri:"},
},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"dns:this is invalid"},
},
expectedError: "cannot parse dnsName",
},
// #79: an invalid email SAN will be detected if any name constraint
// checking is triggered.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{
bad: []string{"uri:"},
},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"email:this @ is invalid"},
},
expectedError: "cannot parse rfc822Name",
},
// #80: if several EKUs are requested, satisfying any of them is sufficient.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"dns:example.com"},
ekus: []string{"email"},
},
requestedEKUs: []ExtKeyUsage{ExtKeyUsageClientAuth, ExtKeyUsageEmailProtection},
},
}
func makeConstraintsCACert(constraints constraintsSpec, name string, key *ecdsa.PrivateKey, parent *Certificate, parentKey *ecdsa.PrivateKey) (*Certificate, error) {
@@ -1459,7 +1573,7 @@ func makeConstraintsCACert(constraints constraintsSpec, name string, key *ecdsa.
NotAfter: time.Unix(2000, 0),
KeyUsage: KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
}
if err := addConstraintsToTemplate(constraints, template); err != nil {
@@ -1497,7 +1611,7 @@ func makeConstraintsLeafCert(leaf leafSpec, key *ecdsa.PrivateKey, parent *Certi
NotAfter: time.Unix(2000, 0),
KeyUsage: KeyUsageDigitalSignature,
BasicConstraintsValid: true,
IsCA: false,
}
for _, name := range leaf.sans {
@@ -1512,6 +1626,13 @@ func makeConstraintsLeafCert(leaf leafSpec, key *ecdsa.PrivateKey, parent *Certi
}
template.IPAddresses = append(template.IPAddresses, ip)
case strings.HasPrefix(name, "invalidip:"):
ipBytes, err := hex.DecodeString(name[10:])
if err != nil {
return nil, fmt.Errorf("cannot parse invalid IP: %s", err)
}
template.IPAddresses = append(template.IPAddresses, net.IP(ipBytes))
case strings.HasPrefix(name, "email:"): case strings.HasPrefix(name, "email:"):
template.EmailAddresses = append(template.EmailAddresses, name[6:]) template.EmailAddresses = append(template.EmailAddresses, name[6:])
...@@ -1781,6 +1902,7 @@ func TestConstraintCases(t *testing.T) { ...@@ -1781,6 +1902,7 @@ func TestConstraintCases(t *testing.T) {
Roots: rootPool, Roots: rootPool,
Intermediates: intermediatePool, Intermediates: intermediatePool,
CurrentTime: time.Unix(1500, 0), CurrentTime: time.Unix(1500, 0),
KeyUsages: test.requestedEKUs,
} }
_, err = leafCert.Verify(verifyOpts) _, err = leafCert.Verify(verifyOpts)
...@@ -1972,12 +2094,13 @@ func TestBadNamesInConstraints(t *testing.T) { ...@@ -1972,12 +2094,13 @@ func TestBadNamesInConstraints(t *testing.T) {
} }
func TestBadNamesInSANs(t *testing.T) { func TestBadNamesInSANs(t *testing.T) {
// Bad names in SANs should not parse. // Bad names in URI and IP SANs should not parse. Bad DNS and email SANs
// will parse and are tested in name constraint tests at the top of this
// file.
badNames := []string{ badNames := []string{
"dns:foo.com.",
"email:abc@foo.com.",
"email:foo.com.",
"uri:https://example.com./dsf", "uri:https://example.com./dsf",
"invalidip:0102",
"invalidip:0102030405",
} }
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
......
@@ -6,12 +6,14 @@ package x509
import (
"bytes"
"encoding/asn1"
"errors"
"fmt"
"net"
"net/url"
"reflect"
"runtime"
"strconv"
"strings"
"time"
"unicode/utf8"
@@ -178,10 +180,14 @@ type VerifyOptions struct {
Intermediates *CertPool
Roots *CertPool // if nil, the system roots are used
CurrentTime time.Time // if zero, the current time is used
// KeyUsage specifies which Extended Key Usage values are acceptable.
// An empty list means ExtKeyUsageServerAuth. Key usage is considered a
// constraint down the chain which mirrors Windows CryptoAPI behavior,
// but not the spec. To accept any key usage, include ExtKeyUsageAny.
// KeyUsage specifies which Extended Key Usage values are acceptable. A leaf
// certificate is accepted if it contains any of the listed values. An empty
// list means ExtKeyUsageServerAuth. To accept any key usage, include
// ExtKeyUsageAny.
//
// Certificate chains are required to nest extended key usage values,
// irrespective of this value. This matches the Windows CryptoAPI behavior,
// but not the spec.
KeyUsages []ExtKeyUsage
// MaxConstraintComparisions is the maximum number of comparisons to
// perform when checking a given certificate's name constraints. If
@@ -543,11 +549,16 @@ func (c *Certificate) checkNameConstraints(count *int,
return nil
}
const (
checkingAgainstIssuerCert = iota
checkingAgainstLeafCert
)
// ekuPermittedBy returns true iff the given extended key usage is permitted by
// the given EKU from a certificate. Normally, this would be a simple
// comparison plus a special case for the “any” EKU. But, in order to support
// existing certificates, some exceptions are made.
func ekuPermittedBy(eku, certEKU ExtKeyUsage) bool {
func ekuPermittedBy(eku, certEKU ExtKeyUsage, context int) bool {
if certEKU == ExtKeyUsageAny || eku == certEKU {
return true
}
@@ -564,18 +575,23 @@ func ekuPermittedBy(eku, certEKU ExtKeyUsage) bool {
eku = mapServerAuthEKUs(eku)
certEKU = mapServerAuthEKUs(certEKU)
if eku == certEKU ||
// ServerAuth in a CA permits ClientAuth in the leaf.
(eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
// Any CA may issue an OCSP responder certificate.
eku == ExtKeyUsageOCSPSigning ||
// Code-signing CAs can use Microsoft's commercial and
// kernel-mode EKUs.
((eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning) {
return true
}
return false
if eku == certEKU {
return true
}
// If checking a requested EKU against the list in a leaf certificate there
// are fewer exceptions.
if context == checkingAgainstLeafCert {
return false
}
// ServerAuth in a CA permits ClientAuth in the leaf.
return (eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
// Any CA may issue an OCSP responder certificate.
eku == ExtKeyUsageOCSPSigning ||
// Code-signing CAs can use Microsoft's commercial and
// kernel-mode EKUs.
(eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning
}
// isValid performs validity checks on c given that it is a candidate to append
@@ -630,8 +646,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
name := string(data)
mailbox, ok := parseRFC2821Mailbox(name)
if !ok {
// This certificate should not have parsed.
return errors.New("x509: internal error: rfc822Name SAN failed to parse")
return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
}
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
@@ -643,6 +658,10 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
case nameTypeDNS:
name := string(data)
if _, ok := domainToReverseLabels(name); !ok {
return fmt.Errorf("x509: cannot parse dnsName %q", name)
}
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
func(parsedName, constraint interface{}) (bool, error) {
return matchDomainConstraint(parsedName.(string), constraint.(string))
@@ -716,7 +735,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
for _, caEKU := range c.ExtKeyUsage {
comparisonCount++
if ekuPermittedBy(eku, caEKU) {
if ekuPermittedBy(eku, caEKU, checkingAgainstIssuerCert) {
continue NextEKU
}
}
@@ -773,6 +792,18 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
return nil
}
// formatOID formats an ASN.1 OBJECT IDENTIFIER in the common, dotted style.
func formatOID(oid asn1.ObjectIdentifier) string {
ret := ""
for i, v := range oid {
if i > 0 {
ret += "."
}
ret += strconv.Itoa(v)
}
return ret
}
// Verify attempts to verify c by building one or more chains from c to a
// certificate in opts.Roots, using certificates in opts.Intermediates if
// needed. If successful, it returns one or more chains where the first
@@ -847,16 +878,33 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e
}
if checkEKU {
NextUsage:
for _, eku := range requestedKeyUsages {
for _, leafEKU := range c.ExtKeyUsage {
if ekuPermittedBy(eku, leafEKU) {
continue NextUsage
}
}
oid, _ := oidFromExtKeyUsage(eku)
return nil, CertificateInvalidError{c, IncompatibleUsage, fmt.Sprintf("%#v", oid)}
}
}
if checkEKU {
foundMatch := false
NextUsage:
for _, eku := range requestedKeyUsages {
for _, leafEKU := range c.ExtKeyUsage {
if ekuPermittedBy(eku, leafEKU, checkingAgainstLeafCert) {
foundMatch = true
break NextUsage
}
}
}
if !foundMatch {
msg := "leaf contains the following, recognized EKUs: "
for i, leafEKU := range c.ExtKeyUsage {
oid, ok := oidFromExtKeyUsage(leafEKU)
if !ok {
continue
}
if i > 0 {
msg += ", "
}
msg += formatOID(oid)
}
return nil, CertificateInvalidError{c, IncompatibleUsage, msg}
}
}
...
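To connect the VerifyOptions.KeyUsages documentation and the leaf-EKU matching change above to the public API, a hedged sketch of requesting a specific extended key usage during verification (the pools and leaf certificate are assumed to come from elsewhere):

package example

import "crypto/x509"

// verifyForClientAuth accepts the chain only if the leaf's EKUs satisfy
// ExtKeyUsageClientAuth (or ExtKeyUsageAny). Per the documentation above,
// leaving KeyUsages empty would instead require ExtKeyUsageServerAuth, and
// with this patch a serverAuth-only leaf no longer satisfies a clientAuth
// request (test case #75 above).
func verifyForClientAuth(leaf *x509.Certificate, roots, intermediates *x509.CertPool) ([][]*x509.Certificate, error) {
    return leaf.Verify(x509.VerifyOptions{
        Roots:         roots,
        Intermediates: intermediates,
        KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
    })
}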
@@ -706,7 +706,9 @@ type Certificate struct {
OCSPServer []string
IssuingCertificateURL []string
// Subject Alternate Name values
// Subject Alternate Name values. (Note that these values may not be valid
// if invalid values were contained within a parsed certificate. For
// example, an element of DNSNames may not be a valid DNS domain name.)
DNSNames []string
EmailAddresses []string
IPAddresses []net.IP
@@ -1126,17 +1128,9 @@ func parseSANExtension(value []byte) (dnsNames, emailAddresses []string, ipAddre
err = forEachSAN(value, func(tag int, data []byte) error {
switch tag {
case nameTypeEmail:
mailbox := string(data)
if _, ok := parseRFC2821Mailbox(mailbox); !ok {
return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
}
emailAddresses = append(emailAddresses, mailbox)
emailAddresses = append(emailAddresses, string(data))
case nameTypeDNS:
domain := string(data)
if _, ok := domainToReverseLabels(domain); !ok {
return fmt.Errorf("x509: cannot parse dnsName %q", string(data))
}
dnsNames = append(dnsNames, domain)
dnsNames = append(dnsNames, string(data))
case nameTypeURI:
uri, err := url.Parse(string(data))
if err != nil {
@@ -1153,7 +1147,7 @@ func parseSANExtension(value []byte) (dnsNames, emailAddresses []string, ipAddre
case net.IPv4len, net.IPv6len:
ipAddresses = append(ipAddresses, data)
default:
return errors.New("x509: certificate contained IP address of length " + strconv.Itoa(len(data)))
return errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data)))
}
}
@@ -2543,7 +2537,7 @@ func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) {
func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) {
out := &CertificateRequest{
Raw: in.Raw,
RawTBSCertificateRequest: in.TBSCSR.Raw,
RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw,
RawSubject: in.TBSCSR.Subject.FullBytes,
...
@@ -443,10 +443,25 @@ func (d *decodeState) valueQuoted() interface{} {
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
// Issue #24153 indicates that it is generally not a guaranteed property
// that you may round-trip a reflect.Value by calling Value.Addr().Elem()
// and expect the value to still be settable for values derived from
// unexported embedded struct fields.
//
// The logic below effectively does this when it first addresses the value
// (to satisfy possible pointer methods) and continues to dereference
// subsequent pointers as necessary.
//
// After the first round-trip, we set v back to the original value to
// preserve the original RW flags contained in reflect.Value.
v0 := v
haveAddr := false
// If v is a named type and is addressable,
// start with its address, so that if the type has pointer methods,
// we find them.
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
haveAddr = true
v = v.Addr()
}
for {
@@ -455,6 +470,7 @@ func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler,
if v.Kind() == reflect.Interface && !v.IsNil() {
e := v.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
haveAddr = false
v = e
continue
}
@@ -480,7 +496,13 @@ func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler,
}
}
}
v = v.Elem()
if haveAddr {
v = v0 // restore original value after round-trip Value.Addr().Elem()
haveAddr = false
} else {
v = v.Elem()
}
}
return nil, nil, v
}
...
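The haveAddr bookkeeping and the Issue 24152/24153 notes above are easiest to see from the public API. A small sketch mirroring the S6 case in the test below: an unexported embedded struct that is given an explicit JSON name used to make indirect panic and now decodes normally.

package main

import (
    "encoding/json"
    "fmt"
)

type inner struct{ Q int }

// outer embeds an unexported struct but names it explicitly in JSON,
// the shape covered by issue 24152.
type outer struct {
    inner `json:"inner"`
}

func main() {
    var o outer
    if err := json.Unmarshal([]byte(`{"inner": {"Q": 1}}`), &o); err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Println(o.Q) // 1
}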
@@ -615,9 +615,9 @@ var unmarshalTests = []unmarshalTest{
out: S5{S8: S8{S9: S9{Y: 2}}},
},
{
in: `{"X": 1,"Y":2}`,
ptr: new(S5),
err: fmt.Errorf("json: unknown field \"X\""),
disallowUnknownFields: true,
},
{
@@ -626,9 +626,9 @@ var unmarshalTests = []unmarshalTest{
out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}},
},
{
in: `{"X": 1,"Y":2}`,
ptr: new(S10),
err: fmt.Errorf("json: unknown field \"X\""),
disallowUnknownFields: true,
},
@@ -835,8 +835,8 @@ var unmarshalTests = []unmarshalTest{
"Q": 18,
"extra": true
}`,
ptr: new(Top),
err: fmt.Errorf("json: unknown field \"extra\""),
disallowUnknownFields: true,
},
{
@@ -862,8 +862,8 @@ var unmarshalTests = []unmarshalTest{
"Z": 17,
"Q": 18
}`,
ptr: new(Top),
err: fmt.Errorf("json: unknown field \"extra\""),
disallowUnknownFields: true,
},
}
@@ -2089,10 +2089,14 @@ func TestInvalidStringOption(t *testing.T) {
}
}
// Test unmarshal behavior with regards to embedded pointers to unexported structs.
// If unallocated, this returns an error because unmarshal cannot set the field.
// Issue 21357.
func TestUnmarshalEmbeddedPointerUnexported(t *testing.T) {
// Test unmarshal behavior with regards to embedded unexported structs.
//
// (Issue 21357) If the embedded struct is a pointer and is unallocated,
// this returns an error because unmarshal cannot set the field.
//
// (Issue 24152) If the embedded struct is given an explicit name,
// ensure that the normal unmarshal logic does not panic in reflect.
func TestUnmarshalEmbeddedUnexported(t *testing.T) {
type (
embed1 struct{ Q int }
embed2 struct{ Q int }
@@ -2119,6 +2123,18 @@ func TestUnmarshalEmbeddedPointerUnexported(t *testing.T) {
*embed3
R int
}
S6 struct {
embed1 `json:"embed1"`
}
S7 struct {
embed1 `json:"embed1"`
embed2
}
S8 struct {
embed1 `json:"embed1"`
embed2 `json:"embed2"`
Q int
}
)
tests := []struct {
@@ -2154,6 +2170,32 @@ func TestUnmarshalEmbeddedPointerUnexported(t *testing.T) {
ptr: new(S5),
out: &S5{R: 2},
err: fmt.Errorf("json: cannot set embedded pointer to unexported struct: json.embed3"),
}, {
// Issue 24152, ensure decodeState.indirect does not panic.
in: `{"embed1": {"Q": 1}}`,
ptr: new(S6),
out: &S6{embed1{1}},
}, {
// Issue 24153, check that we can still set forwarded fields even in
// the presence of a name conflict.
//
// This relies on obscure behavior of reflect where it is possible
// to set a forwarded exported field on an unexported embedded struct
// even though there is a name conflict, even when it would have been
// impossible to do so according to Go visibility rules.
// Go forbids this because it is ambiguous whether S7.Q refers to
// S7.embed1.Q or S7.embed2.Q. Since embed1 and embed2 are unexported,
// it should be impossible for an external package to set either Q.
//
// It is probably okay for a future reflect change to break this.
in: `{"embed1": {"Q": 1}, "Q": 2}`,
ptr: new(S7),
out: &S7{embed1{1}, embed2{2}},
}, {
// Issue 24153, similar to the S7 case.
in: `{"embed1": {"Q": 1}, "embed2": {"Q": 2}, "Q": 3}`,
ptr: new(S8),
out: &S8{embed1{1}, embed2{2}, 3},
}}
for i, tt := range tests {
...
@@ -44,9 +44,9 @@ func New(ctxt *build.Context, fset *token.FileSet, packages map[string]*types.Pa
// for a package that is in the process of being imported.
var importing types.Package
// Import(path) is a shortcut for ImportFrom(path, "", 0).
// Import(path) is a shortcut for ImportFrom(path, ".", 0).
func (p *Importer) Import(path string) (*types.Package, error) {
return p.ImportFrom(path, "", 0)
return p.ImportFrom(path, ".", 0) // use "." rather than "" (see issue #24441)
}
// ImportFrom imports the package with the given import path resolved from the given srcDir,
@@ -60,23 +60,10 @@ func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*type
panic("non-zero import mode")
}
// determine package path (do vendor resolution)
var bp *build.Package
var err error
switch {
default:
if abs, err := p.absPath(srcDir); err == nil { // see issue #14282
srcDir = abs
}
bp, err = p.ctxt.Import(path, srcDir, build.FindOnly)
case build.IsLocalImport(path):
// "./x" -> "srcDir/x"
bp, err = p.ctxt.ImportDir(filepath.Join(srcDir, path), build.FindOnly)
case p.isAbsPath(path):
return nil, fmt.Errorf("invalid absolute import path %q", path)
}
if abs, err := p.absPath(srcDir); err == nil { // see issue #14282
srcDir = abs
}
bp, err := p.ctxt.Import(path, srcDir, 0)
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
@@ -113,11 +100,6 @@ func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*type
}
}()
// collect package files
bp, err = p.ctxt.ImportDir(bp.Dir, 0)
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
var filenames []string
filenames = append(filenames, bp.GoFiles...)
filenames = append(filenames, bp.CgoFiles...)
...
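The srcimporter changes above sit behind the public go/importer API; a hedged sketch of driving them from outside the package (it needs the Go source tree installed, as the tests below also note):

package main

import (
    "fmt"
    "go/importer"
    "log"
)

func main() {
    // importer.For("source", nil) is implemented by go/internal/srcimporter,
    // so Import now uses "." as the source directory as described above.
    imp := importer.For("source", nil)
    pkg, err := imp.Import("net/http/httptest")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(pkg.Path(), pkg.Name())
}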
@@ -10,6 +10,7 @@ import (
"go/types"
"internal/testenv"
"io/ioutil"
"path"
"path/filepath"
"runtime"
"strings"
@@ -162,3 +163,34 @@ func TestIssue20855(t *testing.T) {
t.Error("got no package despite no hard errors")
}
}
func testImportPath(t *testing.T, pkgPath string) {
if !testenv.HasSrc() {
t.Skip("no source code available")
}
pkgName := path.Base(pkgPath)
pkg, err := importer.Import(pkgPath)
if err != nil {
t.Fatal(err)
}
if pkg.Name() != pkgName {
t.Errorf("got %q; want %q", pkg.Name(), pkgName)
}
if pkg.Path() != pkgPath {
t.Errorf("got %q; want %q", pkg.Path(), pkgPath)
}
}
// TestIssue23092 tests relative imports.
func TestIssue23092(t *testing.T) {
testImportPath(t, "./testdata/issue23092")
}
// TestIssue24392 tests imports against a path containing 'testdata'.
func TestIssue24392(t *testing.T) {
testImportPath(t, "go/internal/srcimporter/testdata/issue24392")
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package issue23092
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package issue24392
@@ -103,11 +103,21 @@ func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
g.mu.Unlock()
}
// Forget tells the singleflight to forget about a key. Future calls
// to Do for this key will call the function rather than waiting for
// an earlier call to complete.
func (g *Group) Forget(key string) {
g.mu.Lock()
delete(g.m, key)
g.mu.Unlock()
}
// ForgetUnshared tells the singleflight to forget about a key if it is not
// shared with any other goroutines. Future calls to Do for a forgotten key
// will call the function rather than waiting for an earlier call to complete.
// Returns whether the key was forgotten or unknown--that is, whether no
// other goroutines are waiting for the result.
func (g *Group) ForgetUnshared(key string) bool {
g.mu.Lock()
defer g.mu.Unlock()
c, ok := g.m[key]
if !ok {
return true
}
if c.dups == 0 {
delete(g.m, key)
return true
}
return false
}
@@ -80,6 +80,7 @@ func init() {
// command line, with arguments separated by NUL bytes.
// The package initialization registers it as /debug/pprof/cmdline.
func Cmdline(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Content-Type-Options", "nosniff")
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprintf(w, strings.Join(os.Args, "\x00"))
}
@@ -100,33 +101,36 @@ func durationExceedsWriteTimeout(r *http.Request, seconds float64) bool {
return ok && srv.WriteTimeout != 0 && seconds >= srv.WriteTimeout.Seconds()
}
func serveError(w http.ResponseWriter, status int, txt string) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.Header().Del("Content-Disposition")
w.WriteHeader(status)
fmt.Fprintln(w, txt)
}
// Profile responds with the pprof-formatted cpu profile.
// The package initialization registers it as /debug/pprof/profile.
func Profile(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Content-Type-Options", "nosniff")
sec, _ := strconv.ParseInt(r.FormValue("seconds"), 10, 64)
if sec == 0 {
sec = 30
}
if durationExceedsWriteTimeout(r, float64(sec)) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintln(w, "profile duration exceeds server's WriteTimeout")
serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout")
return
}
// Set Content Type assuming StartCPUProfile will work,
// because if it does it starts writing.
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", `attachment; filename="profile"`)
if err := pprof.StartCPUProfile(w); err != nil {
// StartCPUProfile failed, so no writes yet.
// Can change header back to text content
// and send error code.
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err)
serveError(w, http.StatusInternalServerError,
fmt.Sprintf("Could not enable CPU profiling: %s", err))
return
}
sleep(w, time.Duration(sec)*time.Second)
@@ -137,29 +141,25 @@ func Profile(w http.ResponseWriter, r *http.Request) {
// Tracing lasts for duration specified in seconds GET parameter, or for 1 second if not specified.
// The package initialization registers it as /debug/pprof/trace.
func Trace(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Content-Type-Options", "nosniff")
sec, err := strconv.ParseFloat(r.FormValue("seconds"), 64)
if sec <= 0 || err != nil {
sec = 1
}
if durationExceedsWriteTimeout(r, sec) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintln(w, "profile duration exceeds server's WriteTimeout")
serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout")
return
}
// Set Content Type assuming trace.Start will work,
// because if it does it starts writing.
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", `attachment; filename="trace"`)
if err := trace.Start(w); err != nil {
// trace.Start failed, so no writes yet.
// Can change header back to text content and send error code.
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Could not enable tracing: %s\n", err)
serveError(w, http.StatusInternalServerError,
fmt.Sprintf("Could not enable tracing: %s", err))
return
}
sleep(w, time.Duration(sec*float64(time.Second)))
@@ -170,6 +170,7 @@ func Trace(w http.ResponseWriter, r *http.Request) {
// responding with a table mapping program counters to function names.
// The package initialization registers it as /debug/pprof/symbol.
func Symbol(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Content-Type-Options", "nosniff")
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
// We have to read the whole POST body before
...@@ -222,18 +223,23 @@ func Handler(name string) http.Handler { ...@@ -222,18 +223,23 @@ func Handler(name string) http.Handler {
type handler string type handler string
func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff")
debug, _ := strconv.Atoi(r.FormValue("debug"))
p := pprof.Lookup(string(name)) p := pprof.Lookup(string(name))
if p == nil { if p == nil {
w.WriteHeader(404) serveError(w, http.StatusNotFound, "Unknown profile")
fmt.Fprintf(w, "Unknown profile: %s\n", name)
return return
} }
gc, _ := strconv.Atoi(r.FormValue("gc")) gc, _ := strconv.Atoi(r.FormValue("gc"))
if name == "heap" && gc > 0 { if name == "heap" && gc > 0 {
runtime.GC() runtime.GC()
} }
debug, _ := strconv.Atoi(r.FormValue("debug"))
if debug != 0 {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
} else {
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, name))
}
p.WriteTo(w, debug) p.WriteTo(w, debug)
} }
......
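A quick way to observe the new header behavior from the handler change above is to hit a single profile handler twice, once with and once without debug=1. This standalone program is illustrative only and assumes a toolchain that already contains this change (older net/http/pprof versions return text/plain for both requests):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/http/pprof"
)

func main() {
	// pprof.Handler("heap") is the same handler the test below exercises
	// through Index; serving it directly keeps the example small.
	srv := httptest.NewServer(pprof.Handler("heap"))
	defer srv.Close()

	for _, q := range []string{"", "?debug=1"} {
		resp, err := http.Get(srv.URL + q)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		// debug=0: application/octet-stream with a download filename;
		// debug=1: legacy text/plain output.
		fmt.Printf("%-9q %-30s %s\n", q,
			resp.Header.Get("Content-Type"),
			resp.Header.Get("Content-Disposition"))
	}
}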
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pprof
import (
"bytes"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)
func TestHandlers(t *testing.T) {
testCases := []struct {
path string
handler http.HandlerFunc
statusCode int
contentType string
contentDisposition string
resp []byte
}{
{"/debug/pprof/<script>scripty<script>", Index, http.StatusNotFound, "text/plain; charset=utf-8", "", []byte("Unknown profile\n")},
{"/debug/pprof/heap", Index, http.StatusOK, "application/octet-stream", `attachment; filename="heap"`, nil},
{"/debug/pprof/heap?debug=1", Index, http.StatusOK, "text/plain; charset=utf-8", "", nil},
{"/debug/pprof/cmdline", Cmdline, http.StatusOK, "text/plain; charset=utf-8", "", nil},
{"/debug/pprof/profile?seconds=1", Profile, http.StatusOK, "application/octet-stream", `attachment; filename="profile"`, nil},
{"/debug/pprof/symbol", Symbol, http.StatusOK, "text/plain; charset=utf-8", "", nil},
{"/debug/pprof/trace", Trace, http.StatusOK, "application/octet-stream", `attachment; filename="trace"`, nil},
}
for _, tc := range testCases {
t.Run(tc.path, func(t *testing.T) {
req := httptest.NewRequest("GET", "http://example.com"+tc.path, nil)
w := httptest.NewRecorder()
tc.handler(w, req)
resp := w.Result()
if got, want := resp.StatusCode, tc.statusCode; got != want {
t.Errorf("status code: got %d; want %d", got, want)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("when reading response body, expected non-nil err; got %v", err)
}
if got, want := resp.Header.Get("X-Content-Type-Options"), "nosniff"; got != want {
t.Errorf("X-Content-Type-Options: got %q; want %q", got, want)
}
if got, want := resp.Header.Get("Content-Type"), tc.contentType; got != want {
t.Errorf("Content-Type: got %q; want %q", got, want)
}
if got, want := resp.Header.Get("Content-Disposition"), tc.contentDisposition; got != want {
t.Errorf("Content-Disposition: got %q; want %q", got, want)
}
if resp.StatusCode == http.StatusOK {
return
}
if got, want := resp.Header.Get("X-Go-Pprof"), "1"; got != want {
t.Errorf("X-Go-Pprof: got %q; want %q", got, want)
}
if !bytes.Equal(body, tc.resp) {
t.Errorf("response: got %q; want %q", body, tc.resp)
}
})
}
}
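The test above calls the exported handler functions directly; in a real server they are normally reached through the routes the package registers on http.DefaultServeMux at init time, or wired onto a private mux explicitly. A short sketch of the explicit wiring (paths chosen to mirror the defaults; the listen address is arbitrary):

package main

import (
	"log"
	"net/http"
	"net/http/pprof"
)

func main() {
	// Register the exported pprof handlers on a private mux instead of
	// relying on the package's DefaultServeMux registration.
	mux := http.NewServeMux()
	mux.HandleFunc("/debug/pprof/", pprof.Index)
	mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	log.Fatal(http.ListenAndServe("localhost:6060", mux))
}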
...@@ -194,10 +194,16 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, err ...@@ -194,10 +194,16 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, err
resolverFunc = alt resolverFunc = alt
} }
// We don't want a cancelation of ctx to affect the
// lookupGroup operation. Otherwise if our context gets
// canceled it might cause an error to be returned to a lookup
// using a completely different context.
lookupGroupCtx, lookupGroupCancel := context.WithCancel(context.Background())
dnsWaitGroup.Add(1) dnsWaitGroup.Add(1)
ch, called := lookupGroup.DoChan(host, func() (interface{}, error) { ch, called := lookupGroup.DoChan(host, func() (interface{}, error) {
defer dnsWaitGroup.Done() defer dnsWaitGroup.Done()
return testHookLookupIP(ctx, resolverFunc, host) return testHookLookupIP(lookupGroupCtx, resolverFunc, host)
}) })
if !called { if !called {
dnsWaitGroup.Done() dnsWaitGroup.Done()
...@@ -205,20 +211,28 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, err ...@@ -205,20 +211,28 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, err
select { select {
case <-ctx.Done(): case <-ctx.Done():
// If the DNS lookup timed out for some reason, force // Our context was canceled. If we are the only
// future requests to start the DNS lookup again // goroutine looking up this key, then drop the key
// rather than waiting for the current lookup to // from the lookupGroup and cancel the lookup.
// complete. See issue 8602. // If there are other goroutines looking up this key,
ctxErr := ctx.Err() // let the lookup continue uncanceled, and let later
if ctxErr == context.DeadlineExceeded { // lookups with the same key share the result.
lookupGroup.Forget(host) // See issues 8602, 20703, 22724.
if lookupGroup.ForgetUnshared(host) {
lookupGroupCancel()
} else {
go func() {
<-ch
lookupGroupCancel()
}()
} }
err := mapErr(ctxErr) err := mapErr(ctx.Err())
if trace != nil && trace.DNSDone != nil { if trace != nil && trace.DNSDone != nil {
trace.DNSDone(nil, false, err) trace.DNSDone(nil, false, err)
} }
return nil, err return nil, err
case r := <-ch: case r := <-ch:
lookupGroupCancel()
if trace != nil && trace.DNSDone != nil { if trace != nil && trace.DNSDone != nil {
addrs, _ := r.Val.([]IPAddr) addrs, _ := r.Val.([]IPAddr)
trace.DNSDone(ipAddrsEface(addrs), r.Shared, r.Err) trace.DNSDone(ipAddrsEface(addrs), r.Shared, r.Err)
......
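The core idea of the LookupIPAddr change above — run the shared lookup under a context detached from any single caller, and cancel it only once the last interested caller is gone — can be sketched without the net package's internal singleflight group (ForgetUnshared belongs to that internal copy and is not part of the public singleflight API). The following is a simplified illustration of the pattern, not the library code:

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// lookupDetached runs do under its own context so that canceling one
// caller's ctx cannot poison a lookup whose result other callers may
// still want to share. A caller that gives up leaves a small goroutine
// behind to cancel the detached work once it finishes.
func lookupDetached(ctx context.Context, host string,
	do func(context.Context, string) ([]net.IPAddr, error)) ([]net.IPAddr, error) {

	workCtx, cancel := context.WithCancel(context.Background())

	type result struct {
		addrs []net.IPAddr
		err   error
	}
	ch := make(chan result, 1)
	go func() {
		addrs, err := do(workCtx, host)
		ch <- result{addrs, err}
	}()

	select {
	case <-ctx.Done():
		go func() { <-ch; cancel() }() // release the detached work later
		return nil, ctx.Err()
	case r := <-ch:
		cancel()
		return r.addrs, r.err
	}
}

func main() {
	ctx, cancelCtx := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancelCtx()
	addrs, err := lookupDetached(ctx, "example.com",
		net.DefaultResolver.LookupIPAddr)
	fmt.Println(addrs, err)
}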
...@@ -791,3 +791,28 @@ func TestLookupNonLDH(t *testing.T) { ...@@ -791,3 +791,28 @@ func TestLookupNonLDH(t *testing.T) {
t.Fatalf("lookup error = %v, want %v", err, errNoSuchHost) t.Fatalf("lookup error = %v, want %v", err, errNoSuchHost)
} }
} }
func TestLookupContextCancel(t *testing.T) {
if testenv.Builder() == "" {
testenv.MustHaveExternalNetwork(t)
}
if runtime.GOOS == "nacl" {
t.Skip("skip on nacl")
}
defer dnsWaitGroup.Wait()
ctx, ctxCancel := context.WithCancel(context.Background())
ctxCancel()
_, err := DefaultResolver.LookupIPAddr(ctx, "google.com")
if err != errCanceled {
testenv.SkipFlakyNet(t)
t.Fatal(err)
}
ctx = context.Background()
_, err = DefaultResolver.LookupIPAddr(ctx, "google.com")
if err != nil {
testenv.SkipFlakyNet(t)
t.Fatal(err)
}
}
...@@ -87,6 +87,7 @@ func TestTCPSpuriousConnSetupCompletionWithCancel(t *testing.T) { ...@@ -87,6 +87,7 @@ func TestTCPSpuriousConnSetupCompletionWithCancel(t *testing.T) {
if testenv.Builder() == "" { if testenv.Builder() == "" {
testenv.MustHaveExternalNetwork(t) testenv.MustHaveExternalNetwork(t)
} }
defer dnsWaitGroup.Wait()
t.Parallel() t.Parallel()
const tries = 10000 const tries = 10000
var wg sync.WaitGroup var wg sync.WaitGroup
......
...@@ -139,14 +139,12 @@ func typestring(x interface{}) string { ...@@ -139,14 +139,12 @@ func typestring(x interface{}) string {
} }
// printany prints an argument passed to panic. // printany prints an argument passed to panic.
// If panic is called with a value that has a String or Error method,
// it has already been converted into a string by preprintpanics.
func printany(i interface{}) { func printany(i interface{}) {
switch v := i.(type) { switch v := i.(type) {
case nil: case nil:
print("nil") print("nil")
case stringer:
print(v.String())
case error:
print(v.Error())
case bool: case bool:
print(v) print(v)
case int: case int:
......
...@@ -384,7 +384,6 @@ func Goexit() { ...@@ -384,7 +384,6 @@ func Goexit() {
// Call all Error and String methods before freezing the world. // Call all Error and String methods before freezing the world.
// Used when crashing with panicking. // Used when crashing with panicking.
// This must match types handled by printany.
func preprintpanics(p *_panic) { func preprintpanics(p *_panic) {
defer func() { defer func() {
if recover() != nil { if recover() != nil {
...@@ -410,8 +409,6 @@ func printpanics(p *_panic) { ...@@ -410,8 +409,6 @@ func printpanics(p *_panic) {
print("\t") print("\t")
} }
print("panic: ") print("panic: ")
// Because of preprintpanics, p.arg cannot be an error or
// stringer, so this won't call into user code.
printany(p.arg) printany(p.arg)
if p.recovered { if p.recovered {
print(" [recovered]") print(" [recovered]")
......
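The printany/preprintpanics split above removes the stringer and error cases from printany because preprintpanics has already called those methods and replaced the panic value with a plain string before the world is frozen. A rough, self-contained sketch of that conversion step (in the spirit of the runtime code, not runtime source):

package main

import "fmt"

// stringifyPanicValue mimics what preprintpanics is described as doing:
// call user-provided Error/String methods up front, so the later printer
// only ever sees plain values and never calls back into user code.
func stringifyPanicValue(v interface{}) (out interface{}) {
	out = v
	defer func() {
		// A misbehaving Error/String method may itself panic; fall back
		// to the original value, as the runtime's deferred recover does.
		if recover() != nil {
			out = v
		}
	}()
	switch x := v.(type) {
	case error:
		return x.Error()
	case fmt.Stringer:
		return x.String()
	}
	return v
}

func main() {
	fmt.Println(stringifyPanicValue(fmt.Errorf("boom")))
}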
...@@ -423,6 +423,12 @@ func releaseSudog(s *sudog) { ...@@ -423,6 +423,12 @@ func releaseSudog(s *sudog) {
// funcPC returns the entry PC of the function f. // funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined. // It assumes that f is a func value. Otherwise the behavior is undefined.
// CAREFUL: In programs with plugins, funcPC can return different values
// for the same function (because there are actually multiple copies of
// the same function in the address space). To be safe, don't use the
// results of this function in any == expression. It is only safe to
// use the result as an address at which to start executing code.
//
// For gccgo note that this differs from the gc implementation; the gc // For gccgo note that this differs from the gc implementation; the gc
// implementation adds sys.PtrSize to the address of the interface // implementation adds sys.PtrSize to the address of the interface
// value, but GCC's alias analysis decides that that can not be a // value, but GCC's alias analysis decides that that can not be a
......
...@@ -115,6 +115,35 @@ type Func struct { ...@@ -115,6 +115,35 @@ type Func struct {
entry uintptr entry uintptr
} }
// A FuncID identifies particular functions that need to be treated
// specially by the runtime.
// Note that in some situations involving plugins, there may be multiple
// copies of a particular special runtime function.
// Note: this list must match the list in cmd/internal/objabi/funcid.go.
type funcID uint32
const (
funcID_normal funcID = iota // not a special function
funcID_goexit
funcID_jmpdefer
funcID_mcall
funcID_morestack
funcID_mstart
funcID_rt0_go
funcID_asmcgocall
funcID_sigpanic
funcID_runfinq
funcID_bgsweep
funcID_forcegchelper
funcID_timerproc
funcID_gcBgMarkWorker
funcID_systemstack_switch
funcID_systemstack
funcID_cgocallback_gofunc
funcID_gogo
funcID_externalthreadhandler
)
// FuncForPC returns a *Func describing the function that contains the // FuncForPC returns a *Func describing the function that contains the
// given program counter address, or else nil. // given program counter address, or else nil.
// //
......
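The funcID table above gives the runtime a stable way to recognize its own special functions; combined with the funcPC warning earlier in this diff, the point is that identity checks should compare IDs, never entry addresses, because plugins can duplicate function code. A toy illustration of that design choice (it reuses the funcID naming only for flavor and is not part of this diff):

package main

import "fmt"

// A toy funcID in the spirit of the runtime's table: identify special
// functions by a stable tag rather than by comparing entry PCs, which
// can differ across plugin copies of the same function.
type funcID uint32

const (
	funcID_normal funcID = iota
	funcID_sigpanic
)

type funcInfo struct {
	name string
	id   funcID
}

func isSigpanic(f funcInfo) bool {
	return f.id == funcID_sigpanic // compare IDs, never PCs
}

func main() {
	fmt.Println(isSigpanic(funcInfo{name: "runtime.sigpanic", id: funcID_sigpanic}))
}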
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "plugin"
func main() {
p, err := plugin.Open("issue24351.so")
if err != nil {
panic(err)
}
f, err := p.Lookup("B")
if err != nil {
panic(err)
}
c := make(chan bool)
f.(func(chan bool))(c)
<-c
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "fmt"
func B(c chan bool) {
go func() {
fmt.Println(1.5)
c <- true
}()
}
...@@ -85,3 +85,8 @@ GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue22175 src/issue22175/main. ...@@ -85,3 +85,8 @@ GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue22175 src/issue22175/main.
GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue.22295.so issue22295.pkg GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue.22295.so issue22295.pkg
GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue22295 src/issue22295.pkg/main.go GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue22295 src/issue22295.pkg/main.go
./issue22295 ./issue22295
# Test for issue 24351
GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue24351.so src/issue24351/plugin.go
GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue24351 src/issue24351/main.go
./issue24351
...@@ -790,6 +790,7 @@ func TestRebuilding(t *testing.T) { ...@@ -790,6 +790,7 @@ func TestRebuilding(t *testing.T) {
// If the .a file is newer than the .so, the .so is rebuilt (but not the .a) // If the .a file is newer than the .so, the .so is rebuilt (but not the .a)
t.Run("newarchive", func(t *testing.T) { t.Run("newarchive", func(t *testing.T) {
resetFileStamps() resetFileStamps()
AssertNotRebuilt(t, "new .a file before build", filepath.Join(gopathInstallDir, "depBase.a"))
goCmd(t, "list", "-linkshared", "-f={{.ImportPath}} {{.Stale}} {{.StaleReason}} {{.Target}}", "depBase") goCmd(t, "list", "-linkshared", "-f={{.ImportPath}} {{.Stale}} {{.StaleReason}} {{.Target}}", "depBase")
AssertNotRebuilt(t, "new .a file before build", filepath.Join(gopathInstallDir, "depBase.a")) AssertNotRebuilt(t, "new .a file before build", filepath.Join(gopathInstallDir, "depBase.a"))
cleanup := touch(t, filepath.Join(gopathInstallDir, "depBase.a")) cleanup := touch(t, filepath.Join(gopathInstallDir, "depBase.a"))
......