Commit 11309337 by Ian Lance Taylor

libgo: update to Go 1.10.2 release

    
    Reviewed-on: https://go-review.googlesource.com/115196

From-SVN: r261041
parent 8b0b334a
9731580e76c065b76e3a103356bb8920da05a685
79eca4fd642724d89e9bec8f79889451f6632a46
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
bf86aec25972f3a100c3aa58a6abcbcc35bdea49
71bdbf431b79dff61944f22c25c7e085ccfc25d5
The first line of this file holds the git revision number of the
last merge done from the master library sources.
......@@ -122,6 +122,7 @@ net/http/httptest
net/http/httptrace
net/http/httputil
net/http/internal
net/http/pprof
net/internal/socktest
net/mail
net/rpc
......
......@@ -366,7 +366,7 @@ parseExtras:
epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
modified = time.Unix(epoch.Unix()+secs, nsecs)
}
case unixExtraID:
case unixExtraID, infoZipUnixExtraID:
if len(fieldBuf) < 8 {
continue parseExtras
}
......@@ -379,12 +379,6 @@ parseExtras:
}
ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
modified = time.Unix(ts, 0)
case infoZipUnixExtraID:
if len(fieldBuf) < 4 {
continue parseExtras
}
ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
modified = time.Unix(ts, 0)
}
}
......
......@@ -414,7 +414,7 @@ var tests = []ZipTest{
Name: "test.txt",
Content: []byte{},
Size: 1<<32 - 1,
Modified: time.Date(2017, 10, 31, 21, 17, 27, 0, timeZone(-7*time.Hour)),
Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
Mode: 0644,
},
},
......
......@@ -3265,6 +3265,20 @@ func TestGoVetWithOnlyTestFiles(t *testing.T) {
tg.run("vet", "p")
}
// Issue 24193.
func TestVetWithOnlyCgoFiles(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/p/p.go", "package p; import \"C\"; func F() {}")
tg.setenv("GOPATH", tg.path("."))
tg.run("vet", "p")
}
// Issue 9767, 19769.
func TestGoGetDotSlashDownload(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
......@@ -5099,6 +5113,28 @@ func TestCacheOutput(t *testing.T) {
}
}
func TestCacheListStale(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.path("cache"))
tg.tempFile("gopath/src/p/p.go", "package p; import _ \"q\"; func F(){}\n")
tg.tempFile("gopath/src/q/q.go", "package q; func F(){}\n")
tg.tempFile("gopath/src/m/m.go", "package main; import _ \"q\"; func main(){}\n")
tg.setenv("GOPATH", tg.path("gopath"))
tg.run("install", "p", "m")
tg.run("list", "-f={{.ImportPath}} {{.Stale}}", "m", "q", "p")
tg.grepStdout("^m false", "m should not be stale")
tg.grepStdout("^q true", "q should be stale")
tg.grepStdout("^p false", "p should not be stale")
}
func TestCacheCoverage(t *testing.T) {
tooSlow(t)
......@@ -5792,6 +5828,22 @@ func TestAtomicCoverpkgAll(t *testing.T) {
}
}
// Issue 23882.
func TestCoverpkgAllRuntime(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/x.go", `package x; import _ "runtime"; func F() {}`)
tg.tempFile("src/x/x_test.go", `package x; import "testing"; func TestF(t *testing.T) { F() }`)
tg.setenv("GOPATH", tg.path("."))
tg.run("test", "-coverpkg=all", "x")
if canRace {
tg.run("test", "-coverpkg=all", "-race", "x")
}
}
func TestBadCommandLines(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
......@@ -5949,3 +6001,36 @@ func TestBadCgoDirectives(t *testing.T) {
tg.run("build", "-n", "x")
tg.grepStderr("-D@foo", "did not find -D@foo in commands")
}
func TestTwoPkgConfigs(t *testing.T) {
if !canCgo {
t.Skip("no cgo")
}
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
t.Skipf("no shell scripts on %s", runtime.GOOS)
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/a.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
tg.tempFile("src/x/b.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
tg.tempFile("pkg-config.sh", `#!/bin/sh
echo $* >>`+tg.path("pkg-config.out"))
tg.must(os.Chmod(tg.path("pkg-config.sh"), 0755))
tg.setenv("GOPATH", tg.path("."))
tg.setenv("PKG_CONFIG", tg.path("pkg-config.sh"))
tg.run("build", "x")
out, err := ioutil.ReadFile(tg.path("pkg-config.out"))
tg.must(err)
out = bytes.TrimSpace(out)
want := "--cflags --static --static -- a a\n--libs --static --static -- a a"
if !bytes.Equal(out, []byte(want)) {
t.Errorf("got %q want %q", out, want)
}
}
......@@ -809,8 +809,8 @@ func repoRootForImportDynamic(importPath string, security web.SecurityMode) (*re
}
}
if !strings.Contains(mmi.RepoRoot, "://") {
return nil, fmt.Errorf("%s: invalid repo root %q; no scheme", urlStr, mmi.RepoRoot)
if err := validateRepoRootScheme(mmi.RepoRoot); err != nil {
return nil, fmt.Errorf("%s: invalid repo root %q: %v", urlStr, mmi.RepoRoot, err)
}
rr := &repoRoot{
vcs: vcsByCmd(mmi.VCS),
......@@ -824,6 +824,36 @@ func repoRootForImportDynamic(importPath string, security web.SecurityMode) (*re
return rr, nil
}
// validateRepoRootScheme returns an error if repoRoot does not seem
// to have a valid URL scheme. At this point we permit things that
// aren't valid URLs, although later, if not using -insecure, we will
// restrict repoRoots to be valid URLs. This is only because we've
// historically permitted them, and people may depend on that.
func validateRepoRootScheme(repoRoot string) error {
end := strings.Index(repoRoot, "://")
if end <= 0 {
return errors.New("no scheme")
}
// RFC 3986 section 3.1.
for i := 0; i < end; i++ {
c := repoRoot[i]
switch {
case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
// OK.
case '0' <= c && c <= '9' || c == '+' || c == '-' || c == '.':
// OK except at start.
if i == 0 {
return errors.New("invalid scheme")
}
default:
return errors.New("invalid scheme")
}
}
return nil
}
var fetchGroup singleflight.Group
var (
fetchCacheMu sync.Mutex
......
......@@ -408,3 +408,46 @@ func TestMatchGoImport(t *testing.T) {
}
}
}
func TestValidateRepoRootScheme(t *testing.T) {
tests := []struct {
root string
err string
}{
{
root: "",
err: "no scheme",
},
{
root: "http://",
err: "",
},
{
root: "a://",
err: "",
},
{
root: "a#://",
err: "invalid scheme",
},
{
root: "-config://",
err: "invalid scheme",
},
}
for _, test := range tests {
err := validateRepoRootScheme(test.root)
if err == nil {
if test.err != "" {
t.Errorf("validateRepoRootScheme(%q) = nil, want %q", test.root, test.err)
}
} else if test.err == "" {
if err != nil {
t.Errorf("validateRepoRootScheme(%q) = %q, want nil", test.root, test.err)
}
} else if err.Error() != test.err {
t.Errorf("validateRepoRootScheme(%q) = %q, want %q", test.root, err, test.err)
}
}
}
......@@ -673,6 +673,14 @@ func runTest(cmd *base.Command, args []string) {
continue
}
// If using the race detector, silently ignore
// attempts to run coverage on the runtime
// packages. It will cause the race detector
// to be invoked before it has been initialized.
if cfg.BuildRace && p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) {
continue
}
if haveMatch {
testCoverPkgs = append(testCoverPkgs, p)
}
......
......@@ -62,11 +62,11 @@ func runVet(cmd *base.Command, args []string) {
base.Errorf("%v", err)
continue
}
if len(ptest.GoFiles) == 0 && pxtest == nil {
if len(ptest.GoFiles) == 0 && len(ptest.CgoFiles) == 0 && pxtest == nil {
base.Errorf("go vet %s: no Go files in %s", p.ImportPath, p.Dir)
continue
}
if len(ptest.GoFiles) > 0 {
if len(ptest.GoFiles) > 0 || len(ptest.CgoFiles) > 0 {
root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, ptest))
}
if pxtest != nil {
......
......@@ -461,15 +461,7 @@ func (b *Builder) useCache(a *Action, p *load.Package, actionHash cache.ActionID
// If so, it's up to date and we can reuse it instead of rebuilding it.
var buildID string
if target != "" && !cfg.BuildA {
var err error
buildID, err = buildid.ReadFile(target)
if err != nil && b.ComputeStaleOnly {
if p != nil && !p.Stale {
p.Stale = true
p.StaleReason = "target missing"
}
return true
}
buildID, _ = buildid.ReadFile(target)
if strings.HasPrefix(buildID, actionID+buildIDSeparator) {
a.buildID = buildID
a.built = target
......@@ -546,7 +538,10 @@ func (b *Builder) useCache(a *Action, p *load.Package, actionHash cache.ActionID
}
}
}
return true
// Fall through to update a.buildID from the build artifact cache,
// which will affect the computation of buildIDs for targets
// higher up in the dependency graph.
}
// Check the build artifact cache.
......@@ -574,6 +569,10 @@ func (b *Builder) useCache(a *Action, p *load.Package, actionHash cache.ActionID
a.built = file
a.Target = "DO NOT USE - using cache"
a.buildID = buildID
if p := a.Package; p != nil {
// Clearer than explaining that something else is stale.
p.StaleReason = "not installed but available in build cache"
}
return true
}
}
......@@ -584,6 +583,10 @@ func (b *Builder) useCache(a *Action, p *load.Package, actionHash cache.ActionID
a.output = []byte{}
}
if b.ComputeStaleOnly {
return true
}
return false
}
......
......@@ -956,11 +956,19 @@ func splitPkgConfigOutput(out []byte) []string {
// Calls pkg-config if needed and returns the cflags/ldflags needed to build the package.
func (b *Builder) getPkgConfigFlags(p *load.Package) (cflags, ldflags []string, err error) {
if pkgs := p.CgoPkgConfig; len(pkgs) > 0 {
if pcargs := p.CgoPkgConfig; len(pcargs) > 0 {
// pkg-config permits arguments to appear anywhere in
// the command line. Move them all to the front, before --.
var pcflags []string
for len(pkgs) > 0 && strings.HasPrefix(pkgs[0], "--") {
pcflags = append(pcflags, pkgs[0])
pkgs = pkgs[1:]
var pkgs []string
for _, pcarg := range pcargs {
if pcarg == "--" {
// We're going to add our own "--" argument.
} else if strings.HasPrefix(pcarg, "--") {
pcflags = append(pcflags, pcarg)
} else {
pkgs = append(pkgs, pcarg)
}
}
for _, pkg := range pkgs {
if !load.SafeArg(pkg) {
......@@ -1107,7 +1115,7 @@ func BuildInstallFunc(b *Builder, a *Action) (err error) {
// We want to hide that awful detail as much as possible, so don't
// advertise it by touching the mtimes (usually the libraries are up
// to date).
if !a.buggyInstall {
if !a.buggyInstall && !b.ComputeStaleOnly {
now := time.Now()
os.Chtimes(a.Target, now, now)
}
......
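The getPkgConfigFlags hunk above changes how the #cgo pkg-config arguments are split: flags such as --static are pulled to the front, any literal "--" supplied by the user is dropped, and the package names follow a single "--" that cmd/go adds itself, which is why TestTwoPkgConfigs expects "--cflags --static --static -- a a". A minimal standalone sketch of that rearrangement (not the cmd/go code itself):

package main

import (
	"fmt"
	"strings"
)

// splitPkgConfigArgs mirrors the logic in the hunk above: flags go first,
// package names last, and any user-supplied "--" is discarded because a
// single "--" is added back when pkg-config is invoked.
func splitPkgConfigArgs(pcargs []string) (flags, pkgs []string) {
	for _, a := range pcargs {
		switch {
		case a == "--":
			// dropped; cmd/go adds its own "--"
		case strings.HasPrefix(a, "--"):
			flags = append(flags, a)
		default:
			pkgs = append(pkgs, a)
		}
	}
	return flags, pkgs
}

func main() {
	flags, pkgs := splitPkgConfigArgs([]string{"a", "--static", "b"})
	cmd := append([]string{"pkg-config", "--cflags"}, flags...)
	cmd = append(cmd, "--")
	cmd = append(cmd, pkgs...)
	fmt.Println(strings.Join(cmd, " ")) // pkg-config --cflags --static -- a b
}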
......@@ -46,12 +46,19 @@ var validCompilerFlags = []*regexp.Regexp{
re(`-O([^@\-].*)`),
re(`-W`),
re(`-W([^@,]+)`), // -Wall but not -Wa,-foo.
re(`-Wa,-mbig-obj`),
re(`-ansi`),
re(`-f(no-)?blocks`),
re(`-f(no-)?common`),
re(`-f(no-)?constant-cfstrings`),
re(`-fdiagnostics-show-note-include-stack`),
re(`-f(no-)?exceptions`),
re(`-f(no-)?inline-functions`),
re(`-finput-charset=([^@\-].*)`),
re(`-f(no-)?fat-lto-objects`),
re(`-f(no-)?lto`),
re(`-fmacro-backtrace-limit=(.+)`),
re(`-fmessage-length=(.+)`),
re(`-f(no-)?modules`),
re(`-f(no-)?objc-arc`),
re(`-f(no-)?omit-frame-pointer`),
......@@ -62,27 +69,42 @@ var validCompilerFlags = []*regexp.Regexp{
re(`-f(no-)?split-stack`),
re(`-f(no-)?stack-(.+)`),
re(`-f(no-)?strict-aliasing`),
re(`-f(un)signed-char`),
re(`-f(no-)?use-linker-plugin`), // safe if -B is not used; we don't permit -B
re(`-fsanitize=(.+)`),
re(`-ftemplate-depth-(.+)`),
re(`-fvisibility=(.+)`),
re(`-g([^@\-].*)?`),
re(`-m32`),
re(`-m64`),
re(`-m(arch|cpu|fpu|tune)=([^@\-].*)`),
re(`-m(no-)?avx[0-9a-z.]*`),
re(`-m(no-)?ms-bitfields`),
re(`-m(no-)?stack-(.+)`),
re(`-mmacosx-(.+)`),
re(`-mios-simulator-version-min=(.+)`),
re(`-miphoneos-version-min=(.+)`),
re(`-mnop-fun-dllimport`),
re(`-m(no-)?sse[0-9.]*`),
re(`-mwindows`),
re(`-pedantic(-errors)?`),
re(`-pipe`),
re(`-pthread`),
re(`-?-std=([^@\-].*)`),
re(`-?-stdlib=([^@\-].*)`),
re(`-w`),
re(`-x([^@\-].*)`),
}
var validCompilerFlagsWithNextArg = []string{
"-arch",
"-D",
"-I",
"-isystem",
"-framework",
"-isysroot",
"-isystem",
"--sysroot",
"-target",
"-x",
}
......@@ -90,43 +112,65 @@ var validLinkerFlags = []*regexp.Regexp{
re(`-F([^@\-].*)`),
re(`-l([^@\-].*)`),
re(`-L([^@\-].*)`),
re(`-O`),
re(`-O([^@\-].*)`),
re(`-f(no-)?(pic|PIC|pie|PIE)`),
re(`-fsanitize=([^@\-].*)`),
re(`-g([^@\-].*)?`),
re(`-m(arch|cpu|fpu|tune)=([^@\-].*)`),
re(`-mmacosx-(.+)`),
re(`-mios-simulator-version-min=(.+)`),
re(`-miphoneos-version-min=(.+)`),
re(`-mwindows`),
re(`-(pic|PIC|pie|PIE)`),
re(`-pthread`),
re(`-shared`),
re(`-?-static([-a-z0-9+]*)`),
re(`-?-stdlib=([^@\-].*)`),
// Note that any wildcards in -Wl need to exclude comma,
// since -Wl splits its argument at commas and passes
// them all to the linker uninterpreted. Allowing comma
// in a wildcard would allow tunnelling arbitrary additional
// linker arguments through one of these.
re(`-Wl,--(no-)?allow-multiple-definition`),
re(`-Wl,--(no-)?as-needed`),
re(`-Wl,-Bdynamic`),
re(`-Wl,-Bstatic`),
re(`-Wl,-d[ny]`),
re(`-Wl,--disable-new-dtags`),
re(`-Wl,--enable-new-dtags`),
re(`-Wl,--end-group`),
re(`-Wl,-framework,[^,@\-][^,]+`),
re(`-Wl,-headerpad_max_install_names`),
re(`-Wl,--no-undefined`),
re(`-Wl,-rpath,([^,@\-][^,]+)`),
re(`-Wl,-rpath[=,]([^,@\-][^,]+)`),
re(`-Wl,-search_paths_first`),
re(`-Wl,-sectcreate,([^,@\-][^,]+),([^,@\-][^,]+),([^,@\-][^,]+)`),
re(`-Wl,--start-group`),
re(`-Wl,-?-static`),
re(`-Wl,--subsystem,(native|windows|console|posix|xbox)`),
re(`-Wl,-undefined[=,]([^,@\-][^,]+)`),
re(`-Wl,-?-unresolved-symbols=[^,]+`),
re(`-Wl,--(no-)?warn-([^,]+)`),
re(`-Wl,-z,(no)?execstack`),
re(`-Wl,-z,relro`),
re(`[a-zA-Z0-9_/].*\.(a|o|obj|dll|dylib|so)`), // direct linker inputs: x.o or libfoo.so (but not -foo.o or @foo.o)
}
var validLinkerFlagsWithNextArg = []string{
"-arch",
"-F",
"-l",
"-L",
"-framework",
"-isysroot",
"--sysroot",
"-target",
"-Wl,-framework",
"-Wl,-rpath",
"-Wl,-undefined",
}
func checkCompilerFlags(name, source string, list []string) error {
......
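The note above about -Wl wildcards excluding commas is the safety property behind most of the new linker patterns: gcc and clang split a -Wl argument at commas and pass every piece to the linker uninterpreted, so a whitelisted pattern that matched commas could smuggle arbitrary extra linker flags. A small illustration (not the cmd/go code; the explicit anchoring stands in for what the re() helper does):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same shape as the -Wl,-rpath pattern above: the wildcard excludes commas.
	rpath := regexp.MustCompile(`^-Wl,-rpath,([^,@\-][^,]+)$`)

	fmt.Println(rpath.MatchString("-Wl,-rpath,/usr/local/lib")) // true
	// A comma would let one "approved" flag carry extra linker arguments,
	// so this is rejected.
	fmt.Println(rpath.MatchString("-Wl,-rpath,/lib,--allow-multiple-definition")) // false
}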
......@@ -140,9 +140,6 @@ var goodLinkerFlags = [][]string{
var badLinkerFlags = [][]string{
{"-DFOO"},
{"-Dfoo=bar"},
{"-O"},
{"-O2"},
{"-Osmall"},
{"-W"},
{"-Wall"},
{"-fobjc-arc"},
......@@ -155,7 +152,6 @@ var badLinkerFlags = [][]string{
{"-fno-stack-xxx"},
{"-mstack-overflow"},
{"-mno-stack-overflow"},
{"-mmacosx-version"},
{"-mnop-fun-dllimport"},
{"-std=c99"},
{"-xc"},
......
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package objabi
// A FuncID identifies particular functions that need to be treated
// specially by the runtime.
// Note that in some situations involving plugins, there may be multiple
// copies of a particular special runtime function.
// Note: this list must match the list in runtime/symtab.go.
type FuncID uint32
const (
FuncID_normal FuncID = iota // not a special function
FuncID_goexit
FuncID_jmpdefer
FuncID_mcall
FuncID_morestack
FuncID_mstart
FuncID_rt0_go
FuncID_asmcgocall
FuncID_sigpanic
FuncID_runfinq
FuncID_bgsweep
FuncID_forcegchelper
FuncID_timerproc
FuncID_gcBgMarkWorker
FuncID_systemstack_switch
FuncID_systemstack
FuncID_cgocallback_gofunc
FuncID_gogo
FuncID_externalthreadhandler
)
......@@ -11,6 +11,7 @@ import (
"crypto/rand"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"encoding/pem"
"fmt"
"io/ioutil"
......@@ -42,6 +43,7 @@ type nameConstraintsTest struct {
roots []constraintsSpec
intermediates [][]constraintsSpec
leaf leafSpec
requestedEKUs []ExtKeyUsage
expectedError string
noOpenSSL bool
}
......@@ -1444,6 +1446,118 @@ var nameConstraintsTests = []nameConstraintsTest{
},
expectedError: "\"https://example.com/test\" is excluded",
},
// #75: While serverAuth in a CA certificate permits clientAuth in a leaf,
// serverAuth in a leaf shouldn't permit clientAuth when requested in
// VerifyOptions.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"dns:example.com"},
ekus: []string{"serverAuth"},
},
requestedEKUs: []ExtKeyUsage{ExtKeyUsageClientAuth},
expectedError: "incompatible key usage",
},
// #76: However, MSSGC in a leaf should match a request for serverAuth.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"dns:example.com"},
ekus: []string{"msSGC"},
},
requestedEKUs: []ExtKeyUsage{ExtKeyUsageServerAuth},
},
// An invalid DNS SAN should be detected only at validation time so
// that we can process CA certificates in the wild that have invalid SANs.
// See https://github.com/golang/go/issues/23995
// #77: an invalid DNS or mail SAN will not be detected if name constraint
// checking is not triggered.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"dns:this is invalid", "email:this @ is invalid"},
},
},
// #78: an invalid DNS SAN will be detected if any name constraint checking
// is triggered.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{
bad: []string{"uri:"},
},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"dns:this is invalid"},
},
expectedError: "cannot parse dnsName",
},
// #79: an invalid email SAN will be detected if any name constraint
// checking is triggered.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{
bad: []string{"uri:"},
},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"email:this @ is invalid"},
},
expectedError: "cannot parse rfc822Name",
},
// #80: if several EKUs are requested, satisfying any of them is sufficient.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{},
},
},
leaf: leafSpec{
sans: []string{"dns:example.com"},
ekus: []string{"email"},
},
requestedEKUs: []ExtKeyUsage{ExtKeyUsageClientAuth, ExtKeyUsageEmailProtection},
},
}
func makeConstraintsCACert(constraints constraintsSpec, name string, key *ecdsa.PrivateKey, parent *Certificate, parentKey *ecdsa.PrivateKey) (*Certificate, error) {
......@@ -1459,7 +1573,7 @@ func makeConstraintsCACert(constraints constraintsSpec, name string, key *ecdsa.
NotAfter: time.Unix(2000, 0),
KeyUsage: KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
IsCA: true,
}
if err := addConstraintsToTemplate(constraints, template); err != nil {
......@@ -1497,7 +1611,7 @@ func makeConstraintsLeafCert(leaf leafSpec, key *ecdsa.PrivateKey, parent *Certi
NotAfter: time.Unix(2000, 0),
KeyUsage: KeyUsageDigitalSignature,
BasicConstraintsValid: true,
IsCA: false,
IsCA: false,
}
for _, name := range leaf.sans {
......@@ -1512,6 +1626,13 @@ func makeConstraintsLeafCert(leaf leafSpec, key *ecdsa.PrivateKey, parent *Certi
}
template.IPAddresses = append(template.IPAddresses, ip)
case strings.HasPrefix(name, "invalidip:"):
ipBytes, err := hex.DecodeString(name[10:])
if err != nil {
return nil, fmt.Errorf("cannot parse invalid IP: %s", err)
}
template.IPAddresses = append(template.IPAddresses, net.IP(ipBytes))
case strings.HasPrefix(name, "email:"):
template.EmailAddresses = append(template.EmailAddresses, name[6:])
......@@ -1781,6 +1902,7 @@ func TestConstraintCases(t *testing.T) {
Roots: rootPool,
Intermediates: intermediatePool,
CurrentTime: time.Unix(1500, 0),
KeyUsages: test.requestedEKUs,
}
_, err = leafCert.Verify(verifyOpts)
......@@ -1972,12 +2094,13 @@ func TestBadNamesInConstraints(t *testing.T) {
}
func TestBadNamesInSANs(t *testing.T) {
// Bad names in SANs should not parse.
// Bad names in URI and IP SANs should not parse. Bad DNS and email SANs
// will parse and are tested in name constraint tests at the top of this
// file.
badNames := []string{
"dns:foo.com.",
"email:abc@foo.com.",
"email:foo.com.",
"uri:https://example.com./dsf",
"invalidip:0102",
"invalidip:0102030405",
}
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
......
......@@ -6,12 +6,14 @@ package x509
import (
"bytes"
"encoding/asn1"
"errors"
"fmt"
"net"
"net/url"
"reflect"
"runtime"
"strconv"
"strings"
"time"
"unicode/utf8"
......@@ -178,10 +180,14 @@ type VerifyOptions struct {
Intermediates *CertPool
Roots *CertPool // if nil, the system roots are used
CurrentTime time.Time // if zero, the current time is used
// KeyUsage specifies which Extended Key Usage values are acceptable.
// An empty list means ExtKeyUsageServerAuth. Key usage is considered a
// constraint down the chain which mirrors Windows CryptoAPI behavior,
// but not the spec. To accept any key usage, include ExtKeyUsageAny.
// KeyUsage specifies which Extended Key Usage values are acceptable. A leaf
// certificate is accepted if it contains any of the listed values. An empty
// list means ExtKeyUsageServerAuth. To accept any key usage, include
// ExtKeyUsageAny.
//
// Certificate chains are required to nest extended key usage values,
// irrespective of this value. This matches the Windows CryptoAPI behavior,
// but not the spec.
KeyUsages []ExtKeyUsage
// MaxConstraintComparisions is the maximum number of comparisons to
// perform when checking a given certificate's name constraints. If
......@@ -543,11 +549,16 @@ func (c *Certificate) checkNameConstraints(count *int,
return nil
}
const (
checkingAgainstIssuerCert = iota
checkingAgainstLeafCert
)
// ekuPermittedBy returns true iff the given extended key usage is permitted by
// the given EKU from a certificate. Normally, this would be a simple
// comparison plus a special case for the “any” EKU. But, in order to support
// existing certificates, some exceptions are made.
func ekuPermittedBy(eku, certEKU ExtKeyUsage) bool {
func ekuPermittedBy(eku, certEKU ExtKeyUsage, context int) bool {
if certEKU == ExtKeyUsageAny || eku == certEKU {
return true
}
......@@ -564,18 +575,23 @@ func ekuPermittedBy(eku, certEKU ExtKeyUsage) bool {
eku = mapServerAuthEKUs(eku)
certEKU = mapServerAuthEKUs(certEKU)
if eku == certEKU ||
// ServerAuth in a CA permits ClientAuth in the leaf.
(eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
if eku == certEKU {
return true
}
// If checking a requested EKU against the list in a leaf certificate there
// are fewer exceptions.
if context == checkingAgainstLeafCert {
return false
}
// ServerAuth in a CA permits ClientAuth in the leaf.
return (eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
// Any CA may issue an OCSP responder certificate.
eku == ExtKeyUsageOCSPSigning ||
// Code-signing CAs can use Microsoft's commercial and
// kernel-mode EKUs.
((eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning) {
return true
}
return false
(eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning
}
// isValid performs validity checks on c given that it is a candidate to append
......@@ -630,8 +646,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
name := string(data)
mailbox, ok := parseRFC2821Mailbox(name)
if !ok {
// This certificate should not have parsed.
return errors.New("x509: internal error: rfc822Name SAN failed to parse")
return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
}
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
......@@ -643,6 +658,10 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
case nameTypeDNS:
name := string(data)
if _, ok := domainToReverseLabels(name); !ok {
return fmt.Errorf("x509: cannot parse dnsName %q", name)
}
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
func(parsedName, constraint interface{}) (bool, error) {
return matchDomainConstraint(parsedName.(string), constraint.(string))
......@@ -716,7 +735,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
for _, caEKU := range c.ExtKeyUsage {
comparisonCount++
if ekuPermittedBy(eku, caEKU) {
if ekuPermittedBy(eku, caEKU, checkingAgainstIssuerCert) {
continue NextEKU
}
}
......@@ -773,6 +792,18 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
return nil
}
// formatOID formats an ASN.1 OBJECT IDENTIFIER in the common, dotted style.
func formatOID(oid asn1.ObjectIdentifier) string {
ret := ""
for i, v := range oid {
if i > 0 {
ret += "."
}
ret += strconv.Itoa(v)
}
return ret
}
// Verify attempts to verify c by building one or more chains from c to a
// certificate in opts.Roots, using certificates in opts.Intermediates if
// needed. If successful, it returns one or more chains where the first
......@@ -847,16 +878,33 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e
}
if checkEKU {
foundMatch := false
NextUsage:
for _, eku := range requestedKeyUsages {
for _, leafEKU := range c.ExtKeyUsage {
if ekuPermittedBy(eku, leafEKU) {
continue NextUsage
if ekuPermittedBy(eku, leafEKU, checkingAgainstLeafCert) {
foundMatch = true
break NextUsage
}
}
}
oid, _ := oidFromExtKeyUsage(eku)
return nil, CertificateInvalidError{c, IncompatibleUsage, fmt.Sprintf("%#v", oid)}
if !foundMatch {
msg := "leaf contains the following, recognized EKUs: "
for i, leafEKU := range c.ExtKeyUsage {
oid, ok := oidFromExtKeyUsage(leafEKU)
if !ok {
continue
}
if i > 0 {
msg += ", "
}
msg += formatOID(oid)
}
return nil, CertificateInvalidError{c, IncompatibleUsage, msg}
}
}
......
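A brief usage sketch of the VerifyOptions.KeyUsages semantics documented in the hunk above (placeholder certificate and pools; this is not code from the patch): the leaf is accepted if it matches any requested EKU, and an empty slice defaults to ExtKeyUsageServerAuth.

package main

import (
	"crypto/x509"
	"fmt"
)

// verifyForClientAuth accepts the chain if the leaf allows either client
// authentication or email protection, much like nameConstraintsTest #80 above.
func verifyForClientAuth(leaf *x509.Certificate, roots, intermediates *x509.CertPool) error {
	opts := x509.VerifyOptions{
		Roots:         roots,
		Intermediates: intermediates,
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageEmailProtection},
	}
	_, err := leaf.Verify(opts)
	return err
}

func main() {
	fmt.Println("verifyForClientAuth needs a real leaf certificate and cert pools")
}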
......@@ -706,7 +706,9 @@ type Certificate struct {
OCSPServer []string
IssuingCertificateURL []string
// Subject Alternate Name values
// Subject Alternate Name values. (Note that these values may not be valid
// if invalid values were contained within a parsed certificate. For
// example, an element of DNSNames may not be a valid DNS domain name.)
DNSNames []string
EmailAddresses []string
IPAddresses []net.IP
......@@ -1126,17 +1128,9 @@ func parseSANExtension(value []byte) (dnsNames, emailAddresses []string, ipAddre
err = forEachSAN(value, func(tag int, data []byte) error {
switch tag {
case nameTypeEmail:
mailbox := string(data)
if _, ok := parseRFC2821Mailbox(mailbox); !ok {
return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
}
emailAddresses = append(emailAddresses, mailbox)
emailAddresses = append(emailAddresses, string(data))
case nameTypeDNS:
domain := string(data)
if _, ok := domainToReverseLabels(domain); !ok {
return fmt.Errorf("x509: cannot parse dnsName %q", string(data))
}
dnsNames = append(dnsNames, domain)
dnsNames = append(dnsNames, string(data))
case nameTypeURI:
uri, err := url.Parse(string(data))
if err != nil {
......@@ -1153,7 +1147,7 @@ func parseSANExtension(value []byte) (dnsNames, emailAddresses []string, ipAddre
case net.IPv4len, net.IPv6len:
ipAddresses = append(ipAddresses, data)
default:
return errors.New("x509: certificate contained IP address of length " + strconv.Itoa(len(data)))
return errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data)))
}
}
......@@ -2543,7 +2537,7 @@ func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) {
func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) {
out := &CertificateRequest{
Raw: in.Raw,
Raw: in.Raw,
RawTBSCertificateRequest: in.TBSCSR.Raw,
RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw,
RawSubject: in.TBSCSR.Subject.FullBytes,
......
......@@ -443,10 +443,25 @@ func (d *decodeState) valueQuoted() interface{} {
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
// Issue #24153 indicates that it is generally not a guaranteed property
// that you may round-trip a reflect.Value by calling Value.Addr().Elem()
// and expect the value to still be settable for values derived from
// unexported embedded struct fields.
//
// The logic below effectively does this when it first addresses the value
// (to satisfy possible pointer methods) and continues to dereference
// subsequent pointers as necessary.
//
// After the first round-trip, we set v back to the original value to
// preserve the original RW flags contained in reflect.Value.
v0 := v
haveAddr := false
// If v is a named type and is addressable,
// start with its address, so that if the type has pointer methods,
// we find them.
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
haveAddr = true
v = v.Addr()
}
for {
......@@ -455,6 +470,7 @@ func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler,
if v.Kind() == reflect.Interface && !v.IsNil() {
e := v.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
haveAddr = false
v = e
continue
}
......@@ -480,7 +496,13 @@ func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler,
}
}
}
v = v.Elem()
if haveAddr {
v = v0 // restore original value after round-trip Value.Addr().Elem()
haveAddr = false
} else {
v = v.Elem()
}
}
return nil, nil, v
}
......
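The new comment at the top of indirect describes a reflect subtlety that is easier to see in isolation. In the sketch below (a toy type, not the json decoder), an exported field promoted through an unexported embedded struct is settable when reached directly, but after the Value is round-tripped through Addr().Elem() that is no longer guaranteed, which is why the decoder restores the saved v0:

package main

import (
	"fmt"
	"reflect"
)

type embed struct{ Q int }
type S struct{ embed }

func main() {
	s := &S{}
	direct := reflect.ValueOf(s).Elem().Field(0) // the unexported embedded struct
	fmt.Println(direct.Field(0).CanSet())        // true: promoted exported field Q is settable
	roundTrip := direct.Addr().Elem()            // the round-trip the comment warns about
	fmt.Println(roundTrip.Field(0).CanSet())     // not guaranteed to be true (issue 24153)
}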
......@@ -615,9 +615,9 @@ var unmarshalTests = []unmarshalTest{
out: S5{S8: S8{S9: S9{Y: 2}}},
},
{
in: `{"X": 1,"Y":2}`,
ptr: new(S5),
err: fmt.Errorf("json: unknown field \"X\""),
in: `{"X": 1,"Y":2}`,
ptr: new(S5),
err: fmt.Errorf("json: unknown field \"X\""),
disallowUnknownFields: true,
},
{
......@@ -626,9 +626,9 @@ var unmarshalTests = []unmarshalTest{
out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}},
},
{
in: `{"X": 1,"Y":2}`,
ptr: new(S10),
err: fmt.Errorf("json: unknown field \"X\""),
in: `{"X": 1,"Y":2}`,
ptr: new(S10),
err: fmt.Errorf("json: unknown field \"X\""),
disallowUnknownFields: true,
},
......@@ -835,8 +835,8 @@ var unmarshalTests = []unmarshalTest{
"Q": 18,
"extra": true
}`,
ptr: new(Top),
err: fmt.Errorf("json: unknown field \"extra\""),
ptr: new(Top),
err: fmt.Errorf("json: unknown field \"extra\""),
disallowUnknownFields: true,
},
{
......@@ -862,8 +862,8 @@ var unmarshalTests = []unmarshalTest{
"Z": 17,
"Q": 18
}`,
ptr: new(Top),
err: fmt.Errorf("json: unknown field \"extra\""),
ptr: new(Top),
err: fmt.Errorf("json: unknown field \"extra\""),
disallowUnknownFields: true,
},
}
......@@ -2089,10 +2089,14 @@ func TestInvalidStringOption(t *testing.T) {
}
}
// Test unmarshal behavior with regards to embedded pointers to unexported structs.
// If unallocated, this returns an error because unmarshal cannot set the field.
// Issue 21357.
func TestUnmarshalEmbeddedPointerUnexported(t *testing.T) {
// Test unmarshal behavior with regards to embedded unexported structs.
//
// (Issue 21357) If the embedded struct is a pointer and is unallocated,
// this returns an error because unmarshal cannot set the field.
//
// (Issue 24152) If the embedded struct is given an explicit name,
// ensure that the normal unmarshal logic does not panic in reflect.
func TestUnmarshalEmbeddedUnexported(t *testing.T) {
type (
embed1 struct{ Q int }
embed2 struct{ Q int }
......@@ -2119,6 +2123,18 @@ func TestUnmarshalEmbeddedPointerUnexported(t *testing.T) {
*embed3
R int
}
S6 struct {
embed1 `json:"embed1"`
}
S7 struct {
embed1 `json:"embed1"`
embed2
}
S8 struct {
embed1 `json:"embed1"`
embed2 `json:"embed2"`
Q int
}
)
tests := []struct {
......@@ -2154,6 +2170,32 @@ func TestUnmarshalEmbeddedPointerUnexported(t *testing.T) {
ptr: new(S5),
out: &S5{R: 2},
err: fmt.Errorf("json: cannot set embedded pointer to unexported struct: json.embed3"),
}, {
// Issue 24152, ensure decodeState.indirect does not panic.
in: `{"embed1": {"Q": 1}}`,
ptr: new(S6),
out: &S6{embed1{1}},
}, {
// Issue 24153, check that we can still set forwarded fields even in
// the presence of a name conflict.
//
// This relies on obscure behavior of reflect where it is possible
// to set a forwarded exported field on an unexported embedded struct
// even though there is a name conflict, even when it would have been
// impossible to do so according to Go visibility rules.
// Go forbids this because it is ambiguous whether S7.Q refers to
// S7.embed1.Q or S7.embed2.Q. Since embed1 and embed2 are unexported,
// it should be impossible for an external package to set either Q.
//
// It is probably okay for a future reflect change to break this.
in: `{"embed1": {"Q": 1}, "Q": 2}`,
ptr: new(S7),
out: &S7{embed1{1}, embed2{2}},
}, {
// Issue 24153, similar to the S7 case.
in: `{"embed1": {"Q": 1}, "embed2": {"Q": 2}, "Q": 3}`,
ptr: new(S8),
out: &S8{embed1{1}, embed2{2}, 3},
}}
for i, tt := range tests {
......
......@@ -44,9 +44,9 @@ func New(ctxt *build.Context, fset *token.FileSet, packages map[string]*types.Pa
// for a package that is in the process of being imported.
var importing types.Package
// Import(path) is a shortcut for ImportFrom(path, "", 0).
// Import(path) is a shortcut for ImportFrom(path, ".", 0).
func (p *Importer) Import(path string) (*types.Package, error) {
return p.ImportFrom(path, "", 0)
return p.ImportFrom(path, ".", 0) // use "." rather than "" (see issue #24441)
}
// ImportFrom imports the package with the given import path resolved from the given srcDir,
......@@ -60,23 +60,10 @@ func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*type
panic("non-zero import mode")
}
// determine package path (do vendor resolution)
var bp *build.Package
var err error
switch {
default:
if abs, err := p.absPath(srcDir); err == nil { // see issue #14282
srcDir = abs
}
bp, err = p.ctxt.Import(path, srcDir, build.FindOnly)
case build.IsLocalImport(path):
// "./x" -> "srcDir/x"
bp, err = p.ctxt.ImportDir(filepath.Join(srcDir, path), build.FindOnly)
case p.isAbsPath(path):
return nil, fmt.Errorf("invalid absolute import path %q", path)
if abs, err := p.absPath(srcDir); err == nil { // see issue #14282
srcDir = abs
}
bp, err := p.ctxt.Import(path, srcDir, 0)
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
......@@ -113,11 +100,6 @@ func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*type
}
}()
// collect package files
bp, err = p.ctxt.ImportDir(bp.Dir, 0)
if err != nil {
return nil, err // err may be *build.NoGoError - return as is
}
var filenames []string
filenames = append(filenames, bp.GoFiles...)
filenames = append(filenames, bp.CgoFiles...)
......
......@@ -10,6 +10,7 @@ import (
"go/types"
"internal/testenv"
"io/ioutil"
"path"
"path/filepath"
"runtime"
"strings"
......@@ -162,3 +163,34 @@ func TestIssue20855(t *testing.T) {
t.Error("got no package despite no hard errors")
}
}
func testImportPath(t *testing.T, pkgPath string) {
if !testenv.HasSrc() {
t.Skip("no source code available")
}
pkgName := path.Base(pkgPath)
pkg, err := importer.Import(pkgPath)
if err != nil {
t.Fatal(err)
}
if pkg.Name() != pkgName {
t.Errorf("got %q; want %q", pkg.Name(), pkgName)
}
if pkg.Path() != pkgPath {
t.Errorf("got %q; want %q", pkg.Path(), pkgPath)
}
}
// TestIssue23092 tests relative imports.
func TestIssue23092(t *testing.T) {
testImportPath(t, "./testdata/issue23092")
}
// TestIssue24392 tests imports against a path containing 'testdata'.
func TestIssue24392(t *testing.T) {
testImportPath(t, "go/internal/srcimporter/testdata/issue24392")
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package issue23092
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package issue24392
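For reference, the srcimporter changed above is the one selected by importer.For("source", nil); with srcDir now defaulting to ".", the plain Import entry point resolves paths the same way ImportFrom does, including the relative path exercised by TestIssue23092. A short usage sketch (not code from the patch):

package main

import (
	"fmt"
	"go/importer"
)

func main() {
	// "source" selects the source-based importer modified above.
	pkg, err := importer.For("source", nil).Import("fmt")
	if err != nil {
		fmt.Println("import failed:", err)
		return
	}
	fmt.Println(pkg.Path(), pkg.Name()) // fmt fmt
}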
......@@ -103,11 +103,21 @@ func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
g.mu.Unlock()
}
// Forget tells the singleflight to forget about a key. Future calls
// to Do for this key will call the function rather than waiting for
// an earlier call to complete.
func (g *Group) Forget(key string) {
// ForgetUnshared tells the singleflight to forget about a key if it is not
// shared with any other goroutines. Future calls to Do for a forgotten key
// will call the function rather than waiting for an earlier call to complete.
// Returns whether the key was forgotten or unknown--that is, whether no
// other goroutines are waiting for the result.
func (g *Group) ForgetUnshared(key string) bool {
g.mu.Lock()
delete(g.m, key)
g.mu.Unlock()
defer g.mu.Unlock()
c, ok := g.m[key]
if !ok {
return true
}
if c.dups == 0 {
delete(g.m, key)
return true
}
return false
}
......@@ -80,6 +80,7 @@ func init() {
// command line, with arguments separated by NUL bytes.
// The package initialization registers it as /debug/pprof/cmdline.
func Cmdline(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Content-Type-Options", "nosniff")
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprintf(w, strings.Join(os.Args, "\x00"))
}
......@@ -100,33 +101,36 @@ func durationExceedsWriteTimeout(r *http.Request, seconds float64) bool {
return ok && srv.WriteTimeout != 0 && seconds >= srv.WriteTimeout.Seconds()
}
func serveError(w http.ResponseWriter, status int, txt string) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.Header().Del("Content-Disposition")
w.WriteHeader(status)
fmt.Fprintln(w, txt)
}
// Profile responds with the pprof-formatted cpu profile.
// The package initialization registers it as /debug/pprof/profile.
func Profile(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Content-Type-Options", "nosniff")
sec, _ := strconv.ParseInt(r.FormValue("seconds"), 10, 64)
if sec == 0 {
sec = 30
}
if durationExceedsWriteTimeout(r, float64(sec)) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintln(w, "profile duration exceeds server's WriteTimeout")
serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout")
return
}
// Set Content Type assuming StartCPUProfile will work,
// because if it does it starts writing.
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", `attachment; filename="profile"`)
if err := pprof.StartCPUProfile(w); err != nil {
// StartCPUProfile failed, so no writes yet.
// Can change header back to text content
// and send error code.
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err)
serveError(w, http.StatusInternalServerError,
fmt.Sprintf("Could not enable CPU profiling: %s", err))
return
}
sleep(w, time.Duration(sec)*time.Second)
......@@ -137,29 +141,25 @@ func Profile(w http.ResponseWriter, r *http.Request) {
// Tracing lasts for duration specified in seconds GET parameter, or for 1 second if not specified.
// The package initialization registers it as /debug/pprof/trace.
func Trace(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Content-Type-Options", "nosniff")
sec, err := strconv.ParseFloat(r.FormValue("seconds"), 64)
if sec <= 0 || err != nil {
sec = 1
}
if durationExceedsWriteTimeout(r, sec) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintln(w, "profile duration exceeds server's WriteTimeout")
serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout")
return
}
// Set Content Type assuming trace.Start will work,
// because if it does it starts writing.
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", `attachment; filename="trace"`)
if err := trace.Start(w); err != nil {
// trace.Start failed, so no writes yet.
// Can change header back to text content and send error code.
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Could not enable tracing: %s\n", err)
serveError(w, http.StatusInternalServerError,
fmt.Sprintf("Could not enable tracing: %s", err))
return
}
sleep(w, time.Duration(sec*float64(time.Second)))
......@@ -170,6 +170,7 @@ func Trace(w http.ResponseWriter, r *http.Request) {
// responding with a table mapping program counters to function names.
// The package initialization registers it as /debug/pprof/symbol.
func Symbol(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Content-Type-Options", "nosniff")
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
// We have to read the whole POST body before
......@@ -222,18 +223,23 @@ func Handler(name string) http.Handler {
type handler string
func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
debug, _ := strconv.Atoi(r.FormValue("debug"))
w.Header().Set("X-Content-Type-Options", "nosniff")
p := pprof.Lookup(string(name))
if p == nil {
w.WriteHeader(404)
fmt.Fprintf(w, "Unknown profile: %s\n", name)
serveError(w, http.StatusNotFound, "Unknown profile")
return
}
gc, _ := strconv.Atoi(r.FormValue("gc"))
if name == "heap" && gc > 0 {
runtime.GC()
}
debug, _ := strconv.Atoi(r.FormValue("debug"))
if debug != 0 {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
} else {
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, name))
}
p.WriteTo(w, debug)
}
......
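As a reminder of how these handlers are normally reached (standard library usage, not part of the patch): importing net/http/pprof for its side effects registers the /debug/pprof routes whose responses the hunks above now send with X-Content-Type-Options: nosniff and, for binary profiles, a Content-Disposition attachment header.

package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* handlers on http.DefaultServeMux
)

func main() {
	// e.g. http://localhost:6060/debug/pprof/heap now arrives with the
	// headers set in the hunks above.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}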
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pprof
import (
"bytes"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)
func TestHandlers(t *testing.T) {
testCases := []struct {
path string
handler http.HandlerFunc
statusCode int
contentType string
contentDisposition string
resp []byte
}{
{"/debug/pprof/<script>scripty<script>", Index, http.StatusNotFound, "text/plain; charset=utf-8", "", []byte("Unknown profile\n")},
{"/debug/pprof/heap", Index, http.StatusOK, "application/octet-stream", `attachment; filename="heap"`, nil},
{"/debug/pprof/heap?debug=1", Index, http.StatusOK, "text/plain; charset=utf-8", "", nil},
{"/debug/pprof/cmdline", Cmdline, http.StatusOK, "text/plain; charset=utf-8", "", nil},
{"/debug/pprof/profile?seconds=1", Profile, http.StatusOK, "application/octet-stream", `attachment; filename="profile"`, nil},
{"/debug/pprof/symbol", Symbol, http.StatusOK, "text/plain; charset=utf-8", "", nil},
{"/debug/pprof/trace", Trace, http.StatusOK, "application/octet-stream", `attachment; filename="trace"`, nil},
}
for _, tc := range testCases {
t.Run(tc.path, func(t *testing.T) {
req := httptest.NewRequest("GET", "http://example.com"+tc.path, nil)
w := httptest.NewRecorder()
tc.handler(w, req)
resp := w.Result()
if got, want := resp.StatusCode, tc.statusCode; got != want {
t.Errorf("status code: got %d; want %d", got, want)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("when reading response body, expected non-nil err; got %v", err)
}
if got, want := resp.Header.Get("X-Content-Type-Options"), "nosniff"; got != want {
t.Errorf("X-Content-Type-Options: got %q; want %q", got, want)
}
if got, want := resp.Header.Get("Content-Type"), tc.contentType; got != want {
t.Errorf("Content-Type: got %q; want %q", got, want)
}
if got, want := resp.Header.Get("Content-Disposition"), tc.contentDisposition; got != want {
t.Errorf("Content-Disposition: got %q; want %q", got, want)
}
if resp.StatusCode == http.StatusOK {
return
}
if got, want := resp.Header.Get("X-Go-Pprof"), "1"; got != want {
t.Errorf("X-Go-Pprof: got %q; want %q", got, want)
}
if !bytes.Equal(body, tc.resp) {
t.Errorf("response: got %q; want %q", body, tc.resp)
}
})
}
}
......@@ -194,10 +194,16 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, err
resolverFunc = alt
}
// We don't want a cancelation of ctx to affect the
// lookupGroup operation. Otherwise if our context gets
// canceled it might cause an error to be returned to a lookup
// using a completely different context.
lookupGroupCtx, lookupGroupCancel := context.WithCancel(context.Background())
dnsWaitGroup.Add(1)
ch, called := lookupGroup.DoChan(host, func() (interface{}, error) {
defer dnsWaitGroup.Done()
return testHookLookupIP(ctx, resolverFunc, host)
return testHookLookupIP(lookupGroupCtx, resolverFunc, host)
})
if !called {
dnsWaitGroup.Done()
......@@ -205,20 +211,28 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, err
select {
case <-ctx.Done():
// If the DNS lookup timed out for some reason, force
// future requests to start the DNS lookup again
// rather than waiting for the current lookup to
// complete. See issue 8602.
ctxErr := ctx.Err()
if ctxErr == context.DeadlineExceeded {
lookupGroup.Forget(host)
// Our context was canceled. If we are the only
// goroutine looking up this key, then drop the key
// from the lookupGroup and cancel the lookup.
// If there are other goroutines looking up this key,
// let the lookup continue uncanceled, and let later
// lookups with the same key share the result.
// See issues 8602, 20703, 22724.
if lookupGroup.ForgetUnshared(host) {
lookupGroupCancel()
} else {
go func() {
<-ch
lookupGroupCancel()
}()
}
err := mapErr(ctxErr)
err := mapErr(ctx.Err())
if trace != nil && trace.DNSDone != nil {
trace.DNSDone(nil, false, err)
}
return nil, err
case r := <-ch:
lookupGroupCancel()
if trace != nil && trace.DNSDone != nil {
addrs, _ := r.Val.([]IPAddr)
trace.DNSDone(ipAddrsEface(addrs), r.Shared, r.Err)
......
......@@ -791,3 +791,28 @@ func TestLookupNonLDH(t *testing.T) {
t.Fatalf("lookup error = %v, want %v", err, errNoSuchHost)
}
}
func TestLookupContextCancel(t *testing.T) {
if testenv.Builder() == "" {
testenv.MustHaveExternalNetwork(t)
}
if runtime.GOOS == "nacl" {
t.Skip("skip on nacl")
}
defer dnsWaitGroup.Wait()
ctx, ctxCancel := context.WithCancel(context.Background())
ctxCancel()
_, err := DefaultResolver.LookupIPAddr(ctx, "google.com")
if err != errCanceled {
testenv.SkipFlakyNet(t)
t.Fatal(err)
}
ctx = context.Background()
_, err = DefaultResolver.LookupIPAddr(ctx, "google.com")
if err != nil {
testenv.SkipFlakyNet(t)
t.Fatal(err)
}
}
......@@ -87,6 +87,7 @@ func TestTCPSpuriousConnSetupCompletionWithCancel(t *testing.T) {
if testenv.Builder() == "" {
testenv.MustHaveExternalNetwork(t)
}
defer dnsWaitGroup.Wait()
t.Parallel()
const tries = 10000
var wg sync.WaitGroup
......
......@@ -139,14 +139,12 @@ func typestring(x interface{}) string {
}
// printany prints an argument passed to panic.
// If panic is called with a value that has a String or Error method,
// it has already been converted into a string by preprintpanics.
func printany(i interface{}) {
switch v := i.(type) {
case nil:
print("nil")
case stringer:
print(v.String())
case error:
print(v.Error())
case bool:
print(v)
case int:
......
......@@ -384,7 +384,6 @@ func Goexit() {
// Call all Error and String methods before freezing the world.
// Used when crashing with panicking.
// This must match types handled by printany.
func preprintpanics(p *_panic) {
defer func() {
if recover() != nil {
......@@ -410,8 +409,6 @@ func printpanics(p *_panic) {
print("\t")
}
print("panic: ")
// Because of preprintpanics, p.arg cannot be an error or
// stringer, so this won't call into user code.
printany(p.arg)
if p.recovered {
print(" [recovered]")
......
......@@ -423,6 +423,12 @@ func releaseSudog(s *sudog) {
// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
// CAREFUL: In programs with plugins, funcPC can return different values
// for the same function (because there are actually multiple copies of
// the same function in the address space). To be safe, don't use the
// results of this function in any == expression. It is only safe to
// use the result as an address at which to start executing code.
//
// For gccgo note that this differs from the gc implementation; the gc
// implementation adds sys.PtrSize to the address of the interface
// value, but GCC's alias analysis decides that that can not be a
......
......@@ -115,6 +115,35 @@ type Func struct {
entry uintptr
}
// A FuncID identifies particular functions that need to be treated
// specially by the runtime.
// Note that in some situations involving plugins, there may be multiple
// copies of a particular special runtime function.
// Note: this list must match the list in cmd/internal/objabi/funcid.go.
type funcID uint32
const (
funcID_normal funcID = iota // not a special function
funcID_goexit
funcID_jmpdefer
funcID_mcall
funcID_morestack
funcID_mstart
funcID_rt0_go
funcID_asmcgocall
funcID_sigpanic
funcID_runfinq
funcID_bgsweep
funcID_forcegchelper
funcID_timerproc
funcID_gcBgMarkWorker
funcID_systemstack_switch
funcID_systemstack
funcID_cgocallback_gofunc
funcID_gogo
funcID_externalthreadhandler
)
// FuncForPC returns a *Func describing the function that contains the
// given program counter address, or else nil.
//
......
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "plugin"
func main() {
p, err := plugin.Open("issue24351.so")
if err != nil {
panic(err)
}
f, err := p.Lookup("B")
if err != nil {
panic(err)
}
c := make(chan bool)
f.(func(chan bool))(c)
<-c
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "fmt"
func B(c chan bool) {
go func() {
fmt.Println(1.5)
c <- true
}()
}
......@@ -85,3 +85,8 @@ GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue22175 src/issue22175/main.
GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue.22295.so issue22295.pkg
GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue22295 src/issue22295.pkg/main.go
./issue22295
# Test for issue 24351
GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue24351.so src/issue24351/plugin.go
GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue24351 src/issue24351/main.go
./issue24351
......@@ -790,6 +790,7 @@ func TestRebuilding(t *testing.T) {
// If the .a file is newer than the .so, the .so is rebuilt (but not the .a)
t.Run("newarchive", func(t *testing.T) {
resetFileStamps()
AssertNotRebuilt(t, "new .a file before build", filepath.Join(gopathInstallDir, "depBase.a"))
goCmd(t, "list", "-linkshared", "-f={{.ImportPath}} {{.Stale}} {{.StaleReason}} {{.Target}}", "depBase")
AssertNotRebuilt(t, "new .a file before build", filepath.Join(gopathInstallDir, "depBase.a"))
cleanup := touch(t, filepath.Join(gopathInstallDir, "depBase.a"))
......