Commit af92e385 by Ian Lance Taylor

libgo: Update to weekly.2012-01-20.

From-SVN: r183540
parent df1304ee
354b17404643
9f2be4fbbf69
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
@@ -197,6 +197,16 @@ toolexeclibgocryptox509dir = $(toolexeclibgocryptodir)/x509
toolexeclibgocryptox509_DATA = \
crypto/x509/pkix.gox
toolexeclibgodatabasedir = $(toolexeclibgodir)/database
toolexeclibgodatabase_DATA = \
database/sql.gox
toolexeclibgodatabasesqldir = $(toolexeclibgodatabasedir)/sql
toolexeclibgodatabasesql_DATA = \
database/sql/driver.gox
toolexeclibgodebugdir = $(toolexeclibgodir)/debug
toolexeclibgodebug_DATA = \
@@ -237,15 +247,10 @@ toolexeclibgoexp_DATA = \
exp/norm.gox \
exp/proxy.gox \
exp/spdy.gox \
exp/sql.gox \
exp/ssh.gox \
exp/terminal.gox \
exp/types.gox
exp/types.gox \
exp/utf8string.gox
toolexeclibgoexpsqldir = $(toolexeclibgoexpdir)/sql
toolexeclibgoexpsql_DATA = \
exp/sql/driver.gox
toolexeclibgogodir = $(toolexeclibgodir)/go
@@ -717,6 +722,7 @@ go_net_files = \
go/net/dnsclient_unix.go \
go/net/dnsconfig.go \
go/net/dnsmsg.go \
go/net/doc.go \
$(go_net_newpollserver_file) \
go/net/fd.go \
$(go_net_fd_os_file) \
@@ -793,6 +799,7 @@ endif
go_os_files = \
$(go_os_dir_file) \
go/os/dir.go \
go/os/doc.go \
go/os/env.go \
go/os/error.go \
go/os/error_posix.go \
@@ -1005,7 +1012,8 @@ go_crypto_dsa_files = \
go_crypto_ecdsa_files = \
go/crypto/ecdsa/ecdsa.go
go_crypto_elliptic_files = \
go/crypto/elliptic/elliptic.go
go/crypto/elliptic/elliptic.go \
go/crypto/elliptic/p224.go
go_crypto_hmac_files = \
go/crypto/hmac/hmac.go
go_crypto_md4_files = \
@@ -1094,6 +1102,14 @@ go_crypto_openpgp_s2k_files = \
go_crypto_x509_pkix_files = \
go/crypto/x509/pkix/pkix.go
go_database_sql_files = \
go/database/sql/convert.go \
go/database/sql/sql.go
go_database_sql_driver_files = \
go/database/sql/driver/driver.go \
go/database/sql/driver/types.go
go_debug_dwarf_files = \
go/debug/dwarf/buf.go \
go/debug/dwarf/const.go \
@@ -1179,9 +1195,6 @@ go_exp_spdy_files = \
go/exp/spdy/read.go \
go/exp/spdy/types.go \
go/exp/spdy/write.go
go_exp_sql_files = \
go/exp/sql/convert.go \
go/exp/sql/sql.go
go_exp_ssh_files = \
go/exp/ssh/channel.go \
go/exp/ssh/cipher.go \
@@ -1205,10 +1218,8 @@ go_exp_types_files = \
go/exp/types/gcimporter.go \
go/exp/types/types.go \
go/exp/types/universe.go
go_exp_sql_driver_files = \
go/exp/sql/driver/driver.go \
go/exp/sql/driver/types.go
go_exp_utf8string_files = \
go/exp/utf8string/string.go
go_go_ast_files = \
go/go/ast/ast.go \
@@ -1467,7 +1478,6 @@ go_text_scanner_files = \
go_unicode_utf16_files = \
go/unicode/utf16/utf16.go
go_unicode_utf8_files = \
go/unicode/utf8/string.go \
go/unicode/utf8/utf8.go
# Define Syscall and Syscall6.
@@ -1751,6 +1761,8 @@ libgo_go_objs = \
crypto/openpgp/packet.lo \
crypto/openpgp/s2k.lo \
crypto/x509/pkix.lo \
database/sql.lo \
database/sql/driver.lo \
debug/dwarf.lo \
debug/elf.lo \
debug/gosym.lo \
@@ -1772,11 +1784,10 @@ libgo_go_objs = \
exp/norm.lo \
exp/proxy.lo \
exp/spdy.lo \
exp/sql.lo \
exp/ssh.lo \
exp/terminal.lo \
exp/types.lo \
exp/sql/driver.lo \
exp/utf8string.lo \
html/template.lo \
go/ast.lo \
go/build.lo \
@@ -2646,6 +2657,26 @@ crypto/x509/pkix/check: $(CHECK_DEPS)
@$(CHECK)
.PHONY: crypto/x509/pkix/check
@go_include@ database/sql.lo.dep
database/sql.lo.dep: $(go_database_sql_files)
$(BUILDDEPS)
database/sql.lo: $(go_database_sql_files)
$(BUILDPACKAGE)
database/sql/check: $(CHECK_DEPS)
@$(MKDIR_P) database/sql
@$(CHECK)
.PHONY: database/sql/check
@go_include@ database/sql/driver.lo.dep
database/sql/driver.lo.dep: $(go_database_sql_driver_files)
$(BUILDDEPS)
database/sql/driver.lo: $(go_database_sql_driver_files)
$(BUILDPACKAGE)
database/sql/driver/check: $(CHECK_DEPS)
@$(MKDIR_P) database/sql/driver
@$(CHECK)
.PHONY: database/sql/driver/check
@go_include@ debug/dwarf.lo.dep
debug/dwarf.lo.dep: $(go_debug_dwarf_files)
$(BUILDDEPS)
@@ -2856,16 +2887,6 @@ exp/spdy/check: $(CHECK_DEPS)
@$(CHECK)
.PHONY: exp/spdy/check
@go_include@ exp/sql.lo.dep
exp/sql.lo.dep: $(go_exp_sql_files)
$(BUILDDEPS)
exp/sql.lo: $(go_exp_sql_files)
$(BUILDPACKAGE)
exp/sql/check: $(CHECK_DEPS)
@$(MKDIR_P) exp/sql
@$(CHECK)
.PHONY: exp/sql/check
@go_include@ exp/ssh.lo.dep
exp/ssh.lo.dep: $(go_exp_ssh_files)
$(BUILDDEPS)
@@ -2896,6 +2917,16 @@ exp/types/check: $(CHECK_DEPS)
@$(CHECK)
.PHONY: exp/types/check
@go_include@ exp/utf8string.lo.dep
exp/utf8string.lo.dep: $(go_exp_utf8string_files)
$(BUILDDEPS)
exp/utf8string.lo: $(go_exp_utf8string_files)
$(BUILDPACKAGE)
exp/utf8string/check: $(CHECK_DEPS)
@$(MKDIR_P) exp/utf8string
@$(CHECK)
.PHONY: exp/utf8string/check
@go_include@ exp/inotify.lo.dep
exp/inotify.lo.dep: $(go_exp_inotify_files)
$(BUILDDEPS)
@@ -2906,16 +2937,6 @@ exp/inotify/check: $(CHECK_DEPS)
@$(CHECK)
.PHONY: exp/inotify/check
@go_include@ exp/sql/driver.lo.dep
exp/sql/driver.lo.dep: $(go_exp_sql_driver_files)
$(BUILDDEPS)
exp/sql/driver.lo: $(go_exp_sql_driver_files)
$(BUILDPACKAGE)
exp/sql/driver/check: $(CHECK_DEPS)
@$(MKDIR_P) exp/sql/driver
@$(CHECK)
.PHONY: exp/sql/driver/check
@go_include@ html/template.lo.dep
html/template.lo.dep: $(go_html_template_files)
$(BUILDDEPS)
@@ -3670,6 +3691,12 @@ crypto/openpgp/s2k.gox: crypto/openpgp/s2k.lo
crypto/x509/pkix.gox: crypto/x509/pkix.lo
$(BUILDGOX)
database/sql.gox: database/sql.lo
$(BUILDGOX)
database/sql/driver.gox: database/sql/driver.lo
$(BUILDGOX)
debug/dwarf.gox: debug/dwarf.lo
$(BUILDGOX)
debug/elf.gox: debug/elf.lo
@@ -3716,16 +3743,13 @@ exp/proxy.gox: exp/proxy.lo
$(BUILDGOX)
exp/spdy.gox: exp/spdy.lo
$(BUILDGOX)
exp/sql.gox: exp/sql.lo
$(BUILDGOX)
exp/ssh.gox: exp/ssh.lo
$(BUILDGOX)
exp/terminal.gox: exp/terminal.lo
$(BUILDGOX)
exp/types.gox: exp/types.lo
$(BUILDGOX)
exp/sql/driver.gox: exp/sql/driver.lo
exp/utf8string.gox: exp/utf8string.lo
$(BUILDGOX)
html/template.gox: html/template.lo
@@ -3941,6 +3965,8 @@ TEST_PACKAGES = \
crypto/openpgp/elgamal/check \
crypto/openpgp/packet/check \
crypto/openpgp/s2k/check \
database/sql/check \
database/sql/driver/check \
debug/dwarf/check \
debug/elf/check \
debug/macho/check \
@@ -3962,9 +3988,9 @@ TEST_PACKAGES = \
exp/norm/check \
exp/proxy/check \
exp/spdy/check \
exp/sql/check \
exp/ssh/check \
exp/terminal/check \
exp/utf8string/check \
html/template/check \
go/ast/check \
$(go_build_check_omitted_since_it_calls_6g) \
......
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes_test
import (
......
@@ -3,7 +3,13 @@
// license that can be found in the LICENSE file.
// Package heap provides heap operations for any type that implements
// heap.Interface.
// heap.Interface. A heap is a tree with the property that each node is the
// highest-valued node in its subtree.
//
// A heap is a common way to implement a priority queue. To build a priority
// queue, implement the Heap interface with the (negative) priority as the
// ordering for the Less method, so Push adds items while Pop removes the
// highest-priority item from the queue.
//
package heap
......
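The new package comment above describes building a priority queue on top of heap.Interface. A minimal sketch of that pattern follows; the task type, field names, and priorities are invented for illustration and are not part of this commit.

package main

import (
	"container/heap"
	"fmt"
)

// task is an example element; a higher priority should be popped first.
type task struct {
	name     string
	priority int
}

// taskQueue implements heap.Interface. Less treats a higher priority as
// "smaller" so that Pop returns the highest-priority task.
type taskQueue []task

func (q taskQueue) Len() int            { return len(q) }
func (q taskQueue) Less(i, j int) bool  { return q[i].priority > q[j].priority }
func (q taskQueue) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *taskQueue) Push(x interface{}) { *q = append(*q, x.(task)) }
func (q *taskQueue) Pop() interface{} {
	old := *q
	n := len(old)
	item := old[n-1]
	*q = old[:n-1]
	return item
}

func main() {
	q := &taskQueue{}
	heap.Init(q)
	heap.Push(q, task{"low", 1})
	heap.Push(q, task{"high", 10})
	heap.Push(q, task{"mid", 5})
	fmt.Println(heap.Pop(q).(task).name) // prints "high"
}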
@@ -20,7 +20,7 @@ import (
// PublicKey represents an ECDSA public key.
type PublicKey struct {
*elliptic.Curve
elliptic.Curve
X, Y *big.Int
}
@@ -34,22 +34,23 @@ var one = new(big.Int).SetInt64(1)
// randFieldElement returns a random element of the field underlying the given
// curve using the procedure given in [NSA] A.2.1.
func randFieldElement(c *elliptic.Curve, rand io.Reader) (k *big.Int, err error) {
func randFieldElement(c elliptic.Curve, rand io.Reader) (k *big.Int, err error) {
b := make([]byte, c.BitSize/8+8)
params := c.Params()
b := make([]byte, params.BitSize/8+8)
_, err = io.ReadFull(rand, b)
if err != nil {
return
}
k = new(big.Int).SetBytes(b)
n := new(big.Int).Sub(c.N, one)
n := new(big.Int).Sub(params.N, one)
k.Mod(k, n)
k.Add(k, one)
return
}
// GenerateKey generates a public&private key pair.
func GenerateKey(c *elliptic.Curve, rand io.Reader) (priv *PrivateKey, err error) {
func GenerateKey(c elliptic.Curve, rand io.Reader) (priv *PrivateKey, err error) {
k, err := randFieldElement(c, rand)
if err != nil {
return
@@ -66,8 +67,8 @@ func GenerateKey(c *elliptic.Curve, rand io.Reader) (priv *PrivateKey, err error
// about how this is done. [NSA] suggests that this is done in the obvious
// manner, but [SECG] truncates the hash to the bit-length of the curve order
// first. We follow [SECG] because that's what OpenSSL does.
func hashToInt(hash []byte, c *elliptic.Curve) *big.Int {
func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
orderBits := c.N.BitLen()
orderBits := c.Params().N.BitLen()
orderBytes := (orderBits + 7) / 8
if len(hash) > orderBytes {
hash = hash[:orderBytes]
@@ -88,6 +89,7 @@ func hashToInt(hash []byte, c *elliptic.Curve) *big.Int {
func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
// See [NSA] 3.4.1
c := priv.PublicKey.Curve
N := c.Params().N
var k, kInv *big.Int
for {
@@ -98,9 +100,9 @@ func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err err
return
}
kInv = new(big.Int).ModInverse(k, c.N)
kInv = new(big.Int).ModInverse(k, N)
r, _ = priv.Curve.ScalarBaseMult(k.Bytes())
r.Mod(r, priv.Curve.N)
r.Mod(r, N)
if r.Sign() != 0 {
break
}
@@ -110,7 +112,7 @@ func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err err
s = new(big.Int).Mul(priv.D, r)
s.Add(s, e)
s.Mul(s, kInv)
s.Mod(s, priv.PublicKey.Curve.N)
s.Mod(s, N)
if s.Sign() != 0 {
break
}
@@ -124,15 +126,16 @@ func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err err
func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
// See [NSA] 3.4.2
c := pub.Curve
N := c.Params().N
if r.Sign() == 0 || s.Sign() == 0 {
return false
}
if r.Cmp(c.N) >= 0 || s.Cmp(c.N) >= 0 {
if r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {
return false
}
e := hashToInt(hash, c)
w := new(big.Int).ModInverse(s, c.N)
w := new(big.Int).ModInverse(s, N)
u1 := e.Mul(e, w)
u2 := w.Mul(r, w)
@@ -143,6 +146,6 @@ func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
return false
}
x, _ := c.Add(x1, y1, x2, y2)
x.Mod(x, c.N)
x.Mod(x, N)
return x.Cmp(r) == 0
}
@@ -13,7 +13,7 @@ import (
"testing"
)
func testKeyGeneration(t *testing.T, c *elliptic.Curve, tag string) {
func testKeyGeneration(t *testing.T, c elliptic.Curve, tag string) {
priv, err := GenerateKey(c, rand.Reader)
if err != nil {
t.Errorf("%s: error: %s", tag, err)
@@ -34,7 +34,7 @@ func TestKeyGeneration(t *testing.T) {
testKeyGeneration(t, elliptic.P521(), "p521")
}
func testSignAndVerify(t *testing.T, c *elliptic.Curve, tag string) {
func testSignAndVerify(t *testing.T, c elliptic.Curve, tag string) {
priv, _ := GenerateKey(c, rand.Reader)
hashed := []byte("testing")
......
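The change above switches crypto/ecdsa from *elliptic.Curve to the elliptic.Curve interface. A short sketch of the resulting call pattern; the message and choice of SHA-1 are illustrative only.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha1"
	"fmt"
)

func main() {
	// elliptic.P224() returns an elliptic.Curve interface value, passed
	// by value rather than as *elliptic.Curve.
	priv, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
	if err != nil {
		panic(err)
	}

	h := sha1.New()
	h.Write([]byte("hello"))
	hashed := h.Sum(nil)

	r, s, err := ecdsa.Sign(rand.Reader, priv, hashed)
	if err != nil {
		panic(err)
	}
	fmt.Println(ecdsa.Verify(&priv.PublicKey, hashed, r, s)) // true
}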
@@ -13,7 +13,7 @@ import (
func TestOnCurve(t *testing.T) {
p224 := P224()
if !p224.IsOnCurve(p224.Gx, p224.Gy) {
if !p224.IsOnCurve(p224.Params().Gx, p224.Params().Gy) {
t.Errorf("FAIL")
}
}
@@ -295,7 +295,25 @@ func TestBaseMult(t *testing.T) {
}
x, y := p224.ScalarBaseMult(k.Bytes())
if fmt.Sprintf("%x", x) != e.x || fmt.Sprintf("%x", y) != e.y {
t.Errorf("%d: bad output for k=%s: got (%x, %s), want (%x, %s)", i, e.k, x, y, e.x, e.y)
t.Errorf("%d: bad output for k=%s: got (%x, %x), want (%s, %s)", i, e.k, x, y, e.x, e.y)
}
if testing.Short() && i > 5 {
break
}
}
}
func TestGenericBaseMult(t *testing.T) {
// We use the P224 CurveParams directly in order to test the generic implementation.
p224 := P224().Params()
for i, e := range p224BaseMultTests {
k, ok := new(big.Int).SetString(e.k, 10)
if !ok {
t.Errorf("%d: bad value for k: %s", i, e.k)
}
x, y := p224.ScalarBaseMult(k.Bytes())
if fmt.Sprintf("%x", x) != e.x || fmt.Sprintf("%x", y) != e.y {
t.Errorf("%d: bad output for k=%s: got (%x, %x), want (%s, %s)", i, e.k, x, y, e.x, e.y)
} }
if testing.Short() && i > 5 {
break
@@ -316,13 +334,13 @@ func BenchmarkBaseMult(b *testing.B) {
func TestMarshal(t *testing.T) {
p224 := P224()
_, x, y, err := p224.GenerateKey(rand.Reader)
_, x, y, err := GenerateKey(p224, rand.Reader)
if err != nil {
t.Error(err)
return
}
serialized := p224.Marshal(x, y)
serialized := Marshal(p224, x, y)
xx, yy := p224.Unmarshal(serialized)
xx, yy := Unmarshal(p224, serialized)
if xx == nil {
t.Error("failed to unmarshal")
return
......
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package elliptic
import (
"math/big"
"testing"
)
var toFromBigTests = []string{
"0",
"1",
"23",
"b70e0cb46bb4bf7f321390b94a03c1d356c01122343280d6105c1d21",
"706a46d476dcb76798e6046d89474788d164c18032d268fd10704fa6",
}
func p224AlternativeToBig(in *p224FieldElement) *big.Int {
ret := new(big.Int)
tmp := new(big.Int)
for i := uint(0); i < 8; i++ {
tmp.SetInt64(int64(in[i]))
tmp.Lsh(tmp, 28*i)
ret.Add(ret, tmp)
}
ret.Mod(ret, p224.P)
return ret
}
func TestToFromBig(t *testing.T) {
for i, test := range toFromBigTests {
n, _ := new(big.Int).SetString(test, 16)
var x p224FieldElement
p224FromBig(&x, n)
m := p224ToBig(&x)
if n.Cmp(m) != 0 {
t.Errorf("#%d: %x != %x", i, n, m)
}
q := p224AlternativeToBig(&x)
if n.Cmp(q) != 0 {
t.Errorf("#%d: %x != %x (alternative)", i, n, m)
}
}
}
@@ -9,32 +9,20 @@
package hmac
import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"hash" "hash"
) )
// FIPS 198: // FIPS 198:
// http://csrc.nist.gov/publications/fips/fips198/fips-198a.pdf // http://csrc.nist.gov/publications/fips/fips198/fips-198a.pdf
// key is zero padded to 64 bytes // key is zero padded to the block size of the hash function
// ipad = 0x36 byte repeated to 64 bytes // ipad = 0x36 byte repeated for key length
// opad = 0x5c byte repeated to 64 bytes // opad = 0x5c byte repeated for key length
// hmac = H([key ^ opad] H([key ^ ipad] text)) // hmac = H([key ^ opad] H([key ^ ipad] text))
const (
// NOTE(rsc): This constant is actually the
// underlying hash function's block size.
// HMAC is only conventionally used with
// MD5 and SHA1, and both use 64-byte blocks.
// The hash.Hash interface doesn't provide a
// way to find out the block size.
padSize = 64
)
type hmac struct {
size int
blocksize int
key, tmp []byte
outer, inner hash.Hash
}
@@ -43,7 +31,7 @@ func (h *hmac) tmpPad(xor byte) {
for i, k := range h.key {
h.tmp[i] = xor ^ k
}
for i := len(h.key); i < padSize; i++ {
for i := len(h.key); i < h.blocksize; i++ {
h.tmp[i] = xor
}
}
@@ -52,7 +40,7 @@ func (h *hmac) Sum(in []byte) []byte {
origLen := len(in)
in = h.inner.Sum(in)
h.tmpPad(0x5c)
copy(h.tmp[padSize:], in[origLen:])
copy(h.tmp[h.blocksize:], in[origLen:])
h.outer.Reset()
h.outer.Write(h.tmp)
return h.outer.Sum(in[:origLen])
@@ -64,20 +52,23 @@ func (h *hmac) Write(p []byte) (n int, err error) {
func (h *hmac) Size() int { return h.size }
func (h *hmac) BlockSize() int { return h.blocksize }
func (h *hmac) Reset() {
h.inner.Reset()
h.tmpPad(0x36)
h.inner.Write(h.tmp[0:padSize])
h.inner.Write(h.tmp[0:h.blocksize])
}
// New returns a new HMAC hash using the given hash generator and key.
// New returns a new HMAC hash using the given hash.Hash type and key.
func New(h func() hash.Hash, key []byte) hash.Hash {
hm := new(hmac)
hm.outer = h()
hm.inner = h()
hm.size = hm.inner.Size()
hm.tmp = make([]byte, padSize+hm.size)
hm.blocksize = hm.inner.BlockSize()
if len(key) > padSize {
hm.tmp = make([]byte, hm.blocksize+hm.size)
if len(key) > hm.blocksize {
if len(key) > hm.blocksize {
// If key is too big, hash it.
hm.outer.Write(key)
key = hm.outer.Sum(nil)
@@ -87,12 +78,3 @@ func New(h func() hash.Hash, key []byte) hash.Hash {
hm.Reset()
return hm
}
// NewMD5 returns a new HMAC-MD5 hash using the given key.
func NewMD5(key []byte) hash.Hash { return New(md5.New, key) }
// NewSHA1 returns a new HMAC-SHA1 hash using the given key.
func NewSHA1(key []byte) hash.Hash { return New(sha1.New, key) }
// NewSHA256 returns a new HMAC-SHA256 hash using the given key.
func NewSHA256(key []byte) hash.Hash { return New(sha256.New, key) }
@@ -17,6 +17,9 @@ func init() {
// The size of an MD4 checksum in bytes.
const Size = 16
// The blocksize of MD4 in bytes.
const BlockSize = 64
const (
_Chunk = 64
_Init0 = 0x67452301
@@ -51,6 +54,8 @@ func New() hash.Hash {
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
......
@@ -17,6 +17,9 @@ func init() {
// The size of an MD5 checksum in bytes.
const Size = 16
// The blocksize of MD5 in bytes.
const BlockSize = 64
const (
_Chunk = 64
_Init0 = 0x67452301
@@ -51,6 +54,8 @@ func New() hash.Hash {
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
......
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ocsp
import (
......
@@ -53,3 +53,7 @@ func (cth *canonicalTextHash) Reset() {
func (cth *canonicalTextHash) Size() int {
return cth.h.Size()
}
func (cth *canonicalTextHash) BlockSize() int {
return cth.h.BlockSize()
}
@@ -29,6 +29,10 @@ func (r recordingHash) Size() int {
panic("shouldn't be called")
}
func (r recordingHash) BlockSize() int {
panic("shouldn't be called")
}
func testCanonicalText(t *testing.T, input, expected string) {
r := recordingHash{bytes.NewBuffer(nil)}
c := NewCanonicalTextHash(r)
......
@@ -55,6 +55,8 @@ func New() hash.Hash {
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.tc += uint64(nn)
......
@@ -17,6 +17,9 @@ func init() {
// The size of a SHA1 checksum in bytes.
const Size = 20
// The blocksize of SHA1 in bytes.
const BlockSize = 64
const (
_Chunk = 64
_Init0 = 0x67452301
@@ -53,6 +56,8 @@ func New() hash.Hash {
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
......
@@ -22,6 +22,9 @@ const Size = 32
// The size of a SHA224 checksum in bytes.
const Size224 = 28
// The blocksize of SHA256 and SHA224 in bytes.
const BlockSize = 64
const (
_Chunk = 64
_Init0 = 0x6A09E667
@@ -97,6 +100,8 @@ func (d *digest) Size() int {
return Size224
}
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
......
@@ -22,6 +22,9 @@ const Size = 64
// The size of a SHA384 checksum in bytes.
const Size384 = 48
// The blocksize of SHA512 and SHA384 in bytes.
const BlockSize = 128
const (
_Chunk = 128
_Init0 = 0x6a09e667f3bcc908
@@ -97,6 +100,8 @@ func (d *digest) Size() int {
return Size384
}
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
......
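Each of the digests above gains a package-level BlockSize constant and a BlockSize method, which is what the generic HMAC keying earlier in this change relies on. A small sketch:

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"crypto/sha512"
	"fmt"
)

func main() {
	// hash.Hash implementations now expose the block size HMAC needs
	// for its key padding.
	fmt.Println(md5.New().BlockSize())    // 64
	fmt.Println(sha256.New().BlockSize()) // 64
	fmt.Println(sha512.New().BlockSize()) // 128
}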
@@ -91,7 +91,7 @@ func macSHA1(version uint16, key []byte) macFunction {
copy(mac.key, key)
return mac
}
return tls10MAC{hmac.NewSHA1(key)}
return tls10MAC{hmac.New(sha1.New, key)}
}
type macFunction interface {
......
@@ -15,6 +15,7 @@ import (
"io"
"net"
"sync"
"time"
)
// A Conn represents a secured connection.
@@ -86,24 +87,23 @@ func (c *Conn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
// SetTimeout sets the read deadline associated with the connection.
// There is no write deadline.
func (c *Conn) SetTimeout(nsec int64) error {
return c.conn.SetTimeout(nsec)
}
// SetReadTimeout sets the time (in nanoseconds) that
// Read will wait for data before returning a net.Error
// with Timeout() == true.
// Setting nsec == 0 (the default) disables the deadline.
func (c *Conn) SetReadTimeout(nsec int64) error {
return c.conn.SetReadTimeout(nsec)
}
// SetWriteTimeout exists to satisfy the net.Conn interface
// but is not implemented by TLS. It always returns an error.
func (c *Conn) SetWriteTimeout(nsec int64) error {
return errors.New("TLS does not support SetWriteTimeout")
}
// SetDeadline sets the read deadline associated with the connection.
// There is no write deadline.
// A zero value for t means Read will not time out.
func (c *Conn) SetDeadline(t time.Time) error {
return c.conn.SetDeadline(t)
}
// SetReadDeadline sets the read deadline on the underlying connection.
// A zero value for t means Read will not time out.
func (c *Conn) SetReadDeadline(t time.Time) error {
return c.conn.SetReadDeadline(t)
}
// SetWriteDeadline exists to satisfy the net.Conn interface
// but is not implemented by TLS. It always returns an error.
func (c *Conn) SetWriteDeadline(t time.Time) error {
return errors.New("TLS does not support SetWriteDeadline")
}
// A halfConn represents one direction of the record layer
@@ -744,7 +744,7 @@ func (c *Conn) Write(b []byte) (n int, err error) {
}
// Read can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetTimeout and SetReadTimeout.
// after a fixed time limit; see SetDeadline and SetReadDeadline.
func (c *Conn) Read(b []byte) (n int, err error) {
if err = c.Handshake(); err != nil {
return
......
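With SetTimeout/SetReadTimeout replaced by the net.Conn deadline methods, read timeouts are expressed as absolute time.Time values. A sketch of the new pattern; wrapping an already-dialed connection and the 5-second timeout are illustrative, not part of this change.

package tlsexample

import (
	"crypto/tls"
	"net"
	"time"
)

// readWithDeadline wraps an existing connection in TLS and arranges for
// Read to return a timeout error if no data arrives within 5 seconds.
func readWithDeadline(raw net.Conn, config *tls.Config) ([]byte, error) {
	conn := tls.Client(raw, config)
	// Deadlines are now absolute times; a zero time.Time disables them.
	if err := conn.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil {
		return nil, err
	}
	buf := make([]byte, 4096)
	n, err := conn.Read(buf)
	return buf[:n], err
}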
@@ -105,7 +105,7 @@ func md5SHA1Hash(slices ...[]byte) []byte {
// pre-master secret is then calculated using ECDH.
type ecdheRSAKeyAgreement struct {
privateKey []byte
curve *elliptic.Curve
curve elliptic.Curve
x, y *big.Int
}
@@ -132,11 +132,11 @@ Curve:
var x, y *big.Int
var err error
ka.privateKey, x, y, err = ka.curve.GenerateKey(config.rand())
ka.privateKey, x, y, err = elliptic.GenerateKey(ka.curve, config.rand())
if err != nil {
return nil, err
}
ecdhePublic := ka.curve.Marshal(x, y)
ecdhePublic := elliptic.Marshal(ka.curve, x, y)
// http://tools.ietf.org/html/rfc4492#section-5.4
serverECDHParams := make([]byte, 1+2+1+len(ecdhePublic))
@@ -167,12 +167,12 @@ func (ka *ecdheRSAKeyAgreement) processClientKeyExchange(config *Config, ckx *cl
if len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 {
return nil, errors.New("bad ClientKeyExchange")
}
x, y := ka.curve.Unmarshal(ckx.ciphertext[1:])
x, y := elliptic.Unmarshal(ka.curve, ckx.ciphertext[1:])
if x == nil {
return nil, errors.New("bad ClientKeyExchange")
}
x, _ = ka.curve.ScalarMult(x, y, ka.privateKey)
preMasterSecret := make([]byte, (ka.curve.BitSize+7)>>3)
preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3)
xBytes := x.Bytes()
copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes)
@@ -205,7 +205,7 @@ func (ka *ecdheRSAKeyAgreement) processServerKeyExchange(config *Config, clientH
if publicLen+4 > len(skx.key) {
return errServerKeyExchange
}
ka.x, ka.y = ka.curve.Unmarshal(skx.key[4 : 4+publicLen])
ka.x, ka.y = elliptic.Unmarshal(ka.curve, skx.key[4:4+publicLen])
if ka.x == nil {
return errServerKeyExchange
}
@@ -229,16 +229,16 @@ func (ka *ecdheRSAKeyAgreement) generateClientKeyExchange(config *Config, client
if ka.curve == nil {
return nil, nil, errors.New("missing ServerKeyExchange message")
}
priv, mx, my, err := ka.curve.GenerateKey(config.rand())
priv, mx, my, err := elliptic.GenerateKey(ka.curve, config.rand())
if err != nil {
return nil, nil, err
}
x, _ := ka.curve.ScalarMult(ka.x, ka.y, priv)
preMasterSecret := make([]byte, (ka.curve.BitSize+7)>>3)
preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3)
xBytes := x.Bytes()
copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes)
serialized := ka.curve.Marshal(mx, my)
serialized := elliptic.Marshal(ka.curve, mx, my)
ckx := new(clientKeyExchangeMsg)
ckx.ciphertext = make([]byte, 1+len(serialized))
......
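GenerateKey, Marshal, and Unmarshal move from methods on *elliptic.Curve to package-level functions that take an elliptic.Curve, which is how the key-agreement code above now calls them. A condensed ECDH sketch using the same calls; the two in-process "sides" are purely illustrative.

package main

import (
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := elliptic.P224()

	// Each side generates a key pair; GenerateKey is now a package function.
	aPriv, ax, ay, err := elliptic.GenerateKey(curve, rand.Reader)
	if err != nil {
		panic(err)
	}
	bPriv, bx, by, err := elliptic.GenerateKey(curve, rand.Reader)
	if err != nil {
		panic(err)
	}

	// Public points travel on the wire in the uncompressed encoding.
	aPub := elliptic.Marshal(curve, ax, ay)
	bPub := elliptic.Marshal(curve, bx, by)

	// Each side unmarshals the peer's point and multiplies by its own scalar.
	px, py := elliptic.Unmarshal(curve, bPub)
	sharedA, _ := curve.ScalarMult(px, py, aPriv)
	qx, qy := elliptic.Unmarshal(curve, aPub)
	sharedB, _ := curve.ScalarMult(qx, qy, bPriv)

	fmt.Println(sharedA.Cmp(sharedB) == 0) // true
}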
@@ -13,10 +13,11 @@ import (
// Possible certificate files; stop after finding one.
var certFiles = []string{
"/etc/ssl/certs/ca-certificates.crt", // Linux etc
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
"/etc/ssl/ca-bundle.pem", // OpenSUSE
"/etc/ssl/cert.pem", // OpenBSD
"/usr/local/share/certs/ca-root-nss.crt", // FreeBSD
}
func initDefaultRoots() {
......
@@ -899,6 +899,14 @@ var (
oidRSA = []int{1, 2, 840, 113549, 1, 1, 1}
)
func subjectBytes(cert *Certificate) ([]byte, error) {
if len(cert.RawSubject) > 0 {
return cert.RawSubject, nil
}
return asn1.Marshal(cert.Subject.ToRDNSequence())
}
// CreateCertificate creates a new certificate based on a template. The
// following members of template are used: SerialNumber, Subject, NotBefore,
// NotAfter, KeyUsage, BasicConstraintsValid, IsCA, MaxPathLen, SubjectKeyId,
@@ -909,10 +917,23 @@ var (
// signee and priv is the private key of the signer.
//
// The returned slice is the certificate in DER encoding.
func CreateCertificate(rand io.Reader, template, parent *Certificate, pub *rsa.PublicKey, priv *rsa.PrivateKey) (cert []byte, err error) {
//
// The only supported key type is RSA (*rsa.PublicKey for pub, *rsa.PrivateKey
// for priv).
func CreateCertificate(rand io.Reader, template, parent *Certificate, pub interface{}, priv interface{}) (cert []byte, err error) {
rsaPub, ok := pub.(*rsa.PublicKey)
if !ok {
return nil, errors.New("x509: non-RSA public keys not supported")
}
rsaPriv, ok := priv.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("x509: non-RSA private keys not supported")
}
asn1PublicKey, err := asn1.Marshal(rsaPublicKey{
N: pub.N,
N: rsaPub.N,
E: pub.E,
E: rsaPub.E,
})
if err != nil {
return
@@ -927,16 +948,12 @@ func CreateCertificate(rand io.Reader, template, parent *Certificate, pub *rsa.P
return
}
var asn1Issuer []byte
if len(parent.RawSubject) > 0 {
asn1Issuer = parent.RawSubject
} else {
if asn1Issuer, err = asn1.Marshal(parent.Subject.ToRDNSequence()); err != nil {
return
}
}
asn1Subject, err := asn1.Marshal(template.Subject.ToRDNSequence())
asn1Issuer, err := subjectBytes(parent)
if err != nil {
return
}
asn1Subject, err := subjectBytes(template)
if err != nil {
return
}
@@ -964,7 +981,7 @@ func CreateCertificate(rand io.Reader, template, parent *Certificate, pub *rsa.P
h.Write(tbsCertContents)
digest := h.Sum(nil)
signature, err := rsa.SignPKCS1v15(rand, priv, crypto.SHA1, digest)
signature, err := rsa.SignPKCS1v15(rand, rsaPriv, crypto.SHA1, digest)
if err != nil {
return
}
@@ -1011,7 +1028,13 @@ func ParseDERCRL(derBytes []byte) (certList *pkix.CertificateList, err error) {
// CreateCRL returns a DER encoded CRL, signed by this Certificate, that
// contains the given list of revoked certificates.
func (c *Certificate) CreateCRL(rand io.Reader, priv *rsa.PrivateKey, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
//
// The only supported key type is RSA (*rsa.PrivateKey for priv).
func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
rsaPriv, ok := priv.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("x509: non-RSA private keys not supported")
}
tbsCertList := pkix.TBSCertificateList{
Version: 2,
Signature: pkix.AlgorithmIdentifier{
@@ -1032,7 +1055,7 @@ func (c *Certificate) CreateCRL(rand io.Reader, priv *rsa.PrivateKey, revokedCer
h.Write(tbsCertListContents)
digest := h.Sum(nil)
signature, err := rsa.SignPKCS1v15(rand, priv, crypto.SHA1, digest)
signature, err := rsa.SignPKCS1v15(rand, rsaPriv, crypto.SHA1, digest)
if err != nil {
return
}
......
@@ -7,8 +7,8 @@
package sql
import (
"database/sql/driver"
"errors" "errors"
"exp/sql/driver"
"fmt" "fmt"
"reflect" "reflect"
"strconv" "strconv"
......
@@ -5,6 +5,7 @@
package sql
import (
"database/sql/driver"
"fmt" "fmt"
"reflect" "reflect"
"testing" "testing"
...@@ -154,8 +155,8 @@ func TestConversions(t *testing.T) { ...@@ -154,8 +155,8 @@ func TestConversions(t *testing.T) {
} }
} }
func TestNullableString(t *testing.T) {
func TestNullString(t *testing.T) {
var ns NullableString
var ns NullString
convertAssign(&ns, []byte("foo"))
if !ns.Valid {
t.Errorf("expecting not null")
@@ -171,3 +172,35 @@ func TestNullableString(t *testing.T) {
t.Errorf("expecting blank on nil; got %q", ns.String)
}
}
type valueConverterTest struct {
c driver.ValueConverter
in, out interface{}
err string
}
var valueConverterTests = []valueConverterTest{
{driver.DefaultParameterConverter, NullString{"hi", true}, "hi", ""},
{driver.DefaultParameterConverter, NullString{"", false}, nil, ""},
}
func TestValueConverters(t *testing.T) {
for i, tt := range valueConverterTests {
out, err := tt.c.ConvertValue(tt.in)
goterr := ""
if err != nil {
goterr = err.Error()
}
if goterr != tt.err {
t.Errorf("test %d: %s(%T(%v)) error = %q; want error = %q",
i, tt.c, tt.in, tt.in, goterr, tt.err)
}
if tt.err != "" {
continue
}
if !reflect.DeepEqual(out, tt.out) {
t.Errorf("test %d: %s(%T(%v)) = %v (%T); want %v (%T)",
i, tt.c, tt.in, tt.in, out, out, tt.out, tt.out)
}
}
}
@@ -32,6 +32,15 @@ type ValueConverter interface {
ConvertValue(v interface{}) (interface{}, error)
}
// SubsetValuer is the interface providing the SubsetValue method.
//
// Types implementing SubsetValuer interface are able to convert
// themselves to one of the driver's allowed subset values.
type SubsetValuer interface {
// SubsetValue returns a driver parameter subset value.
SubsetValue() (interface{}, error)
}
// Bool is a ValueConverter that converts input values to bools.
//
// The conversion rules are:
@@ -136,6 +145,32 @@ func (stringType) ConvertValue(v interface{}) (interface{}, error) {
return fmt.Sprintf("%v", v), nil
}
// Null is a type that implements ValueConverter by allowing nil
// values but otherwise delegating to another ValueConverter.
type Null struct {
Converter ValueConverter
}
func (n Null) ConvertValue(v interface{}) (interface{}, error) {
if v == nil {
return nil, nil
}
return n.Converter.ConvertValue(v)
}
// NotNull is a type that implements ValueConverter by disallowing nil
// values but otherwise delegating to another ValueConverter.
type NotNull struct {
Converter ValueConverter
}
func (n NotNull) ConvertValue(v interface{}) (interface{}, error) {
if v == nil {
return nil, fmt.Errorf("nil value not allowed")
}
return n.Converter.ConvertValue(v)
}
// IsParameterSubsetType reports whether v is of a valid type for a
// parameter. These types are:
//
@@ -200,6 +235,17 @@ func (defaultConverter) ConvertValue(v interface{}) (interface{}, error) {
return v, nil
}
if svi, ok := v.(SubsetValuer); ok {
sv, err := svi.SubsetValue()
if err != nil {
return nil, err
}
if !IsParameterSubsetType(sv) {
return nil, fmt.Errorf("non-subset type %T returned from SubsetValue", sv)
}
return sv, nil
}
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -215,5 +261,5 @@ func (defaultConverter) ConvertValue(v interface{}) (interface{}, error) {
case reflect.Float32, reflect.Float64:
return rv.Float(), nil
}
return nil, fmt.Errorf("unsupported type %s", rv.Kind()) return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
} }
@@ -5,6 +5,7 @@
package sql
import (
"database/sql/driver"
"errors" "errors"
"fmt" "fmt"
"io" "io"
...@@ -13,8 +14,6 @@ import ( ...@@ -13,8 +14,6 @@ import (
"strings" "strings"
"sync" "sync"
"time" "time"
"exp/sql/driver"
)
var _ = log.Printf
@@ -589,7 +588,9 @@ func converterForType(typ string) driver.ValueConverter {
case "int32":
return driver.Int32
case "string":
return driver.String
return driver.NotNull{driver.String}
case "nullstring":
return driver.Null{driver.String}
case "datetime": case "datetime":
return driver.DefaultParameterConverter return driver.DefaultParameterConverter
} }
......
@@ -7,12 +7,11 @@
package sql
import (
"database/sql/driver"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"sync" "sync"
"exp/sql/driver"
)
var drivers = make(map[string]driver.Driver)
@@ -30,11 +29,16 @@ func Register(name string, driver driver.Driver) {
drivers[name] = driver
}
// NullableString represents a string that may be null.
// NullableString implements the ScannerInto interface so
// it can be used as a scan destination:
// RawBytes is a byte slice that holds a reference to memory owned by
// the database itself. After a Scan into a RawBytes, the slice is only
// valid until the next call to Next, Scan, or Close.
type RawBytes []byte
// NullString represents a string that may be null.
// NullString implements the ScannerInto interface so
// it can be used as a scan destination:
//
// var s NullableString
// var s NullString
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
// ...
// if s.Valid {
@@ -44,19 +48,27 @@ func Register(name string, driver driver.Driver) {
// }
//
// TODO(bradfitz): add other types.
type NullableString struct {
type NullString struct {
String string
Valid bool // Valid is true if String is not NULL
}
// ScanInto implements the ScannerInto interface.
func (ms *NullableString) ScanInto(value interface{}) error {
func (ns *NullString) ScanInto(value interface{}) error {
if value == nil {
ms.String, ms.Valid = "", false
ns.String, ns.Valid = "", false
return nil
}
ms.Valid = true
ns.Valid = true
return convertAssign(&ms.String, value)
return convertAssign(&ns.String, value)
}
// SubsetValue implements the driver SubsetValuer interface.
func (ns NullString) SubsetValue() (interface{}, error) {
if !ns.Valid {
return nil, nil
}
return ns.String, nil
}
// ScannerInto is an interface used by Scan.
@@ -525,6 +537,27 @@ func (s *Stmt) Exec(args ...interface{}) (Result, error) {
// Convert args to subset types.
if cc, ok := si.(driver.ColumnConverter); ok {
for n, arg := range args {
// First, see if the value itself knows how to convert
// itself to a driver type. For example, a NullString
// struct changing into a string or nil.
if svi, ok := arg.(driver.SubsetValuer); ok {
sv, err := svi.SubsetValue()
if err != nil {
return nil, fmt.Errorf("sql: argument index %d from SubsetValue: %v", n, err)
}
if !driver.IsParameterSubsetType(sv) {
return nil, fmt.Errorf("sql: argument index %d: non-subset type %T returned from SubsetValue", n, sv)
}
arg = sv
}
// Second, ask the column to sanity check itself. For
// example, drivers might use this to make sure that
// an int64 values being inserted into a 16-bit
// integer field is in range (before getting
// truncated), or that a nil can't go into a NOT NULL
// column before going across the network to get the
// same error.
args[n], err = cc.ColumnConverter(n).ConvertValue(arg)
if err != nil {
return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
@@ -760,9 +793,13 @@ func (rs *Rows) Columns() ([]string, error) {
}
// Scan copies the columns in the current row into the values pointed
// at by dest. If dest contains pointers to []byte, the slices should
// not be modified and should only be considered valid until the next
// call to Next or Scan.
// Scan copies the columns in the current row into the values pointed
// at by dest.
//
// If an argument has type *[]byte, Scan saves in that argument a copy
// of the corresponding data. The copy is owned by the caller and can
// be modified and held indefinitely. The copy can be avoided by using
// an argument of type *RawBytes instead; see the documentation for
// RawBytes for restrictions on its use.
func (rs *Rows) Scan(dest ...interface{}) error {
if rs.closed {
return errors.New("sql: Rows closed")
@@ -782,6 +819,18 @@ func (rs *Rows) Scan(dest ...interface{}) error {
return fmt.Errorf("sql: Scan error on column index %d: %v", i, err) return fmt.Errorf("sql: Scan error on column index %d: %v", i, err)
} }
} }
for _, dp := range dest {
b, ok := dp.(*[]byte)
if !ok {
continue
}
if _, ok = dp.(*RawBytes); ok {
continue
}
clone := make([]byte, len(*b))
copy(clone, *b)
*b = clone
}
return nil
}
@@ -838,6 +887,9 @@ func (r *Row) Scan(dest ...interface{}) error {
// they were obtained from the network anyway) But for now we
// don't care.
for _, dp := range dest {
if _, ok := dp.(*RawBytes); ok {
return errors.New("sql: RawBytes isn't allowed on Row.Scan")
}
b, ok := dp.(*[]byte)
if !ok {
continue
......
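The rename to NullString and the new SubsetValuer hook mean the same struct works both as a query parameter and as a scan destination. A sketch against an already-opened *sql.DB; the table and column names are illustrative only.

package sqlexample

import (
	"database/sql"
	"fmt"
)

// favoriteColor shows NullString flowing in both directions: as an Exec
// parameter (via its SubsetValue method) and as a Scan destination (via
// its ScanInto method).
func favoriteColor(db *sql.DB, id int) error {
	// Insert a NULL by passing a NullString with Valid == false.
	if _, err := db.Exec("INSERT INTO people (id, favcolor) VALUES (?, ?)",
		id, sql.NullString{String: "", Valid: false}); err != nil {
		return err
	}

	var color sql.NullString
	if err := db.QueryRow("SELECT favcolor FROM people WHERE id = ?", id).Scan(&color); err != nil {
		return err
	}
	if color.Valid {
		fmt.Println("favorite color:", color.String)
	} else {
		fmt.Println("no favorite color recorded")
	}
	return nil
}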
@@ -76,7 +76,7 @@ func TestQuery(t *testing.T) {
{age: 3, name: "Chris"},
}
if !reflect.DeepEqual(got, want) {
t.Logf(" got: %#v\nwant: %#v", got, want)
t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
}
// And verify that the final rows.Next() call, which hit EOF,
@@ -86,6 +86,43 @@ func TestQuery(t *testing.T) {
}
}
func TestByteOwnership(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
rows, err := db.Query("SELECT|people|name,photo|")
if err != nil {
t.Fatalf("Query: %v", err)
}
type row struct {
name []byte
photo RawBytes
}
got := []row{}
for rows.Next() {
var r row
err = rows.Scan(&r.name, &r.photo)
if err != nil {
t.Fatalf("Scan: %v", err)
}
got = append(got, r)
}
corruptMemory := []byte("\xffPHOTO")
want := []row{
{name: []byte("Alice"), photo: corruptMemory},
{name: []byte("Bob"), photo: corruptMemory},
{name: []byte("Chris"), photo: corruptMemory},
}
if !reflect.DeepEqual(got, want) {
t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
}
var photo RawBytes
err = db.QueryRow("SELECT|people|photo|name=?", "Alice").Scan(&photo)
if err == nil {
t.Error("want error scanning into RawBytes from QueryRow")
}
}
func TestRowsColumns(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
@@ -300,6 +337,68 @@ func TestQueryRowClosingStmt(t *testing.T) {
}
fakeConn := db.freeConn[0].(*fakeConn)
if made, closed := fakeConn.stmtsMade, fakeConn.stmtsClosed; made != closed {
t.Logf("statement close mismatch: made %d, closed %d", made, closed) t.Errorf("statement close mismatch: made %d, closed %d", made, closed)
}
}
func TestNullStringParam(t *testing.T) {
db := newTestDB(t, "")
defer closeDB(t, db)
exec(t, db, "CREATE|t|id=int32,name=string,favcolor=nullstring")
// Inserts with db.Exec:
exec(t, db, "INSERT|t|id=?,name=?,favcolor=?", 1, "alice", NullString{"aqua", true})
exec(t, db, "INSERT|t|id=?,name=?,favcolor=?", 2, "bob", NullString{"brown", false})
_, err := db.Exec("INSERT|t|id=?,name=?,favcolor=?", 999, nil, nil)
if err == nil {
// TODO: this test fails, but it's just because
// fakeConn implements the optional Execer interface,
// so arguably this is the correct behavior. But
// maybe I should flesh out the fakeConn.Exec
// implementation so this properly fails.
// t.Errorf("expected error inserting nil name with Exec")
}
// Inserts with a prepared statement:
stmt, err := db.Prepare("INSERT|t|id=?,name=?,favcolor=?")
if err != nil {
t.Fatalf("prepare: %v", err)
}
if _, err := stmt.Exec(3, "chris", "chartreuse"); err != nil {
t.Errorf("exec insert chris: %v", err)
}
if _, err := stmt.Exec(4, "dave", NullString{"darkred", true}); err != nil {
t.Errorf("exec insert dave: %v", err)
}
if _, err := stmt.Exec(5, "eleanor", NullString{"eel", false}); err != nil {
t.Errorf("exec insert dave: %v", err)
}
// Can't put null name into non-nullstring column,
if _, err := stmt.Exec(5, NullString{"", false}, nil); err == nil {
t.Errorf("expected error inserting nil name with prepared statement Exec")
}
type nameColor struct {
name string
favColor NullString
}
wantMap := map[int]nameColor{
1: nameColor{"alice", NullString{"aqua", true}},
2: nameColor{"bob", NullString{"", false}},
3: nameColor{"chris", NullString{"chartreuse", true}},
4: nameColor{"dave", NullString{"darkred", true}},
5: nameColor{"eleanor", NullString{"", false}},
}
for id, want := range wantMap {
var got nameColor
if err := db.QueryRow("SELECT|t|name,favcolor|id=?", id).Scan(&got.name, &got.favColor); err != nil {
t.Errorf("id=%d Scan: %v", id, err)
}
if got != want {
t.Errorf("id=%d got %#v, want %#v", id, got, want)
}
}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gob
// This file is not normally included in the gob package. Used only for debugging the package itself.
......
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main package main
// Need to compile package gob with debug.go to build this program. // Need to compile package gob with debug.go to build this program.
......
...@@ -39,6 +39,8 @@ import ( ...@@ -39,6 +39,8 @@ import (
// //
// String values encode as JSON strings, with each invalid UTF-8 sequence // String values encode as JSON strings, with each invalid UTF-8 sequence
// replaced by the encoding of the Unicode replacement character U+FFFD. // replaced by the encoding of the Unicode replacement character U+FFFD.
// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
// to keep some browsers from misinterpreting JSON output as HTML.
// //
// Array and slice values encode as JSON arrays, except that // Array and slice values encode as JSON arrays, except that
// []byte encodes as a base64-encoded string. // []byte encodes as a base64-encoded string.
...@@ -77,7 +79,8 @@ import ( ...@@ -77,7 +79,8 @@ import (
// Int64String int64 `json:",string"` // Int64String int64 `json:",string"`
// //
// The key name will be used if it's a non-empty string consisting of // The key name will be used if it's a non-empty string consisting of
// only Unicode letters, digits, dollar signs, hyphens, and underscores. // only Unicode letters, digits, dollar signs, percent signs, hyphens,
// underscores and slashes.
// //
// Map values encode as JSON objects. // Map values encode as JSON objects.
// The map's key type must be string; the object keys are used directly // The map's key type must be string; the object keys are used directly
...@@ -417,8 +420,13 @@ func isValidTag(s string) bool { ...@@ -417,8 +420,13 @@ func isValidTag(s string) bool {
return false return false
} }
for _, c := range s { for _, c := range s {
if c != '$' && c != '-' && c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { switch c {
return false case '$', '-', '_', '/', '%':
// Acceptable
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
} }
} }
return true return true
......
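As an illustrative aside (not part of this patch), the encoding/json changes above have two visible effects: angle brackets in string values are escaped, and '%' and '/' are now legal in struct tag key names. A minimal sketch, assuming a hypothetical page type:

package main

import (
	"encoding/json"
	"fmt"
)

type page struct {
	// "text/html%" is accepted by the relaxed isValidTag (see issue 2718).
	Body string `json:"text/html%"`
}

func main() {
	b, _ := json.Marshal(page{Body: "<b>hi</b>"})
	// Angle brackets are emitted as \u003c and \u003e.
	fmt.Println(string(b)) // {"text/html%":"\u003cb\u003ehi\u003c/b\u003e"}
}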
...@@ -9,7 +9,7 @@ import ( ...@@ -9,7 +9,7 @@ import (
) )
type basicLatin2xTag struct { type basicLatin2xTag struct {
V string `json:"$-"` V string `json:"$%-/"`
} }
type basicLatin3xTag struct { type basicLatin3xTag struct {
...@@ -36,6 +36,10 @@ type miscPlaneTag struct { ...@@ -36,6 +36,10 @@ type miscPlaneTag struct {
V string `json:"色は匂へど"` V string `json:"色は匂へど"`
} }
type percentSlashTag struct {
V string `json:"text/html%"` // http://golang.org/issue/2718
}
type emptyTag struct { type emptyTag struct {
W string W string
} }
...@@ -49,7 +53,7 @@ type badFormatTag struct { ...@@ -49,7 +53,7 @@ type badFormatTag struct {
} }
type badCodeTag struct { type badCodeTag struct {
Z string `json:" !\"#%&'()*+,./"` Z string `json:" !\"#&'()*+,."`
} }
var structTagObjectKeyTests = []struct { var structTagObjectKeyTests = []struct {
...@@ -57,7 +61,7 @@ var structTagObjectKeyTests = []struct { ...@@ -57,7 +61,7 @@ var structTagObjectKeyTests = []struct {
value string value string
key string key string
}{ }{
{basicLatin2xTag{"2x"}, "2x", "$-"}, {basicLatin2xTag{"2x"}, "2x", "$%-/"},
{basicLatin3xTag{"3x"}, "3x", "0123456789"}, {basicLatin3xTag{"3x"}, "3x", "0123456789"},
{basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"}, {basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
{basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"}, {basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
...@@ -68,6 +72,7 @@ var structTagObjectKeyTests = []struct { ...@@ -68,6 +72,7 @@ var structTagObjectKeyTests = []struct {
{misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"}, {misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
{badFormatTag{"Orfevre"}, "Orfevre", "Y"}, {badFormatTag{"Orfevre"}, "Orfevre", "Y"},
{badCodeTag{"Reliable Man"}, "Reliable Man", "Z"}, {badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
{percentSlashTag{"brut"}, "brut", "text/html%"},
} }
func TestStructTagObjectKey(t *testing.T) { func TestStructTagObjectKey(t *testing.T) {
...@@ -88,7 +93,7 @@ func TestStructTagObjectKey(t *testing.T) { ...@@ -88,7 +93,7 @@ func TestStructTagObjectKey(t *testing.T) {
t.Fatalf("Unexpected value: %#q, want %v", s, tt.value) t.Fatalf("Unexpected value: %#q, want %v", s, tt.value)
} }
default: default:
t.Fatalf("Unexpected key: %#q", i) t.Fatalf("Unexpected key: %#q, from %#q", i, b)
} }
} }
} }
......
...@@ -150,6 +150,10 @@ type XMLNameWithoutTag struct { ...@@ -150,6 +150,10 @@ type XMLNameWithoutTag struct {
Value string ",chardata" Value string ",chardata"
} }
type NameInField struct {
Foo Name `xml:"ns foo"`
}
type AttrTest struct { type AttrTest struct {
Int int `xml:",attr"` Int int `xml:",attr"`
Lower int `xml:"int,attr"` Lower int `xml:"int,attr"`
...@@ -483,6 +487,19 @@ var marshalTests = []struct { ...@@ -483,6 +487,19 @@ var marshalTests = []struct {
UnmarshalOnly: true, UnmarshalOnly: true,
}, },
// xml.Name works in a plain field as well.
{
Value: &NameInField{Name{Space: "ns", Local: "foo"}},
ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
},
// Marshaling zero xml.Name uses the tag or field name.
{
Value: &NameInField{},
ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
MarshalOnly: true,
},
// Test attributes // Test attributes
{ {
Value: &AttrTest{ Value: &AttrTest{
......
...@@ -271,6 +271,10 @@ func (p *Parser) unmarshal(val reflect.Value, start *StartElement) error { ...@@ -271,6 +271,10 @@ func (p *Parser) unmarshal(val reflect.Value, start *StartElement) error {
case reflect.Struct: case reflect.Struct:
sv = v sv = v
typ := sv.Type() typ := sv.Type()
if typ == nameType {
v.Set(reflect.ValueOf(start.Name))
break
}
tinfo, err = getTypeInfo(typ) tinfo, err = getTypeInfo(typ)
if err != nil { if err != nil {
return err return err
......
...@@ -46,6 +46,8 @@ const ( ...@@ -46,6 +46,8 @@ const (
var tinfoMap = make(map[reflect.Type]*typeInfo) var tinfoMap = make(map[reflect.Type]*typeInfo)
var tinfoLock sync.RWMutex var tinfoLock sync.RWMutex
var nameType = reflect.TypeOf(Name{})
// getTypeInfo returns the typeInfo structure with details necessary // getTypeInfo returns the typeInfo structure with details necessary
// for marshalling and unmarshalling typ. // for marshalling and unmarshalling typ.
func getTypeInfo(typ reflect.Type) (*typeInfo, error) { func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
...@@ -56,7 +58,7 @@ func getTypeInfo(typ reflect.Type) (*typeInfo, error) { ...@@ -56,7 +58,7 @@ func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
return tinfo, nil return tinfo, nil
} }
tinfo = &typeInfo{} tinfo = &typeInfo{}
if typ.Kind() == reflect.Struct { if typ.Kind() == reflect.Struct && typ != nameType {
n := typ.NumField() n := typ.NumField()
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
f := typ.Field(i) f := typ.Field(i)
......
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p package p
func _() { func _() {
......
...@@ -83,14 +83,15 @@ func TestInotifyClose(t *testing.T) { ...@@ -83,14 +83,15 @@ func TestInotifyClose(t *testing.T) {
watcher, _ := NewWatcher() watcher, _ := NewWatcher()
watcher.Close() watcher.Close()
done := false done := make(chan bool)
go func() { go func() {
watcher.Close() watcher.Close()
done = true done <- true
}() }()
time.Sleep(50 * time.Millisecond) select {
if !done { case <-done:
case <-time.After(50 * time.Millisecond):
t.Fatal("double Close() test failed: second Close() call didn't return") t.Fatal("double Close() test failed: second Close() call didn't return")
} }
......
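For reference, a minimal sketch of the pattern the inotify test now uses: wait on a done channel with select and time.After instead of sleeping and then polling a bool. The helper name waitOrTimeout is hypothetical.

// waitOrTimeout runs fn in a goroutine and reports whether it finished
// before the timeout elapsed.
func waitOrTimeout(fn func(), timeout time.Duration) bool {
	done := make(chan struct{})
	go func() {
		fn()
		close(done)
	}()
	select {
	case <-done:
		return true
	case <-time.After(timeout):
		return false
	}
}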
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm package norm
import ( import (
......
...@@ -11,7 +11,6 @@ import ( ...@@ -11,7 +11,6 @@ import (
"net" "net"
"net/url" "net/url"
"os" "os"
"strings"
) )
// A Dialer is a means to establish a connection. // A Dialer is a means to establish a connection.
...@@ -70,14 +69,11 @@ func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) ...@@ -70,14 +69,11 @@ func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error))
// Dialer for it to make network requests. // Dialer for it to make network requests.
func FromURL(u *url.URL, forward Dialer) (Dialer, error) { func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
var auth *Auth var auth *Auth
if len(u.RawUserinfo) > 0 { if u.User != nil {
auth = new(Auth) auth = new(Auth)
parts := strings.SplitN(u.RawUserinfo, ":", 1) auth.User = u.User.Username()
if len(parts) == 1 { if p, ok := u.User.Password(); ok {
auth.User = parts[0] auth.Password = p
} else if len(parts) >= 2 {
auth.User = parts[0]
auth.Password = parts[1]
} }
} }
......
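The exp/proxy change above replaces the removed url.URL RawUserinfo field with the structured net/url Userinfo API. A rough sketch of that API with an illustrative URL (assumes the usual fmt, log and net/url imports):

u, err := url.Parse("socks5://alice:secret@proxy.example.com:1080")
if err != nil {
	log.Fatal(err)
}
if u.User != nil {
	name := u.User.Username()     // "alice"
	pass, ok := u.User.Password() // "secret", true
	fmt.Println(name, pass, ok)
}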
...@@ -306,9 +306,8 @@ type clientChan struct { ...@@ -306,9 +306,8 @@ type clientChan struct {
stdout *chanReader // receives the payload of channelData messages stdout *chanReader // receives the payload of channelData messages
stderr *chanReader // receives the payload of channelExtendedData messages stderr *chanReader // receives the payload of channelExtendedData messages
msg chan interface{} // incoming messages msg chan interface{} // incoming messages
theyClosed bool // indicates the close msg has been received from the remote side
theyClosed bool // indicates the close msg has been received from the remote side weClosed bool // indicates the close msg has been sent from our side
weClosed bool // indicates the close msg has been sent from our side
} }
// newClientChan returns a partially constructed *clientChan // newClientChan returns a partially constructed *clientChan
......
...@@ -484,6 +484,26 @@ func intLength(n *big.Int) int { ...@@ -484,6 +484,26 @@ func intLength(n *big.Int) int {
return length return length
} }
func marshalUint32(to []byte, n uint32) []byte {
to[0] = byte(n >> 24)
to[1] = byte(n >> 16)
to[2] = byte(n >> 8)
to[3] = byte(n)
return to[4:]
}
func marshalUint64(to []byte, n uint64) []byte {
to[0] = byte(n >> 56)
to[1] = byte(n >> 48)
to[2] = byte(n >> 40)
to[3] = byte(n >> 32)
to[4] = byte(n >> 24)
to[5] = byte(n >> 16)
to[6] = byte(n >> 8)
to[7] = byte(n)
return to[8:]
}
func marshalInt(to []byte, n *big.Int) []byte { func marshalInt(to []byte, n *big.Int) []byte {
lengthBytes := to lengthBytes := to
to = to[4:] to = to[4:]
......
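The new marshalUint32 and marshalUint64 helpers write big-endian values and return the rest of the buffer. As a sketch, the byte layout matches encoding/binary's big-endian order (the helpers themselves do not use that package):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 12)
	binary.BigEndian.PutUint32(buf, 42)        // same bytes as marshalUint32(buf, 42)
	binary.BigEndian.PutUint64(buf[4:], 1<<40) // same bytes as marshalUint64(buf[4:], 1<<40)
	fmt.Printf("% x\n", buf)                   // 00 00 00 2a 00 00 01 00 00 00 00 00
}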
...@@ -70,7 +70,7 @@ type Session struct { ...@@ -70,7 +70,7 @@ type Session struct {
started bool // true once Start, Run or Shell is invoked. started bool // true once Start, Run or Shell is invoked.
copyFuncs []func() error copyFuncs []func() error
errch chan error // one send per copyFunc errors chan error // one send per copyFunc
// true if pipe method is active // true if pipe method is active
stdinpipe, stdoutpipe, stderrpipe bool stdinpipe, stdoutpipe, stderrpipe bool
...@@ -244,10 +244,10 @@ func (s *Session) start() error { ...@@ -244,10 +244,10 @@ func (s *Session) start() error {
setupFd(s) setupFd(s)
} }
s.errch = make(chan error, len(s.copyFuncs)) s.errors = make(chan error, len(s.copyFuncs))
for _, fn := range s.copyFuncs { for _, fn := range s.copyFuncs {
go func(fn func() error) { go func(fn func() error) {
s.errch <- fn() s.errors <- fn()
}(fn) }(fn)
} }
return nil return nil
...@@ -270,7 +270,7 @@ func (s *Session) Wait() error { ...@@ -270,7 +270,7 @@ func (s *Session) Wait() error {
var copyError error var copyError error
for _ = range s.copyFuncs { for _ = range s.copyFuncs {
if err := <-s.errch; err != nil && copyError == nil { if err := <-s.errors; err != nil && copyError == nil {
copyError = err copyError = err
} }
} }
......
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
"fmt" "fmt"
"io" "io"
"net" "net"
"time"
) )
// Dial initiates a connection to the addr from the remote host. // Dial initiates a connection to the addr from the remote host.
...@@ -107,27 +108,25 @@ func (t *tcpchanconn) RemoteAddr() net.Addr { ...@@ -107,27 +108,25 @@ func (t *tcpchanconn) RemoteAddr() net.Addr {
return t.raddr return t.raddr
} }
// SetTimeout sets the read and write deadlines associated // SetDeadline sets the read and write deadlines associated
// with the connection. // with the connection.
func (t *tcpchanconn) SetTimeout(nsec int64) error { func (t *tcpchanconn) SetDeadline(deadline time.Time) error {
if err := t.SetReadTimeout(nsec); err != nil { if err := t.SetReadDeadline(deadline); err != nil {
return err return err
} }
return t.SetWriteTimeout(nsec) return t.SetWriteDeadline(deadline)
} }
// SetReadTimeout sets the time (in nanoseconds) that // SetReadDeadline sets the read deadline.
// Read will wait for data before returning an error with Timeout() == true. // A zero value for t means Read will not time out.
// Setting nsec == 0 (the default) disables the deadline. // After the deadline, the error from Read will implement net.Error
func (t *tcpchanconn) SetReadTimeout(nsec int64) error { // with Timeout() == true.
return errors.New("ssh: tcpchan: timeout not supported") func (t *tcpchanconn) SetReadDeadline(deadline time.Time) error {
return errors.New("ssh: tcpchan: deadline not supported")
} }
// SetWriteTimeout sets the time (in nanoseconds) that // SetWriteDeadline exists to satisfy the net.Conn interface
// Write will wait to send its data before returning an error with Timeout() == true. // but is not implemented by this type. It always returns an error.
// Setting nsec == 0 (the default) disables the deadline. func (t *tcpchanconn) SetWriteDeadline(deadline time.Time) error {
// Even if write times out, it may return n > 0, indicating that return errors.New("ssh: tcpchan: deadline not supported")
// some of the data was successfully written.
func (t *tcpchanconn) SetWriteTimeout(nsec int64) error {
return errors.New("ssh: tcpchan: timeout not supported")
} }
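A hedged sketch of how a caller would use the deadline methods tcpchanconn now exposes (on this type they simply return an error, so a real caller must fall back to other timeout handling). The readWithDeadline helper is hypothetical:

// readWithDeadline reads from any net.Conn with an upper bound on the wait.
func readWithDeadline(c net.Conn, d time.Duration, buf []byte) (int, error) {
	if err := c.SetReadDeadline(time.Now().Add(d)); err != nil {
		return 0, err // e.g. "ssh: tcpchan: deadline not supported"
	}
	return c.Read(buf)
}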
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
"crypto" "crypto"
"crypto/cipher" "crypto/cipher"
"crypto/hmac" "crypto/hmac"
"crypto/sha1"
"crypto/subtle" "crypto/subtle"
"errors" "errors"
"hash" "hash"
...@@ -266,7 +267,7 @@ func (c *common) setupKeys(d direction, K, H, sessionId []byte, hashFunc crypto. ...@@ -266,7 +267,7 @@ func (c *common) setupKeys(d direction, K, H, sessionId []byte, hashFunc crypto.
generateKeyMaterial(key, d.keyTag, K, H, sessionId, h) generateKeyMaterial(key, d.keyTag, K, H, sessionId, h)
generateKeyMaterial(macKey, d.macKeyTag, K, H, sessionId, h) generateKeyMaterial(macKey, d.macKeyTag, K, H, sessionId, h)
c.mac = truncatingMAC{12, hmac.NewSHA1(macKey)} c.mac = truncatingMAC{12, hmac.New(sha1.New, macKey)}
cipher, err := cipherMode.createCipher(key, iv) cipher, err := cipherMode.createCipher(key, iv)
if err != nil { if err != nil {
...@@ -328,6 +329,8 @@ func (t truncatingMAC) Size() int { ...@@ -328,6 +329,8 @@ func (t truncatingMAC) Size() int {
return t.length return t.length
} }
func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
// maxVersionStringBytes is the maximum number of bytes that we'll accept as a // maxVersionStringBytes is the maximum number of bytes that we'll accept as a
// version string. In the event that the client is talking a different protocol // version string. In the event that the client is talking a different protocol
// we need to set a limit otherwise we will keep using more and more memory // we need to set a limit otherwise we will keep using more and more memory
...@@ -337,7 +340,7 @@ const maxVersionStringBytes = 1024 ...@@ -337,7 +340,7 @@ const maxVersionStringBytes = 1024
// Read version string as specified by RFC 4253, section 4.2. // Read version string as specified by RFC 4253, section 4.2.
func readVersion(r io.Reader) ([]byte, error) { func readVersion(r io.Reader) ([]byte, error) {
versionString := make([]byte, 0, 64) versionString := make([]byte, 0, 64)
var ok, seenCR bool var ok bool
var buf [1]byte var buf [1]byte
forEachByte: forEachByte:
for len(versionString) < maxVersionStringBytes { for len(versionString) < maxVersionStringBytes {
...@@ -345,27 +348,22 @@ forEachByte: ...@@ -345,27 +348,22 @@ forEachByte:
if err != nil { if err != nil {
return nil, err return nil, err
} }
b := buf[0] // The RFC says that the version should be terminated with \r\n
// but several SSH servers actually only send a \n.
if !seenCR { if buf[0] == '\n' {
if b == '\r' { ok = true
seenCR = true break forEachByte
}
} else {
if b == '\n' {
ok = true
break forEachByte
} else {
seenCR = false
}
} }
versionString = append(versionString, b) versionString = append(versionString, buf[0])
} }
if !ok { if !ok {
return nil, errors.New("failed to read version string") return nil, errors.New("ssh: failed to read version string")
} }
// We need to remove the CR from versionString // There might be a '\r' on the end which we should remove.
return versionString[:len(versionString)-1], nil if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' {
versionString = versionString[:len(versionString)-1]
}
return versionString, nil
} }
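Illustratively, and assuming access to the unexported readVersion (that is, from inside the ssh package's tests), both terminators are now accepted and the result never includes the line ending:

v1, _ := readVersion(bufio.NewReader(bytes.NewBufferString("SSH-2.0-Go\r\n"))) // "SSH-2.0-Go"
v2, _ := readVersion(bufio.NewReader(bytes.NewBufferString("SSH-2.0-Go\n")))   // "SSH-2.0-Go"
_, _ = v1, v2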
...@@ -11,7 +11,7 @@ import ( ...@@ -11,7 +11,7 @@ import (
) )
func TestReadVersion(t *testing.T) { func TestReadVersion(t *testing.T) {
buf := []byte(serverVersion) buf := serverVersion
result, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf))) result, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf)))
if err != nil { if err != nil {
t.Errorf("readVersion didn't read version correctly: %s", err) t.Errorf("readVersion didn't read version correctly: %s", err)
...@@ -21,6 +21,20 @@ func TestReadVersion(t *testing.T) { ...@@ -21,6 +21,20 @@ func TestReadVersion(t *testing.T) {
} }
} }
func TestReadVersionWithJustLF(t *testing.T) {
var buf []byte
buf = append(buf, serverVersion...)
buf = buf[:len(buf)-1]
buf[len(buf)-1] = '\n'
result, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf)))
if err != nil {
t.Error("readVersion failed to handle just a \n")
}
if !bytes.Equal(buf[:len(buf)-1], result) {
t.Errorf("version read did not match expected: got %x, want %x", result, buf[:len(buf)-1])
}
}
func TestReadVersionTooLong(t *testing.T) { func TestReadVersionTooLong(t *testing.T) {
buf := make([]byte, maxVersionStringBytes+1) buf := make([]byte, maxVersionStringBytes+1)
if _, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf))); err == nil { if _, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf))); err == nil {
...@@ -29,7 +43,7 @@ func TestReadVersionTooLong(t *testing.T) { ...@@ -29,7 +43,7 @@ func TestReadVersionTooLong(t *testing.T) {
} }
func TestReadVersionWithoutCRLF(t *testing.T) { func TestReadVersionWithoutCRLF(t *testing.T) {
buf := []byte(serverVersion) buf := serverVersion
buf = buf[:len(buf)-1] buf = buf[:len(buf)-1]
if _, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf))); err == nil { if _, err := readVersion(bufio.NewReader(bytes.NewBuffer(buf))); err == nil {
t.Error("readVersion did not notice \\n was missing") t.Error("readVersion did not notice \\n was missing")
......
...@@ -47,17 +47,17 @@ var tests = []struct { ...@@ -47,17 +47,17 @@ var tests = []struct {
var fset = token.NewFileSet() var fset = token.NewFileSet()
// TODO(gri) This functionality should be in token.Fileset. func getFile(filename string) (file *token.File) {
func getFile(filename string) *token.File { fset.Iterate(func(f *token.File) bool {
for f := range fset.Files() {
if f.Name() == filename { if f.Name() == filename {
return f file = f
return false // end iteration
} }
} return true
return nil })
return file
} }
// TODO(gri) This functionality should be in token.Fileset.
func getPos(filename string, offset int) token.Pos { func getPos(filename string, offset int) token.Pos {
if f := getFile(filename); f != nil { if f := getFile(filename); f != nil {
return f.Pos(offset) return f.Pos(offset)
...@@ -65,8 +65,6 @@ func getPos(filename string, offset int) token.Pos { ...@@ -65,8 +65,6 @@ func getPos(filename string, offset int) token.Pos {
return token.NoPos return token.NoPos
} }
// TODO(gri) Need to revisit parser interface. We should be able to use parser.ParseFiles
// or a similar function instead.
func parseFiles(t *testing.T, testname string, filenames []string) (map[string]*ast.File, error) { func parseFiles(t *testing.T, testname string, filenames []string) (map[string]*ast.File, error) {
files := make(map[string]*ast.File) files := make(map[string]*ast.File)
var errors scanner.ErrorList var errors scanner.ErrorList
...@@ -145,8 +143,6 @@ func eliminate(t *testing.T, expected map[token.Pos]string, errors error) { ...@@ -145,8 +143,6 @@ func eliminate(t *testing.T, expected map[token.Pos]string, errors error) {
for _, error := range errors.(scanner.ErrorList) { for _, error := range errors.(scanner.ErrorList) {
// error.Pos is a token.Position, but we want // error.Pos is a token.Position, but we want
// a token.Pos so we can do a map lookup // a token.Pos so we can do a map lookup
// TODO(gri) Need to move scanner.Errors over
// to use token.Pos and file set info.
pos := getPos(error.Pos.Filename, error.Pos.Offset) pos := getPos(error.Pos.Filename, error.Pos.Offset)
if msg, found := expected[pos]; found { if msg, found := expected[pos]; found {
// we expect a message at pos; check if it matches // we expect a message at pos; check if it matches
......
...@@ -460,29 +460,32 @@ func (p *gcParser) parseSignature() *Func { ...@@ -460,29 +460,32 @@ func (p *gcParser) parseSignature() *Func {
return &Func{Params: params, Results: results, IsVariadic: isVariadic} return &Func{Params: params, Results: results, IsVariadic: isVariadic}
} }
// MethodSpec = ( identifier | ExportedName ) Signature . // MethodOrEmbedSpec = Name [ Signature ] .
// //
func (p *gcParser) parseMethodSpec() *ast.Object { func (p *gcParser) parseMethodOrEmbedSpec() *ast.Object {
if p.tok == scanner.Ident { p.parseName()
p.expect(scanner.Ident) if p.tok == '(' {
} else { p.parseSignature()
p.parseExportedName() // TODO(gri) compute method object
return ast.NewObj(ast.Fun, "_")
} }
p.parseSignature() // TODO lookup name and return that type
return ast.NewObj(ast.Typ, "_")
// TODO(gri) compute method object
return ast.NewObj(ast.Fun, "_")
} }
// InterfaceType = "interface" "{" [ MethodList ] "}" . // InterfaceType = "interface" "{" [ MethodOrEmbedList ] "}" .
// MethodList = MethodSpec { ";" MethodSpec } . // MethodOrEmbedList = MethodOrEmbedSpec { ";" MethodOrEmbedSpec } .
// //
func (p *gcParser) parseInterfaceType() Type { func (p *gcParser) parseInterfaceType() Type {
var methods ObjList var methods ObjList
parseMethod := func() { parseMethod := func() {
meth := p.parseMethodSpec() switch m := p.parseMethodOrEmbedSpec(); m.Kind {
methods = append(methods, meth) case ast.Typ:
// TODO expand embedded methods
case ast.Fun:
methods = append(methods, m)
}
} }
p.expectKeyword("interface") p.expectKeyword("interface")
......
...@@ -2,9 +2,13 @@ ...@@ -2,9 +2,13 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package utf8 // Package utf8string provides an efficient way to index strings by rune rather than by byte.
package utf8string
import "errors" import (
"errors"
"unicode/utf8"
)
// String wraps a regular string with a small structure that provides more // String wraps a regular string with a small structure that provides more
// efficient indexing by code point index, as opposed to byte index. // efficient indexing by code point index, as opposed to byte index.
...@@ -37,10 +41,10 @@ func (s *String) Init(contents string) *String { ...@@ -37,10 +41,10 @@ func (s *String) Init(contents string) *String {
s.bytePos = 0 s.bytePos = 0
s.runePos = 0 s.runePos = 0
for i := 0; i < len(contents); i++ { for i := 0; i < len(contents); i++ {
if contents[i] >= RuneSelf { if contents[i] >= utf8.RuneSelf {
// Not ASCII. // Not ASCII.
s.numRunes = RuneCountInString(contents) s.numRunes = utf8.RuneCountInString(contents)
_, s.width = DecodeRuneInString(contents) _, s.width = utf8.DecodeRuneInString(contents)
s.nonASCII = i s.nonASCII = i
return s return s
} }
...@@ -121,7 +125,7 @@ func (s *String) At(i int) rune { ...@@ -121,7 +125,7 @@ func (s *String) At(i int) rune {
switch { switch {
case i == s.runePos-1: // backing up one rune case i == s.runePos-1: // backing up one rune
r, s.width = DecodeLastRuneInString(s.str[0:s.bytePos]) r, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])
s.runePos = i s.runePos = i
s.bytePos -= s.width s.bytePos -= s.width
return r return r
...@@ -130,16 +134,16 @@ func (s *String) At(i int) rune { ...@@ -130,16 +134,16 @@ func (s *String) At(i int) rune {
s.bytePos += s.width s.bytePos += s.width
fallthrough fallthrough
case i == s.runePos: case i == s.runePos:
r, s.width = DecodeRuneInString(s.str[s.bytePos:]) r, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])
return r return r
case i == 0: // start of string case i == 0: // start of string
r, s.width = DecodeRuneInString(s.str) r, s.width = utf8.DecodeRuneInString(s.str)
s.runePos = 0 s.runePos = 0
s.bytePos = 0 s.bytePos = 0
return r return r
case i == s.numRunes-1: // last rune in string case i == s.numRunes-1: // last rune in string
r, s.width = DecodeLastRuneInString(s.str) r, s.width = utf8.DecodeLastRuneInString(s.str)
s.runePos = i s.runePos = i
s.bytePos = len(s.str) - s.width s.bytePos = len(s.str) - s.width
return r return r
...@@ -175,7 +179,7 @@ func (s *String) At(i int) rune { ...@@ -175,7 +179,7 @@ func (s *String) At(i int) rune {
if forward { if forward {
// TODO: Is it much faster to use a range loop for this scan? // TODO: Is it much faster to use a range loop for this scan?
for { for {
r, s.width = DecodeRuneInString(s.str[s.bytePos:]) r, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])
if s.runePos == i { if s.runePos == i {
break break
} }
...@@ -184,7 +188,7 @@ func (s *String) At(i int) rune { ...@@ -184,7 +188,7 @@ func (s *String) At(i int) rune {
} }
} else { } else {
for { for {
r, s.width = DecodeLastRuneInString(s.str[0:s.bytePos]) r, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])
s.runePos-- s.runePos--
s.bytePos -= s.width s.bytePos -= s.width
if s.runePos == i { if s.runePos == i {
......
...@@ -2,14 +2,23 @@ ...@@ -2,14 +2,23 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package utf8_test package utf8string
import ( import (
"math/rand" "math/rand"
"testing" "testing"
. "unicode/utf8" "unicode/utf8"
) )
var testStrings = []string{
"",
"abcd",
"☺☻☹",
"日a本b語ç日ð本Ê語þ日¥本¼語i日©",
"日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©",
"\x80\x80\x80\x80",
}
func TestScanForwards(t *testing.T) { func TestScanForwards(t *testing.T) {
for _, s := range testStrings { for _, s := range testStrings {
runes := []rune(s) runes := []rune(s)
...@@ -106,7 +115,7 @@ func TestLimitSliceAccess(t *testing.T) { ...@@ -106,7 +115,7 @@ func TestLimitSliceAccess(t *testing.T) {
if str.Slice(0, 0) != "" { if str.Slice(0, 0) != "" {
t.Error("failure with empty slice at beginning") t.Error("failure with empty slice at beginning")
} }
nr := RuneCountInString(s) nr := utf8.RuneCountInString(s)
if str.Slice(nr, nr) != "" { if str.Slice(nr, nr) != "" {
t.Error("failure with empty slice at end") t.Error("failure with empty slice at end")
} }
......
...@@ -508,27 +508,28 @@ func BenchmarkSprintfFloat(b *testing.B) { ...@@ -508,27 +508,28 @@ func BenchmarkSprintfFloat(b *testing.B) {
var mallocBuf bytes.Buffer var mallocBuf bytes.Buffer
// gccgo numbers are different because gccgo does not have escape
// analysis yet.
var mallocTest = []struct { var mallocTest = []struct {
count int count int
desc string desc string
fn func() fn func()
}{ }{
{0, `Sprintf("")`, func() { Sprintf("") }}, {5, `Sprintf("")`, func() { Sprintf("") }},
{1, `Sprintf("xxx")`, func() { Sprintf("xxx") }}, {5, `Sprintf("xxx")`, func() { Sprintf("xxx") }},
{1, `Sprintf("%x")`, func() { Sprintf("%x", 7) }}, {5, `Sprintf("%x")`, func() { Sprintf("%x", 7) }},
{2, `Sprintf("%s")`, func() { Sprintf("%s", "hello") }}, {5, `Sprintf("%s")`, func() { Sprintf("%s", "hello") }},
{1, `Sprintf("%x %x")`, func() { Sprintf("%x %x", 7, 112) }}, {5, `Sprintf("%x %x")`, func() { Sprintf("%x %x", 7, 112) }},
{1, `Sprintf("%g")`, func() { Sprintf("%g", 3.14159) }}, // For %g we use a float32, not float64, to guarantee passing the argument
{0, `Fprintf(buf, "%x %x %x")`, func() { mallocBuf.Reset(); Fprintf(&mallocBuf, "%x %x %x", 7, 8, 9) }}, // does not need to allocate memory to store the result in a pointer-sized word.
{1, `Fprintf(buf, "%s")`, func() { mallocBuf.Reset(); Fprintf(&mallocBuf, "%s", "hello") }}, {20, `Sprintf("%g")`, func() { Sprintf("%g", float32(3.14159)) }},
{5, `Fprintf(buf, "%x %x %x")`, func() { mallocBuf.Reset(); Fprintf(&mallocBuf, "%x %x %x", 7, 8, 9) }},
{5, `Fprintf(buf, "%s")`, func() { mallocBuf.Reset(); Fprintf(&mallocBuf, "%s", "hello") }},
} }
var _ bytes.Buffer var _ bytes.Buffer
func TestCountMallocs(t *testing.T) { func TestCountMallocs(t *testing.T) {
if testing.Short() {
return
}
for _, mt := range mallocTest { for _, mt := range mallocTest {
const N = 100 const N = 100
runtime.UpdateMemStats() runtime.UpdateMemStats()
...@@ -538,7 +539,7 @@ func TestCountMallocs(t *testing.T) { ...@@ -538,7 +539,7 @@ func TestCountMallocs(t *testing.T) {
} }
runtime.UpdateMemStats() runtime.UpdateMemStats()
mallocs += runtime.MemStats.Mallocs mallocs += runtime.MemStats.Mallocs
if mallocs/N != uint64(mt.count) { if mallocs/N > uint64(mt.count) {
t.Errorf("%s: expected %d mallocs, got %d", mt.desc, mt.count, mallocs/N) t.Errorf("%s: expected %d mallocs, got %d", mt.desc, mt.count, mallocs/N)
} }
} }
......
...@@ -105,14 +105,14 @@ func FindTree(path string) (tree *Tree, pkg string, err error) { ...@@ -105,14 +105,14 @@ func FindTree(path string) (tree *Tree, pkg string, err error) {
continue continue
} }
tree = t tree = t
pkg = path[len(tpath):] pkg = filepath.ToSlash(path[len(tpath):])
return return
} }
err = fmt.Errorf("path %q not inside a GOPATH", path) err = fmt.Errorf("path %q not inside a GOPATH", path)
return return
} }
tree = defaultTree tree = defaultTree
pkg = path pkg = filepath.ToSlash(path)
for _, t := range Path { for _, t := range Path {
if t.HasSrc(pkg) { if t.HasSrc(pkg) {
tree = t tree = t
......
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgtest package pkgtest
import "fmt" import "fmt"
......
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgtest_test package pkgtest_test
import "pkgtest" import "pkgtest"
......
...@@ -68,7 +68,8 @@ var ( ...@@ -68,7 +68,8 @@ var (
html_endp = []byte("</p>\n") html_endp = []byte("</p>\n")
html_pre = []byte("<pre>") html_pre = []byte("<pre>")
html_endpre = []byte("</pre>\n") html_endpre = []byte("</pre>\n")
html_h = []byte("<h3>") html_h = []byte(`<h3 id="`)
html_hq = []byte(`">`)
html_endh = []byte("</h3>\n") html_endh = []byte("</h3>\n")
) )
...@@ -225,6 +226,12 @@ type block struct { ...@@ -225,6 +226,12 @@ type block struct {
lines []string lines []string
} }
var nonAlphaNumRx = regexp.MustCompile(`[^a-zA-Z0-9]`)
func anchorID(line string) string {
return nonAlphaNumRx.ReplaceAllString(line, "_")
}
// ToHTML converts comment text to formatted HTML. // ToHTML converts comment text to formatted HTML.
// The comment was prepared by DocReader, // The comment was prepared by DocReader,
// so it is known not to have leading, trailing blank lines // so it is known not to have leading, trailing blank lines
...@@ -253,9 +260,18 @@ func ToHTML(w io.Writer, text string, words map[string]string) { ...@@ -253,9 +260,18 @@ func ToHTML(w io.Writer, text string, words map[string]string) {
w.Write(html_endp) w.Write(html_endp)
case opHead: case opHead:
w.Write(html_h) w.Write(html_h)
id := ""
for _, line := range b.lines { for _, line := range b.lines {
if id == "" {
id = anchorID(line)
w.Write([]byte(id))
w.Write(html_hq)
}
commentEscape(w, line, true) commentEscape(w, line, true)
} }
if id == "" {
w.Write(html_hq)
}
w.Write(html_endh) w.Write(html_endh)
case opPre: case opPre:
w.Write(html_pre) w.Write(html_pre)
......
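To illustrate the new heading anchors: anchorID replaces every character outside [a-zA-Z0-9] with an underscore, and ToHTML writes the result as the h3 id. With a hypothetical heading line:

anchorID("Heading with spaces & punctuation!") // "Heading_with_spaces___punctuation_"
// ToHTML would then emit: <h3 id="Heading_with_spaces___punctuation_">...</h3>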
...@@ -15,7 +15,7 @@ type Package struct { ...@@ -15,7 +15,7 @@ type Package struct {
Doc string Doc string
Name string Name string
ImportPath string ImportPath string
Imports []string // TODO(gri) this field is not computed at the moment Imports []string
Filenames []string Filenames []string
Consts []*Value Consts []*Value
Types []*Type Types []*Type
...@@ -36,8 +36,8 @@ type Value struct { ...@@ -36,8 +36,8 @@ type Value struct {
type Method struct { type Method struct {
*Func *Func
// TODO(gri) The following fields are not set at the moment. // TODO(gri) The following fields are not set at the moment.
Recv *Type // original receiver base type Origin *Type // original receiver base type
Level int // embedding level; 0 means Func is not embedded Level int // embedding level; 0 means Func is not embedded
} }
// Type is the documentation for type declaration. // Type is the documentation for type declaration.
......
...@@ -6,132 +6,110 @@ package doc ...@@ -6,132 +6,110 @@ package doc
import ( import (
"bytes" "bytes"
"fmt" "flag"
"go/ast"
"go/parser" "go/parser"
"go/printer"
"go/token" "go/token"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing" "testing"
"text/template" "text/template"
) )
type sources map[string]string // filename -> file contents var update = flag.Bool("update", false, "update golden (.out) files")
type testCase struct { const dataDir = "testdata"
name string
importPath string var templateTxt = readTemplate("template.txt")
mode Mode
srcs sources func readTemplate(filename string) *template.Template {
doc string t := template.New(filename)
t.Funcs(template.FuncMap{
"node": nodeFmt,
"synopsis": synopsisFmt,
})
return template.Must(t.ParseFiles(filepath.Join(dataDir, filename)))
} }
var tests = make(map[string]*testCase) func nodeFmt(node interface{}, fset *token.FileSet) string {
var buf bytes.Buffer
// To register a new test case, use the pattern: printer.Fprint(&buf, fset, node)
// return strings.Replace(strings.TrimSpace(buf.String()), "\n", "\n\t", -1)
// var _ = register(&testCase{ ... })
//
// (The result value of register is always 0 and only present to enable the pattern.)
//
func register(test *testCase) int {
if _, found := tests[test.name]; found {
panic(fmt.Sprintf("registration failed: test case %q already exists", test.name))
}
tests[test.name] = test
return 0
} }
func runTest(t *testing.T, test *testCase) { func synopsisFmt(s string) string {
// create AST const n = 64
fset := token.NewFileSet() if len(s) > n {
var pkg ast.Package // cut off excess text and go back to a word boundary
pkg.Files = make(map[string]*ast.File) s = s[0:n]
for filename, src := range test.srcs { if i := strings.LastIndexAny(s, "\t\n "); i >= 0 {
file, err := parser.ParseFile(fset, filename, src, parser.ParseComments) s = s[0:i]
if err != nil {
t.Errorf("test %s: %v", test.name, err)
return
}
switch {
case pkg.Name == "":
pkg.Name = file.Name.Name
case pkg.Name != file.Name.Name:
t.Errorf("test %s: different package names in test files", test.name)
return
} }
pkg.Files[filename] = file s = strings.TrimSpace(s) + " ..."
} }
return "// " + strings.Replace(s, "\n", " ", -1)
}
doc := New(&pkg, test.importPath, test.mode).String() func isGoFile(fi os.FileInfo) bool {
if doc != test.doc { name := fi.Name()
//TODO(gri) Enable this once the sorting issue of comments is fixed return !fi.IsDir() &&
//t.Errorf("test %s\n\tgot : %s\n\twant: %s", test.name, doc, test.doc) len(name) > 0 && name[0] != '.' && // ignore .files
} filepath.Ext(name) == ".go"
}
type bundle struct {
*Package
FSet *token.FileSet
} }
func Test(t *testing.T) { func Test(t *testing.T) {
for _, test := range tests { // get all packages
runTest(t, test) fset := token.NewFileSet()
pkgs, err := parser.ParseDir(fset, dataDir, isGoFile, parser.ParseComments)
if err != nil {
t.Fatal(err)
} }
}
// ---------------------------------------------------------------------------- // test all packages
// Printing support for _, pkg := range pkgs {
importpath := dataDir + "/" + pkg.Name
doc := New(pkg, importpath, 0)
func (pkg *Package) String() string { // golden files always use / in filenames - canonicalize them
var buf bytes.Buffer for i, filename := range doc.Filenames {
docText.Execute(&buf, pkg) // ignore error - test will fail w/ incorrect output doc.Filenames[i] = filepath.ToSlash(filename)
return buf.String() }
}
// print documentation
var buf bytes.Buffer
if err := templateTxt.Execute(&buf, bundle{doc, fset}); err != nil {
t.Error(err)
continue
}
got := buf.Bytes()
// update golden file if necessary
golden := filepath.Join(dataDir, pkg.Name+".out")
if *update {
err := ioutil.WriteFile(golden, got, 0644)
if err != nil {
t.Error(err)
}
continue
}
// TODO(gri) complete template // get golden file
var docText = template.Must(template.New("docText").Parse( want, err := ioutil.ReadFile(golden)
` if err != nil {
PACKAGE {{.Name}} t.Error(err)
DOC {{printf "%q" .Doc}} continue
IMPORTPATH {{.ImportPath}} }
FILENAMES {{.Filenames}}
`)) // compare
if bytes.Compare(got, want) != 0 {
// ---------------------------------------------------------------------------- t.Errorf("package %s\n\tgot:\n%s\n\twant:\n%s", pkg.Name, got, want)
// Test cases }
}
// Test that all package comments and bugs are collected, }
// and that the importPath is correctly set.
//
var _ = register(&testCase{
name: "p",
importPath: "p",
srcs: sources{
"p1.go": "// comment 1\npackage p\n//BUG(uid): bug1",
"p0.go": "// comment 0\npackage p\n// BUG(uid): bug0",
},
doc: `
PACKAGE p
DOC "comment 0\n\ncomment 1\n"
IMPORTPATH p
FILENAMES [p0.go p1.go]
`,
})
// Test basic functionality.
//
var _ = register(&testCase{
name: "p1",
importPath: "p",
srcs: sources{
"p.go": `
package p
import "a"
const pi = 3.14 // pi
type T struct{} // T
var V T // v
func F(x int) int {} // F
`,
},
doc: `
PACKAGE p
DOC ""
IMPORTPATH p
FILENAMES [p.go]
`,
})
...@@ -124,6 +124,9 @@ func (doc *docReader) filterType(tinfo *typeInfo, typ ast.Expr) bool { ...@@ -124,6 +124,9 @@ func (doc *docReader) filterType(tinfo *typeInfo, typ ast.Expr) bool {
func (doc *docReader) filterSpec(spec ast.Spec) bool { func (doc *docReader) filterSpec(spec ast.Spec) bool {
switch s := spec.(type) { switch s := spec.(type) {
case *ast.ImportSpec:
// always keep imports so we can collect them
return true
case *ast.ValueSpec: case *ast.ValueSpec:
s.Names = filterIdentList(s.Names) s.Names = filterIdentList(s.Names)
if len(s.Names) > 0 { if len(s.Names) > 0 {
......
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
"go/token" "go/token"
"regexp" "regexp"
"sort" "sort"
"strconv"
) )
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
...@@ -55,6 +56,7 @@ type docReader struct { ...@@ -55,6 +56,7 @@ type docReader struct {
doc *ast.CommentGroup // package documentation, if any doc *ast.CommentGroup // package documentation, if any
pkgName string pkgName string
mode Mode mode Mode
imports map[string]int
values []*ast.GenDecl // consts and vars values []*ast.GenDecl // consts and vars
types map[string]*typeInfo types map[string]*typeInfo
embedded map[string]*typeInfo // embedded types, possibly not exported embedded map[string]*typeInfo // embedded types, possibly not exported
...@@ -65,6 +67,7 @@ type docReader struct { ...@@ -65,6 +67,7 @@ type docReader struct {
func (doc *docReader) init(pkgName string, mode Mode) { func (doc *docReader) init(pkgName string, mode Mode) {
doc.pkgName = pkgName doc.pkgName = pkgName
doc.mode = mode doc.mode = mode
doc.imports = make(map[string]int)
doc.types = make(map[string]*typeInfo) doc.types = make(map[string]*typeInfo)
doc.embedded = make(map[string]*typeInfo) doc.embedded = make(map[string]*typeInfo)
doc.funcs = make(map[string]*ast.FuncDecl) doc.funcs = make(map[string]*ast.FuncDecl)
...@@ -244,6 +247,13 @@ func (doc *docReader) addDecl(decl ast.Decl) { ...@@ -244,6 +247,13 @@ func (doc *docReader) addDecl(decl ast.Decl) {
case *ast.GenDecl: case *ast.GenDecl:
if len(d.Specs) > 0 { if len(d.Specs) > 0 {
switch d.Tok { switch d.Tok {
case token.IMPORT:
// imports are handled individually
for _, spec := range d.Specs {
if import_, err := strconv.Unquote(spec.(*ast.ImportSpec).Path.Value); err == nil {
doc.imports[import_] = 1
}
}
case token.CONST, token.VAR: case token.CONST, token.VAR:
// constants and variables are always handled as a group // constants and variables are always handled as a group
doc.addValue(d) doc.addValue(d)
...@@ -346,6 +356,17 @@ func (doc *docReader) addFile(src *ast.File) { ...@@ -346,6 +356,17 @@ func (doc *docReader) addFile(src *ast.File) {
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Conversion to external representation // Conversion to external representation
func (doc *docReader) makeImports() []string {
list := make([]string, len(doc.imports))
i := 0
for import_ := range doc.imports {
list[i] = import_
i++
}
sort.Strings(list)
return list
}
type sortValue []*Value type sortValue []*Value
func (p sortValue) Len() int { return len(p) } func (p sortValue) Len() int { return len(p) }
...@@ -661,6 +682,7 @@ func (doc *docReader) newDoc(importpath string, filenames []string) *Package { ...@@ -661,6 +682,7 @@ func (doc *docReader) newDoc(importpath string, filenames []string) *Package {
// doc.funcs and thus must be called before any other // doc.funcs and thus must be called before any other
// function consuming those lists // function consuming those lists
p.Types = doc.makeTypes(doc.types) p.Types = doc.makeTypes(doc.types)
p.Imports = doc.makeImports()
p.Consts = makeValues(doc.values, token.CONST) p.Consts = makeValues(doc.values, token.CONST)
p.Vars = makeValues(doc.values, token.VAR) p.Vars = makeValues(doc.values, token.VAR)
p.Funcs = makeFuncs(doc.funcs) p.Funcs = makeFuncs(doc.funcs)
......
// comment 0 comment 1
PACKAGE a
IMPORTPATH
testdata/a
FILENAMES
testdata/a0.go
testdata/a1.go
BUGS
// bug0
// bug1
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// comment 0
package a
//BUG(uid): bug0
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// comment 1
package a
//BUG(uid): bug1
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package b
import "a"
const Pi = 3.14 // Pi
var MaxInt int // MaxInt
type T struct{} // T
var V T // v
func F(x int) int {} // F
//
PACKAGE b
IMPORTPATH
testdata/b
IMPORTS
a
FILENAMES
testdata/b.go
CONSTANTS
//
const Pi = 3.14 // Pi
VARIABLES
//
var MaxInt int // MaxInt
FUNCTIONS
//
func F(x int) int
TYPES
//
type T struct{} // T
//
var V T // v
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing
import (
"flag"
"fmt"
"os"
"runtime"
"time"
)
var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
// An internal type but exported because it is cross-package; part of the implementation
// of gotest.
type InternalBenchmark struct {
Name string
F func(b *B)
}
// B is a type passed to Benchmark functions to manage benchmark
// timing and to specify the number of iterations to run.
type B struct {
common
N int
benchmark InternalBenchmark
bytes int64
timerOn bool
result BenchmarkResult
}
// StartTimer starts timing a test. This function is called automatically
// before a benchmark starts, but it can also be used to resume timing after
// a call to StopTimer.
func (b *B) StartTimer() {
if !b.timerOn {
b.start = time.Now()
b.timerOn = true
}
}
// StopTimer stops timing a test. This can be used to pause the timer
// while performing complex initialization that you don't
// want to measure.
func (b *B) StopTimer() {
if b.timerOn {
b.duration += time.Now().Sub(b.start)
b.timerOn = false
}
}
// ResetTimer sets the elapsed benchmark time to zero.
// It does not affect whether the timer is running.
func (b *B) ResetTimer() {
if b.timerOn {
b.start = time.Now()
}
b.duration = 0
}
// SetBytes records the number of bytes processed in a single operation.
// If this is called, the benchmark will report ns/op and MB/s.
func (b *B) SetBytes(n int64) { b.bytes = n }
func (b *B) nsPerOp() int64 {
if b.N <= 0 {
return 0
}
return b.duration.Nanoseconds() / int64(b.N)
}
// runN runs a single benchmark for the specified number of iterations.
func (b *B) runN(n int) {
// Try to get a comparable environment for each run
// by clearing garbage from previous runs.
runtime.GC()
b.N = n
b.ResetTimer()
b.StartTimer()
b.benchmark.F(b)
b.StopTimer()
}
func min(x, y int) int {
if x > y {
return y
}
return x
}
func max(x, y int) int {
if x < y {
return y
}
return x
}
// roundDown10 rounds a number down to the nearest power of 10.
func roundDown10(n int) int {
var tens = 0
// tens = floor(log_10(n))
for n > 10 {
n = n / 10
tens++
}
// result = 10^tens
result := 1
for i := 0; i < tens; i++ {
result *= 10
}
return result
}
// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
func roundUp(n int) int {
base := roundDown10(n)
if n < (2 * base) {
return 2 * base
}
if n < (5 * base) {
return 5 * base
}
return 10 * base
}
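// Illustrative worked values (commentary added here, not in the original
// source): roundDown10(137) == 100 and roundUp(137) == 200, while
// roundUp(4500) == 5000, so planned iteration counts land on a 1-2-5 sequence.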
// run times the benchmark function in a separate goroutine.
func (b *B) run() BenchmarkResult {
go b.launch()
<-b.signal
return b.result
}
// launch launches the benchmark function. It gradually increases the number
// of benchmark iterations until the benchmark runs for a second in order
// to get a reasonable measurement. It prints timing information in this form
// testing.BenchmarkHello 100000 19 ns/op
// launch is run by the run function as a separate goroutine.
func (b *B) launch() {
// Run the benchmark for a single iteration in case it's expensive.
n := 1
// Signal that we're done whether we return normally
// or by FailNow's runtime.Goexit.
defer func() {
b.signal <- b
}()
b.runN(n)
// Run the benchmark for at least the specified amount of time.
d := time.Duration(*benchTime * float64(time.Second))
for !b.failed && b.duration < d && n < 1e9 {
last := n
// Predict iterations/sec.
if b.nsPerOp() == 0 {
n = 1e9
} else {
n = int(d.Nanoseconds() / b.nsPerOp())
}
// Run more iterations than we think we'll need for a second (1.5x).
// Don't grow too fast in case we had timing errors previously.
// Be sure to run at least one more than last time.
n = max(min(n+n/2, 100*last), last+1)
// Round up to something easy to read.
n = roundUp(n)
b.runN(n)
}
b.result = BenchmarkResult{b.N, b.duration, b.bytes}
}
// The results of a benchmark run.
type BenchmarkResult struct {
N int // The number of iterations.
T time.Duration // The total time taken.
Bytes int64 // Bytes processed in one iteration.
}
func (r BenchmarkResult) NsPerOp() int64 {
if r.N <= 0 {
return 0
}
return r.T.Nanoseconds() / int64(r.N)
}
func (r BenchmarkResult) mbPerSec() float64 {
if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
return 0
}
return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
}
func (r BenchmarkResult) String() string {
mbs := r.mbPerSec()
mb := ""
if mbs != 0 {
mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
}
nsop := r.NsPerOp()
ns := fmt.Sprintf("%10d ns/op", nsop)
if r.N > 0 && nsop < 100 {
// The format specifiers here make sure that
// the ones digits line up for all three possible formats.
if nsop < 10 {
ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
} else {
ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
}
}
return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
}
// An internal function but exported because it is cross-package; part of the implementation
// of gotest.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
// If no flag was specified, don't run benchmarks.
if len(*matchBenchmarks) == 0 {
return
}
for _, Benchmark := range benchmarks {
matched, err := matchString(*matchBenchmarks, Benchmark.Name)
if err != nil {
fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.bench: %s\n", err)
os.Exit(1)
}
if !matched {
continue
}
for _, procs := range cpuList {
runtime.GOMAXPROCS(procs)
b := &B{
common: common{
signal: make(chan interface{}),
},
benchmark: Benchmark,
}
benchName := Benchmark.Name
if procs != 1 {
benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs)
}
fmt.Printf("%s\t", benchName)
r := b.run()
if b.failed {
// The output could be very long here, but probably isn't.
// We print it all, regardless, because we don't want to trim the reason
// the benchmark failed.
fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
continue
}
fmt.Printf("%v\n", r)
// Unlike with tests, we ignore the -chatty flag and always print output for
// benchmarks since the output generation time will skew the results.
if len(b.output) > 0 {
b.trimOutput()
fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
}
if p := runtime.GOMAXPROCS(-1); p != procs {
fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
}
}
}
}
// trimOutput shortens the output from a benchmark, which can be very long.
func (b *B) trimOutput() {
// The output is likely to appear multiple times because the benchmark
// is run multiple times, but at least it will be seen. This is not a big deal
// because benchmarks rarely print, but just in case, we trim it if it's too long.
const maxNewlines = 10
for nlCount, j := 0, 0; j < len(b.output); j++ {
if b.output[j] == '\n' {
nlCount++
if nlCount >= maxNewlines {
b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
break
}
}
}
}
// Benchmark benchmarks a single function. Useful for creating
// custom benchmarks that do not use gotest.
func Benchmark(f func(b *B)) BenchmarkResult {
b := &B{
common: common{
signal: make(chan interface{}),
},
benchmark: InternalBenchmark{"", f},
}
return b.run()
}
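As a usage sketch (not part of the file above), Benchmark lets an ordinary program time a function and print the result; combined with SetBytes the result also reports throughput. The numbers shown are illustrative:

r := testing.Benchmark(func(b *testing.B) {
	buf := make([]byte, 64<<10)
	b.SetBytes(int64(len(buf)))
	for i := 0; i < b.N; i++ {
		_ = append([]byte(nil), buf...) // copy 64 KB per iteration
	}
})
fmt.Println(r) // e.g.:   50000	     30000 ns/op	2184.53 MB/s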
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"time"
)
type InternalExample struct {
Name string
F func()
Output string
}
func RunExamples(examples []InternalExample) (ok bool) {
ok = true
var eg InternalExample
stdout, stderr := os.Stdout, os.Stderr
defer func() {
os.Stdout, os.Stderr = stdout, stderr
if e := recover(); e != nil {
fmt.Printf("--- FAIL: %s\npanic: %v\n", eg.Name, e)
os.Exit(1)
}
}()
for _, eg = range examples {
if *chatty {
fmt.Printf("=== RUN: %s\n", eg.Name)
}
// capture stdout and stderr
r, w, err := os.Pipe()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
os.Stdout, os.Stderr = w, w
outC := make(chan string)
go func() {
buf := new(bytes.Buffer)
_, err := io.Copy(buf, r)
if err != nil {
fmt.Fprintf(stderr, "testing: copying pipe: %v\n", err)
os.Exit(1)
}
outC <- buf.String()
}()
// run example
t0 := time.Now()
eg.F()
dt := time.Now().Sub(t0)
// close pipe, restore stdout/stderr, get output
w.Close()
os.Stdout, os.Stderr = stdout, stderr
out := <-outC
// report any errors
tstr := fmt.Sprintf("(%.2f seconds)", dt.Seconds())
if g, e := strings.TrimSpace(out), strings.TrimSpace(eg.Output); g != e {
fmt.Printf("--- FAIL: %s %s\ngot:\n%s\nwant:\n%s\n",
eg.Name, tstr, g, e)
ok = false
} else if *chatty {
fmt.Printf("--- PASS: %s %s\n", eg.Name, tstr)
}
}
return
}
{{synopsis .Doc}}
PACKAGE {{.Name}}
IMPORTPATH
{{.ImportPath}}
{{with .Imports}}IMPORTS
{{range .}} {{.}}
{{end}}
{{end}}{{/*
*/}}FILENAMES
{{range .Filenames}} {{.}}
{{end}}{{/*
*/}}{{with .Consts}}
CONSTANTS
{{range .}} {{synopsis .Doc}}
{{node .Decl $.FSet}}
{{end}}{{end}}{{/*
*/}}{{with .Vars}}
VARIABLES
{{range .}} {{synopsis .Doc}}
{{node .Decl $.FSet}}
{{end}}{{end}}{{/*
*/}}{{with .Funcs}}
FUNCTIONS
{{range .}} {{synopsis .Doc}}
{{node .Decl $.FSet}}
{{end}}{{end}}{{/*
*/}}{{with .Types}}
TYPES
{{range .}} {{synopsis .Doc}}
{{node .Decl $.FSet}}
{{range .Consts}} {{synopsis .Doc}}
{{node .Decl $.FSet}}
{{end}}{{/*
*/}}{{range .Vars}} {{synopsis .Doc}}
{{node .Decl $.FSet}}
{{end}}{{/*
*/}}{{range .Funcs}} {{synopsis .Doc}}
{{node .Decl $.FSet}}
{{end}}{{/*
*/}}{{range .Methods}} {{synopsis .Doc}}
{{node .Decl $.FSet}}
{{end}}{{end}}{{end}}{{/*
*/}}{{with .Bugs}}
BUGS
{{range .}} {{synopsis .}}
{{end}}{{end}}
\ No newline at end of file
// Package testing provides support for automated testing of Go ...
PACKAGE testing
IMPORTPATH
testdata/testing
IMPORTS
bytes
flag
fmt
io
os
runtime
runtime/pprof
strconv
strings
time
FILENAMES
testdata/benchmark.go
testdata/example.go
testdata/testing.go
FUNCTIONS
// An internal function but exported because it is cross-package; ...
func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
// An internal function but exported because it is cross-package; ...
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
//
func RunExamples(examples []InternalExample) (ok bool)
//
func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
// Short reports whether the -test.short flag is set.
func Short() bool
TYPES
// B is a type passed to Benchmark functions to manage benchmark ...
type B struct {
N int
// contains filtered or unexported fields
}
// Error is equivalent to Log() followed by Fail().
func (c *B) Error(args ...interface{})
// Errorf is equivalent to Logf() followed by Fail().
func (c *B) Errorf(format string, args ...interface{})
// Fail marks the function as having failed but continues ...
func (c *B) Fail()
// FailNow marks the function as having failed and stops its ...
func (c *B) FailNow()
// Failed returns whether the function has failed.
func (c *B) Failed() bool
// Fatal is equivalent to Log() followed by FailNow().
func (c *B) Fatal(args ...interface{})
// Fatalf is equivalent to Logf() followed by FailNow().
func (c *B) Fatalf(format string, args ...interface{})
// Log formats its arguments using default formatting, analogous ...
func (c *B) Log(args ...interface{})
// Logf formats its arguments according to the format, analogous ...
func (c *B) Logf(format string, args ...interface{})
// ResetTimer sets the elapsed benchmark time to zero. It does not ...
func (b *B) ResetTimer()
// SetBytes records the number of bytes processed in a single ...
func (b *B) SetBytes(n int64)
// StartTimer starts timing a test. This function is called ...
func (b *B) StartTimer()
// StopTimer stops timing a test. This can be used to pause the ...
func (b *B) StopTimer()
// The results of a benchmark run.
type BenchmarkResult struct {
N int // The number of iterations.
T time.Duration // The total time taken.
Bytes int64 // Bytes processed in one iteration.
}
// Benchmark benchmarks a single function. Useful for creating ...
func Benchmark(f func(b *B)) BenchmarkResult
//
func (r BenchmarkResult) NsPerOp() int64
//
func (r BenchmarkResult) String() string
// An internal type but exported because it is cross-package; part ...
type InternalBenchmark struct {
Name string
F func(b *B)
}
//
type InternalExample struct {
Name string
F func()
Output string
}
// An internal type but exported because it is cross-package; part ...
type InternalTest struct {
Name string
F func(*T)
}
// T is a type passed to Test functions to manage test state and ...
type T struct {
// contains filtered or unexported fields
}
// Error is equivalent to Log() followed by Fail().
func (c *T) Error(args ...interface{})
// Errorf is equivalent to Logf() followed by Fail().
func (c *T) Errorf(format string, args ...interface{})
// Fail marks the function as having failed but continues ...
func (c *T) Fail()
// FailNow marks the function as having failed and stops its ...
func (c *T) FailNow()
// Failed returns whether the function has failed.
func (c *T) Failed() bool
// Fatal is equivalent to Log() followed by FailNow().
func (c *T) Fatal(args ...interface{})
// Fatalf is equivalent to Logf() followed by FailNow().
func (c *T) Fatalf(format string, args ...interface{})
// Log formats its arguments using default formatting, analogous ...
func (c *T) Log(args ...interface{})
// Logf formats its arguments according to the format, analogous ...
func (c *T) Logf(format string, args ...interface{})
// Parallel signals that this test is to be run in parallel with ...
func (t *T) Parallel()
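The listing above is the golden go/doc output for the bundled testdata copy of the testing package. For orientation only, here is a minimal sketch of how that API is normally driven; the Sum helper and the file layout are illustrative, not part of this change.

package sum_test

import "testing"

// Sum is a hypothetical function under test, defined here only for the sketch.
func Sum(a, b int) int { return a + b }

// TestSum reports a mismatch via Errorf (Logf followed by Fail) and keeps running.
func TestSum(t *testing.T) {
    if got := Sum(2, 3); got != 5 {
        t.Errorf("Sum(2, 3) = %d, want 5", got)
    }
}

// BenchmarkSum runs the loop body b.N times; the framework chooses N.
func BenchmarkSum(b *testing.B) {
    for i := 0; i < b.N; i++ {
        Sum(2, 3)
    }
}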
...@@ -135,8 +135,10 @@ func ParseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool, m ...@@ -135,8 +135,10 @@ func ParseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool, m
// //
func ParseExpr(x string) (ast.Expr, error) { func ParseExpr(x string) (ast.Expr, error) {
// parse x within the context of a complete package for correct scopes; // parse x within the context of a complete package for correct scopes;
// use //line directive for correct positions in error messages // use //line directive for correct positions in error messages and put
file, err := ParseFile(token.NewFileSet(), "", "package p;func _(){_=\n//line :1\n"+x+";}", 0) // x alone on a separate line (handles line comments), followed by a ';'
// to force an error if the expression is incomplete
file, err := ParseFile(token.NewFileSet(), "", "package p;func _(){_=\n//line :1\n"+x+"\n;}", 0)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
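The rewritten ParseFile source string above puts x on its own line, so a trailing line comment inside x can no longer swallow the closing brace. A hedged sketch of calling ParseExpr from client code; the expression text is illustrative.

package main

import (
    "fmt"
    "go/ast"
    "go/parser"
)

func main() {
    // The trailing // comment is now harmless.
    expr, err := parser.ParseExpr("a + b*2 // scaled")
    if err != nil {
        fmt.Println("parse error:", err)
        return
    }
    ast.Inspect(expr, func(n ast.Node) bool {
        if n != nil {
            fmt.Printf("%T\n", n) // e.g. *ast.BinaryExpr, *ast.Ident
        }
        return true
    })
}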
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
// source which can then be tokenized through repeated calls to the Scan // source which can then be tokenized through repeated calls to the Scan
// function. Typical use: // function. Typical use:
// //
// var s Scanner // var s scanner.Scanner
// fset := token.NewFileSet() // position information is relative to fset // fset := token.NewFileSet() // position information is relative to fset
// file := fset.AddFile(filename, fset.Base(), len(src)) // register file // file := fset.AddFile(filename, fset.Base(), len(src)) // register file
// s.Init(file, src, nil /* no error handler */, 0) // s.Init(file, src, nil /* no error handler */, 0)
......
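For completeness, a small sketch that follows the corrected package comment (a scanner.Scanner variable, a FileSet, and repeated Scan calls); the source text and file name are illustrative.

package main

import (
    "fmt"
    "go/scanner"
    "go/token"
)

func main() {
    src := []byte("x := 1 + 2 // sum")

    var s scanner.Scanner
    fset := token.NewFileSet()                             // position information is relative to fset
    file := fset.AddFile("calc.go", fset.Base(), len(src)) // register the source
    s.Init(file, src, nil /* no error handler */, scanner.ScanComments)

    for {
        pos, tok, lit := s.Scan()
        if tok == token.EOF {
            break
        }
        fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
    }
}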
...@@ -12,6 +12,9 @@ import ( ...@@ -12,6 +12,9 @@ import (
"sync" "sync"
) )
// -----------------------------------------------------------------------------
// Positions
// Position describes an arbitrary source position // Position describes an arbitrary source position
// including the file, line, and column location. // including the file, line, and column location.
// A Position is valid if the line number is > 0. // A Position is valid if the line number is > 0.
...@@ -81,84 +84,8 @@ func (p Pos) IsValid() bool { ...@@ -81,84 +84,8 @@ func (p Pos) IsValid() bool {
return p != NoPos return p != NoPos
} }
func searchFiles(a []*File, x int) int { // -----------------------------------------------------------------------------
return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 // File
}
func (s *FileSet) file(p Pos) *File {
// common case: p is in last file touched
if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
return f
}
// p is not in last file touched - search all files
if i := searchFiles(s.files, int(p)); i >= 0 {
f := s.files[i]
// f.base <= int(p) by definition of searchFiles
if int(p) <= f.base+f.size {
s.last = f
return f
}
}
return nil
}
// File returns the file which contains the position p.
// If no such file is found (for instance for p == NoPos),
// the result is nil.
//
func (s *FileSet) File(p Pos) (f *File) {
if p != NoPos {
s.mutex.RLock()
f = s.file(p)
s.mutex.RUnlock()
}
return
}
func (f *File) position(p Pos) (pos Position) {
offset := int(p) - f.base
pos.Offset = offset
pos.Filename, pos.Line, pos.Column = f.info(offset)
return
}
// Position converts a Pos in the fileset into a general Position.
func (s *FileSet) Position(p Pos) (pos Position) {
if p != NoPos {
s.mutex.RLock()
if f := s.file(p); f != nil {
pos = f.position(p)
}
s.mutex.RUnlock()
}
return
}
// A lineInfo object describes alternative file and line number
// information (such as provided via a //line comment in a .go
// file) for a given file offset.
type lineInfo struct {
// fields are exported to make them accessible to gob
Offset int
Filename string
Line int
}
// AddLineInfo adds alternative file and line number information for
// a given file offset. The offset must be larger than the offset for
// the previously added alternative line info and smaller than the
// file size; otherwise the information is ignored.
//
// AddLineInfo is typically used to register alternative position
// information for //line filename:line comments in source files.
//
func (f *File) AddLineInfo(offset int, filename string, line int) {
f.set.mutex.Lock()
if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
f.infos = append(f.infos, lineInfo{offset, filename, line})
}
f.set.mutex.Unlock()
}
// A File is a handle for a file belonging to a FileSet. // A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table. // A File has a name, size, and line offset table.
...@@ -253,6 +180,32 @@ func (f *File) SetLinesForContent(content []byte) { ...@@ -253,6 +180,32 @@ func (f *File) SetLinesForContent(content []byte) {
f.set.mutex.Unlock() f.set.mutex.Unlock()
} }
// A lineInfo object describes alternative file and line number
// information (such as provided via a //line comment in a .go
// file) for a given file offset.
type lineInfo struct {
// fields are exported to make them accessible to gob
Offset int
Filename string
Line int
}
// AddLineInfo adds alternative file and line number information for
// a given file offset. The offset must be larger than the offset for
// the previously added alternative line info and smaller than the
// file size; otherwise the information is ignored.
//
// AddLineInfo is typically used to register alternative position
// information for //line filename:line comments in source files.
//
func (f *File) AddLineInfo(offset int, filename string, line int) {
f.set.mutex.Lock()
if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
f.infos = append(f.infos, lineInfo{offset, filename, line})
}
f.set.mutex.Unlock()
}
// Pos returns the Pos value for the given file offset; // Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size(). // the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p. // f.Pos(f.Offset(p)) == p.
...@@ -283,41 +236,6 @@ func (f *File) Line(p Pos) int { ...@@ -283,41 +236,6 @@ func (f *File) Line(p Pos) int {
return f.Position(p).Line return f.Position(p).Line
} }
// Position returns the Position value for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Position(p Pos) (pos Position) {
if p != NoPos {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
}
pos = f.position(p)
}
return
}
func searchInts(a []int, x int) int {
// This function body is a manually inlined version of:
//
// return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
//
// With better compiler optimizations, this may not be needed in the
// future, but at the moment this change improves the go/printer
// benchmark performance by ~30%. This has a direct impact on the
// speed of gofmt and thus seems worthwhile (2011-04-29).
i, j := 0, len(a)
for i < j {
h := i + (j-i)/2 // avoid overflow when computing h
// i ≤ h < j
if a[h] <= x {
i = h + 1
} else {
j = h
}
}
return i - 1
}
func searchLineInfos(a []lineInfo, x int) int { func searchLineInfos(a []lineInfo, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
} }
...@@ -341,6 +259,29 @@ func (f *File) info(offset int) (filename string, line, column int) { ...@@ -341,6 +259,29 @@ func (f *File) info(offset int) (filename string, line, column int) {
return return
} }
func (f *File) position(p Pos) (pos Position) {
offset := int(p) - f.base
pos.Offset = offset
pos.Filename, pos.Line, pos.Column = f.info(offset)
return
}
// Position returns the Position value for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Position(p Pos) (pos Position) {
if p != NoPos {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
}
pos = f.position(p)
}
return
}
// -----------------------------------------------------------------------------
// FileSet
// A FileSet represents a set of source files. // A FileSet represents a set of source files.
// Methods of file sets are synchronized; multiple goroutines // Methods of file sets are synchronized; multiple goroutines
// may invoke them concurrently. // may invoke them concurrently.
...@@ -404,23 +345,91 @@ func (s *FileSet) AddFile(filename string, base, size int) *File { ...@@ -404,23 +345,91 @@ func (s *FileSet) AddFile(filename string, base, size int) *File {
return f return f
} }
// Files returns the files added to the file set. // Iterate calls f for the files in the file set in the order they were added
func (s *FileSet) Files() <-chan *File { // until f returns false.
ch := make(chan *File) //
go func() { func (s *FileSet) Iterate(f func(*File) bool) {
for i := 0; ; i++ { for i := 0; ; i++ {
var f *File var file *File
s.mutex.RLock() s.mutex.RLock()
if i < len(s.files) { if i < len(s.files) {
f = s.files[i] file = s.files[i]
} }
s.mutex.RUnlock() s.mutex.RUnlock()
if f == nil { if file == nil || !f(file) {
break break
}
ch <- f
} }
close(ch) }
}() }
return ch
func searchFiles(a []*File, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
}
func (s *FileSet) file(p Pos) *File {
// common case: p is in last file
if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
return f
}
// p is not in last file - search all files
if i := searchFiles(s.files, int(p)); i >= 0 {
f := s.files[i]
// f.base <= int(p) by definition of searchFiles
if int(p) <= f.base+f.size {
s.last = f
return f
}
}
return nil
}
// File returns the file that contains the position p.
// If no such file is found (for instance for p == NoPos),
// the result is nil.
//
func (s *FileSet) File(p Pos) (f *File) {
if p != NoPos {
s.mutex.RLock()
f = s.file(p)
s.mutex.RUnlock()
}
return
}
// Position converts a Pos in the fileset into a general Position.
func (s *FileSet) Position(p Pos) (pos Position) {
if p != NoPos {
s.mutex.RLock()
if f := s.file(p); f != nil {
pos = f.position(p)
}
s.mutex.RUnlock()
}
return
}
// -----------------------------------------------------------------------------
// Helper functions
func searchInts(a []int, x int) int {
// This function body is a manually inlined version of:
//
// return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
//
// With better compiler optimizations, this may not be needed in the
// future, but at the moment this change improves the go/printer
// benchmark performance by ~30%. This has a direct impact on the
// speed of gofmt and thus seems worthwhile (2011-04-29).
// TODO(gri): Remove this when compilers have caught up.
i, j := 0, len(a)
for i < j {
h := i + (j-i)/2 // avoid overflow when computing h
// i ≤ h < j
if a[h] <= x {
i = h + 1
} else {
j = h
}
}
return i - 1
} }
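The channel-based FileSet.Files method is replaced by the callback-based Iterate shown above. A hedged sketch of the new call pattern; the file names and sizes are illustrative.

package main

import (
    "fmt"
    "go/token"
)

func main() {
    fset := token.NewFileSet()
    fset.AddFile("a.go", fset.Base(), 100)
    fset.AddFile("b.go", fset.Base(), 200)

    // Iterate visits the files in the order they were added and stops
    // as soon as the callback returns false.
    var names []string
    fset.Iterate(func(f *token.File) bool {
        names = append(names, f.Name())
        return true
    })
    fmt.Println(names) // [a.go b.go]
}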
...@@ -167,12 +167,13 @@ func TestFiles(t *testing.T) { ...@@ -167,12 +167,13 @@ func TestFiles(t *testing.T) {
for i, test := range tests { for i, test := range tests {
fset.AddFile(test.filename, fset.Base(), test.size) fset.AddFile(test.filename, fset.Base(), test.size)
j := 0 j := 0
for g := range fset.Files() { fset.Iterate(func(f *File) bool {
if g.Name() != tests[j].filename { if f.Name() != tests[j].filename {
t.Errorf("expected filename = %s; got %s", tests[j].filename, g.Name()) t.Errorf("expected filename = %s; got %s", tests[j].filename, f.Name())
} }
j++ j++
} return true
})
if j != i+1 { if j != i+1 {
t.Errorf("expected %d files; got %d", i+1, j) t.Errorf("expected %d files; got %d", i+1, j)
} }
......
...@@ -38,6 +38,8 @@ func New() hash.Hash32 { ...@@ -38,6 +38,8 @@ func New() hash.Hash32 {
func (d *digest) Size() int { return Size } func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return 1 }
// Add p to the running checksum a, b. // Add p to the running checksum a, b.
func update(a, b uint32, p []byte) (aa, bb uint32) { func update(a, b uint32, p []byte) (aa, bb uint32) {
for _, pi := range p { for _, pi := range p {
......
...@@ -94,6 +94,8 @@ func NewIEEE() hash.Hash32 { return New(IEEETable) } ...@@ -94,6 +94,8 @@ func NewIEEE() hash.Hash32 { return New(IEEETable) }
func (d *digest) Size() int { return Size } func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return 1 }
func (d *digest) Reset() { d.crc = 0 } func (d *digest) Reset() { d.crc = 0 }
func update(crc uint32, tab *Table, p []byte) uint32 { func update(crc uint32, tab *Table, p []byte) uint32 {
......
...@@ -53,6 +53,8 @@ func New(tab *Table) hash.Hash64 { return &digest{0, tab} } ...@@ -53,6 +53,8 @@ func New(tab *Table) hash.Hash64 { return &digest{0, tab} }
func (d *digest) Size() int { return Size } func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return 1 }
func (d *digest) Reset() { d.crc = 0 } func (d *digest) Reset() { d.crc = 0 }
func update(crc uint64, tab *Table, p []byte) uint64 { func update(crc uint64, tab *Table, p []byte) uint64 {
......
...@@ -104,6 +104,11 @@ func (s *sum32a) Size() int { return 4 } ...@@ -104,6 +104,11 @@ func (s *sum32a) Size() int { return 4 }
func (s *sum64) Size() int { return 8 } func (s *sum64) Size() int { return 8 }
func (s *sum64a) Size() int { return 8 } func (s *sum64a) Size() int { return 8 }
func (s *sum32) BlockSize() int { return 1 }
func (s *sum32a) BlockSize() int { return 1 }
func (s *sum64) BlockSize() int { return 1 }
func (s *sum64a) BlockSize() int { return 1 }
func (s *sum32) Sum(in []byte) []byte { func (s *sum32) Sum(in []byte) []byte {
v := uint32(*s) v := uint32(*s)
in = append(in, byte(v>>24)) in = append(in, byte(v>>24))
......
...@@ -22,6 +22,12 @@ type Hash interface { ...@@ -22,6 +22,12 @@ type Hash interface {
// Size returns the number of bytes Sum will return. // Size returns the number of bytes Sum will return.
Size() int Size() int
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
BlockSize() int
} }
// Hash32 is the common interface implemented by all 32-bit hash functions. // Hash32 is the common interface implemented by all 32-bit hash functions.
......
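With BlockSize added to the hash.Hash interface, every implementation in the tree must provide it; the byte-oriented checksums patched above all return 1. A brief hedged sketch of querying it through the interface.

package main

import (
    "fmt"
    "hash"
    "hash/adler32"
    "hash/crc32"
    "hash/fnv"
)

func main() {
    hashes := map[string]hash.Hash32{
        "adler32": adler32.New(),
        "crc32":   crc32.NewIEEE(),
        "fnv32a":  fnv.New32a(),
    }
    for name, h := range hashes {
        h.Write([]byte("hello, world"))
        // These checksums consume input byte by byte, so BlockSize is 1.
        fmt.Println(name, h.BlockSize(), h.Sum32())
    }
}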
...@@ -1713,8 +1713,8 @@ func parseForeignContent(p *parser) bool { ...@@ -1713,8 +1713,8 @@ func parseForeignContent(p *parser) bool {
} }
if breakout[p.tok.Data] { if breakout[p.tok.Data] {
for i := len(p.oe) - 1; i >= 0; i-- { for i := len(p.oe) - 1; i >= 0; i-- {
// TODO: HTML, MathML integration points. // TODO: MathML integration points.
if p.oe[i].Namespace == "" { if p.oe[i].Namespace == "" || htmlIntegrationPoint(p.oe[i]) {
p.oe = p.oe[:i+1] p.oe = p.oe[:i+1]
break break
} }
......
...@@ -184,7 +184,7 @@ func TestParser(t *testing.T) { ...@@ -184,7 +184,7 @@ func TestParser(t *testing.T) {
{"tests4.dat", -1}, {"tests4.dat", -1},
{"tests5.dat", -1}, {"tests5.dat", -1},
{"tests6.dat", -1}, {"tests6.dat", -1},
{"tests10.dat", 33}, {"tests10.dat", 35},
} }
for _, tf := range testFiles { for _, tf := range testFiles {
f, err := os.Open("testdata/webkit/" + tf.filename) f, err := os.Open("testdata/webkit/" + tf.filename)
......
...@@ -31,8 +31,8 @@ Example ...@@ -31,8 +31,8 @@ Example
import "text/template" import "text/template"
... ...
t, err := (&template.Set{}).Parse(`{{define "T"}}Hello, {{.}}!{{end}}`) t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
err = t.Execute(out, "T", "<script>alert('you have been pwned')</script>") err = t.ExecuteTemplate(out, "T", "<script>alert('you have been pwned')</script>")
produces produces
...@@ -42,12 +42,12 @@ but with contextual autoescaping, ...@@ -42,12 +42,12 @@ but with contextual autoescaping,
import "html/template" import "html/template"
... ...
t, err := (&template.Set{}).Parse(`{{define "T"}}Hello, {{.}}!{{end}}`) t, err := template.New("foo").Parse(`{{define "T"}}Hello, {{.}}!{{end}}`)
err = t.Execute(out, "T", "<script>alert('you have been pwned')</script>") err = t.ExecuteTemplate(out, "T", "<script>alert('you have been pwned')</script>")
produces safe, escaped HTML output produces safe, escaped HTML output
Hello, &lt;script&gt;alert('you have been pwned')&lt;/script&gt;! Hello, &lt;script&gt;alert(&#39;you have been pwned&#39;)&lt;/script&gt;!
Contexts Contexts
...@@ -57,8 +57,8 @@ functions to each simple action pipeline, so given the excerpt ...@@ -57,8 +57,8 @@ functions to each simple action pipeline, so given the excerpt
<a href="/search?q={{.}}">{{.}}</a> <a href="/search?q={{.}}">{{.}}</a>
At parse time each {{.}} is overwritten to add escaping functions as necessary, At parse time each {{.}} is overwritten to add escaping functions as necessary.
in this case, In this case it becomes
<a href="/search?q={{. | urlquery}}">{{. | html}}</a> <a href="/search?q={{. | urlquery}}">{{. | html}}</a>
......
...@@ -899,7 +899,7 @@ func TestErrors(t *testing.T) { ...@@ -899,7 +899,7 @@ func TestErrors(t *testing.T) {
}, },
{ {
`<a href="{{if .F}}/foo?a={{else}}/bar/{{end}}{{.H}}">`, `<a href="{{if .F}}/foo?a={{else}}/bar/{{end}}{{.H}}">`,
"z:1: (action: [(command: [F=[H]])]) appears in an ambiguous URL context", "z:1: {{.H}} appears in an ambiguous URL context",
}, },
{ {
`<a onclick="alert('Hello \`, `<a onclick="alert('Hello \`,
...@@ -1490,62 +1490,62 @@ func TestEnsurePipelineContains(t *testing.T) { ...@@ -1490,62 +1490,62 @@ func TestEnsurePipelineContains(t *testing.T) {
}{ }{
{ {
"{{.X}}", "{{.X}}",
"[(command: [F=[X]])]", ".X",
[]string{}, []string{},
}, },
{ {
"{{.X | html}}", "{{.X | html}}",
"[(command: [F=[X]]) (command: [I=html])]", ".X | html",
[]string{}, []string{},
}, },
{ {
"{{.X}}", "{{.X}}",
"[(command: [F=[X]]) (command: [I=html])]", ".X | html",
[]string{"html"}, []string{"html"},
}, },
{ {
"{{.X | html}}", "{{.X | html}}",
"[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]", ".X | html | urlquery",
[]string{"urlquery"}, []string{"urlquery"},
}, },
{ {
"{{.X | html | urlquery}}", "{{.X | html | urlquery}}",
"[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]", ".X | html | urlquery",
[]string{"urlquery"}, []string{"urlquery"},
}, },
{ {
"{{.X | html | urlquery}}", "{{.X | html | urlquery}}",
"[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]", ".X | html | urlquery",
[]string{"html", "urlquery"}, []string{"html", "urlquery"},
}, },
{ {
"{{.X | html | urlquery}}", "{{.X | html | urlquery}}",
"[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]", ".X | html | urlquery",
[]string{"html"}, []string{"html"},
}, },
{ {
"{{.X | urlquery}}", "{{.X | urlquery}}",
"[(command: [F=[X]]) (command: [I=html]) (command: [I=urlquery])]", ".X | html | urlquery",
[]string{"html", "urlquery"}, []string{"html", "urlquery"},
}, },
{ {
"{{.X | html | print}}", "{{.X | html | print}}",
"[(command: [F=[X]]) (command: [I=urlquery]) (command: [I=html]) (command: [I=print])]", ".X | urlquery | html | print",
[]string{"urlquery", "html"}, []string{"urlquery", "html"},
}, },
} }
for _, test := range tests { for i, test := range tests {
tmpl := template.Must(template.New("test").Parse(test.input)) tmpl := template.Must(template.New("test").Parse(test.input))
action, ok := (tmpl.Tree.Root.Nodes[0].(*parse.ActionNode)) action, ok := (tmpl.Tree.Root.Nodes[0].(*parse.ActionNode))
if !ok { if !ok {
t.Errorf("First node is not an action: %s", test.input) t.Errorf("#%d: First node is not an action: %s", i, test.input)
continue continue
} }
pipe := action.Pipe pipe := action.Pipe
ensurePipelineContains(pipe, test.ids) ensurePipelineContains(pipe, test.ids)
got := pipe.String() got := pipe.String()
if got != test.output { if got != test.output {
t.Errorf("%s, %v: want\n\t%s\ngot\n\t%s", test.input, test.ids, test.output, got) t.Errorf("#%d: %s, %v: want\n\t%s\ngot\n\t%s", i, test.input, test.ids, test.output, got)
} }
} }
} }
......
...@@ -152,26 +152,35 @@ func (m *modelFunc) Convert(c Color) Color { ...@@ -152,26 +152,35 @@ func (m *modelFunc) Convert(c Color) Color {
return m.f(c) return m.f(c)
} }
// RGBAModel is the Model for RGBA colors. // Models for the standard color types.
var RGBAModel Model = ModelFunc(func(c Color) Color { var (
RGBAModel Model = ModelFunc(rgbaModel)
RGBA64Model Model = ModelFunc(rgba64Model)
NRGBAModel Model = ModelFunc(nrgbaModel)
NRGBA64Model Model = ModelFunc(nrgba64Model)
AlphaModel Model = ModelFunc(alphaModel)
Alpha16Model Model = ModelFunc(alpha16Model)
GrayModel Model = ModelFunc(grayModel)
Gray16Model Model = ModelFunc(gray16Model)
)
func rgbaModel(c Color) Color {
if _, ok := c.(RGBA); ok { if _, ok := c.(RGBA); ok {
return c return c
} }
r, g, b, a := c.RGBA() r, g, b, a := c.RGBA()
return RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)} return RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)}
}) }
// RGBAModel is the Model for RGBA64 colors. func rgba64Model(c Color) Color {
var RGBA64Model Model = ModelFunc(func(c Color) Color {
if _, ok := c.(RGBA64); ok { if _, ok := c.(RGBA64); ok {
return c return c
} }
r, g, b, a := c.RGBA() r, g, b, a := c.RGBA()
return RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)} return RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
}) }
// NRGBAModel is the Model for NRGBA colors. func nrgbaModel(c Color) Color {
var NRGBAModel Model = ModelFunc(func(c Color) Color {
if _, ok := c.(NRGBA); ok { if _, ok := c.(NRGBA); ok {
return c return c
} }
...@@ -187,10 +196,9 @@ var NRGBAModel Model = ModelFunc(func(c Color) Color { ...@@ -187,10 +196,9 @@ var NRGBAModel Model = ModelFunc(func(c Color) Color {
g = (g * 0xffff) / a g = (g * 0xffff) / a
b = (b * 0xffff) / a b = (b * 0xffff) / a
return NRGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)} return NRGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)}
}) }
// NRGBAModel is the Model for NRGBA64 colors. func nrgba64Model(c Color) Color {
var NRGBA64Model Model = ModelFunc(func(c Color) Color {
if _, ok := c.(NRGBA64); ok { if _, ok := c.(NRGBA64); ok {
return c return c
} }
...@@ -206,45 +214,41 @@ var NRGBA64Model Model = ModelFunc(func(c Color) Color { ...@@ -206,45 +214,41 @@ var NRGBA64Model Model = ModelFunc(func(c Color) Color {
g = (g * 0xffff) / a g = (g * 0xffff) / a
b = (b * 0xffff) / a b = (b * 0xffff) / a
return NRGBA64{uint16(r), uint16(g), uint16(b), uint16(a)} return NRGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
}) }
// AlphaModel is the Model for Alpha colors. func alphaModel(c Color) Color {
var AlphaModel Model = ModelFunc(func(c Color) Color {
if _, ok := c.(Alpha); ok { if _, ok := c.(Alpha); ok {
return c return c
} }
_, _, _, a := c.RGBA() _, _, _, a := c.RGBA()
return Alpha{uint8(a >> 8)} return Alpha{uint8(a >> 8)}
}) }
// Alpha16Model is the Model for Alpha16 colors. func alpha16Model(c Color) Color {
var Alpha16Model Model = ModelFunc(func(c Color) Color {
if _, ok := c.(Alpha16); ok { if _, ok := c.(Alpha16); ok {
return c return c
} }
_, _, _, a := c.RGBA() _, _, _, a := c.RGBA()
return Alpha16{uint16(a)} return Alpha16{uint16(a)}
}) }
// GrayModel is the Model for Gray colors. func grayModel(c Color) Color {
var GrayModel Model = ModelFunc(func(c Color) Color {
if _, ok := c.(Gray); ok { if _, ok := c.(Gray); ok {
return c return c
} }
r, g, b, _ := c.RGBA() r, g, b, _ := c.RGBA()
y := (299*r + 587*g + 114*b + 500) / 1000 y := (299*r + 587*g + 114*b + 500) / 1000
return Gray{uint8(y >> 8)} return Gray{uint8(y >> 8)}
}) }
// Gray16Model is the Model for Gray16 colors. func gray16Model(c Color) Color {
var Gray16Model Model = ModelFunc(func(c Color) Color {
if _, ok := c.(Gray16); ok { if _, ok := c.(Gray16); ok {
return c return c
} }
r, g, b, _ := c.RGBA() r, g, b, _ := c.RGBA()
y := (299*r + 587*g + 114*b + 500) / 1000 y := (299*r + 587*g + 114*b + 500) / 1000
return Gray16{uint16(y)} return Gray16{uint16(y)}
}) }
// Palette is a palette of colors. // Palette is a palette of colors.
type Palette []Color type Palette []Color
...@@ -290,13 +294,10 @@ func (p Palette) Index(c Color) int { ...@@ -290,13 +294,10 @@ func (p Palette) Index(c Color) int {
return ret return ret
} }
// Standard colors.
var ( var (
// Black is an opaque black Color. Black = Gray16{0}
Black = Gray16{0} White = Gray16{0xffff}
// White is an opaque white Color.
White = Gray16{0xffff}
// Transparent is a fully transparent Color.
Transparent = Alpha16{0} Transparent = Alpha16{0}
// Opaque is a fully opaque Color. Opaque = Alpha16{0xffff}
Opaque = Alpha16{0xffff}
) )
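The per-model doc comments (two of which were copy-paste errors, e.g. "RGBAModel is the Model for RGBA64 colors") move to a single comment over named conversion functions; callers are unaffected. A hedged sketch of converting through one of the standard models.

package main

import (
    "fmt"
    "image/color"
)

func main() {
    c := color.RGBA{R: 200, G: 100, B: 50, A: 255}

    // GrayModel applies the same 299/587/114 luminance weighting as grayModel above.
    g := color.GrayModel.Convert(c).(color.Gray)
    fmt.Println(g.Y)

    // The standard colors are plain Gray16 and Alpha16 values.
    fmt.Println(color.Black, color.White, color.Transparent, color.Opaque)
}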
...@@ -4,8 +4,7 @@ ...@@ -4,8 +4,7 @@
package color package color
// RGBToYCbCr converts an RGB triple to a Y'CbCr triple. All components lie // RGBToYCbCr converts an RGB triple to a Y'CbCr triple.
// within the range [0, 255].
func RGBToYCbCr(r, g, b uint8) (uint8, uint8, uint8) { func RGBToYCbCr(r, g, b uint8) (uint8, uint8, uint8) {
// The JFIF specification says: // The JFIF specification says:
// Y' = 0.2990*R + 0.5870*G + 0.1140*B // Y' = 0.2990*R + 0.5870*G + 0.1140*B
...@@ -36,8 +35,7 @@ func RGBToYCbCr(r, g, b uint8) (uint8, uint8, uint8) { ...@@ -36,8 +35,7 @@ func RGBToYCbCr(r, g, b uint8) (uint8, uint8, uint8) {
return uint8(yy), uint8(cb), uint8(cr) return uint8(yy), uint8(cb), uint8(cr)
} }
// YCbCrToRGB converts a Y'CbCr triple to an RGB triple. All components lie // YCbCrToRGB converts a Y'CbCr triple to an RGB triple.
// within the range [0, 255].
func YCbCrToRGB(y, cb, cr uint8) (uint8, uint8, uint8) { func YCbCrToRGB(y, cb, cr uint8) (uint8, uint8, uint8) {
// The JFIF specification says: // The JFIF specification says:
// R = Y' + 1.40200*(Cr-128) // R = Y' + 1.40200*(Cr-128)
...@@ -89,11 +87,13 @@ func (c YCbCr) RGBA() (uint32, uint32, uint32, uint32) { ...@@ -89,11 +87,13 @@ func (c YCbCr) RGBA() (uint32, uint32, uint32, uint32) {
} }
// YCbCrModel is the Model for Y'CbCr colors. // YCbCrModel is the Model for Y'CbCr colors.
var YCbCrModel Model = ModelFunc(func(c Color) Color { var YCbCrModel Model = ModelFunc(yCbCrModel)
func yCbCrModel(c Color) Color {
if _, ok := c.(YCbCr); ok { if _, ok := c.(YCbCr); ok {
return c return c
} }
r, g, b, _ := c.RGBA() r, g, b, _ := c.RGBA()
y, u, v := RGBToYCbCr(uint8(r>>8), uint8(g>>8), uint8(b>>8)) y, u, v := RGBToYCbCr(uint8(r>>8), uint8(g>>8), uint8(b>>8))
return YCbCr{y, u, v} return YCbCr{y, u, v}
}) }
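A minimal sketch of the round trip through the two conversion functions whose doc comments were trimmed above; the input color is arbitrary.

package main

import (
    "fmt"
    "image/color"
)

func main() {
    y, cb, cr := color.RGBToYCbCr(255, 128, 0)
    r, g, b := color.YCbCrToRGB(y, cb, cr)

    // The conversion is lossy, so the round trip only approximately
    // reproduces the original (255, 128, 0).
    fmt.Println(y, cb, cr)
    fmt.Println(r, g, b)
}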
...@@ -171,7 +171,7 @@ func drawFillOver(dst *image.RGBA, r image.Rectangle, src *image.Uniform) { ...@@ -171,7 +171,7 @@ func drawFillOver(dst *image.RGBA, r image.Rectangle, src *image.Uniform) {
sr, sg, sb, sa := src.RGBA() sr, sg, sb, sa := src.RGBA()
// The 0x101 is here for the same reason as in drawRGBA. // The 0x101 is here for the same reason as in drawRGBA.
a := (m - sa) * 0x101 a := (m - sa) * 0x101
i0 := (r.Min.Y-dst.Rect.Min.Y)*dst.Stride + (r.Min.X-dst.Rect.Min.X)*4 i0 := dst.PixOffset(r.Min.X, r.Min.Y)
i1 := i0 + r.Dx()*4 i1 := i0 + r.Dx()*4
for y := r.Min.Y; y != r.Max.Y; y++ { for y := r.Min.Y; y != r.Max.Y; y++ {
for i := i0; i < i1; i += 4 { for i := i0; i < i1; i += 4 {
...@@ -195,7 +195,7 @@ func drawFillSrc(dst *image.RGBA, r image.Rectangle, src *image.Uniform) { ...@@ -195,7 +195,7 @@ func drawFillSrc(dst *image.RGBA, r image.Rectangle, src *image.Uniform) {
// The built-in copy function is faster than a straightforward for loop to fill the destination with // The built-in copy function is faster than a straightforward for loop to fill the destination with
// the color, but copy requires a slice source. We therefore use a for loop to fill the first row, and // the color, but copy requires a slice source. We therefore use a for loop to fill the first row, and
// then use the first row as the slice source for the remaining rows. // then use the first row as the slice source for the remaining rows.
i0 := (r.Min.Y-dst.Rect.Min.Y)*dst.Stride + (r.Min.X-dst.Rect.Min.X)*4 i0 := dst.PixOffset(r.Min.X, r.Min.Y)
i1 := i0 + r.Dx()*4 i1 := i0 + r.Dx()*4
for i := i0; i < i1; i += 4 { for i := i0; i < i1; i += 4 {
dst.Pix[i+0] = uint8(sr >> 8) dst.Pix[i+0] = uint8(sr >> 8)
...@@ -213,8 +213,8 @@ func drawFillSrc(dst *image.RGBA, r image.Rectangle, src *image.Uniform) { ...@@ -213,8 +213,8 @@ func drawFillSrc(dst *image.RGBA, r image.Rectangle, src *image.Uniform) {
func drawCopyOver(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.Point) { func drawCopyOver(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.Point) {
dx, dy := r.Dx(), r.Dy() dx, dy := r.Dx(), r.Dy()
d0 := (r.Min.Y-dst.Rect.Min.Y)*dst.Stride + (r.Min.X-dst.Rect.Min.X)*4 d0 := dst.PixOffset(r.Min.X, r.Min.Y)
s0 := (sp.Y-src.Rect.Min.Y)*src.Stride + (sp.X-src.Rect.Min.X)*4 s0 := src.PixOffset(sp.X, sp.Y)
var ( var (
ddelta, sdelta int ddelta, sdelta int
i0, i1, idelta int i0, i1, idelta int
...@@ -261,8 +261,8 @@ func drawCopyOver(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image. ...@@ -261,8 +261,8 @@ func drawCopyOver(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.
func drawCopySrc(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.Point) { func drawCopySrc(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.Point) {
n, dy := 4*r.Dx(), r.Dy() n, dy := 4*r.Dx(), r.Dy()
d0 := (r.Min.Y-dst.Rect.Min.Y)*dst.Stride + (r.Min.X-dst.Rect.Min.X)*4 d0 := dst.PixOffset(r.Min.X, r.Min.Y)
s0 := (sp.Y-src.Rect.Min.Y)*src.Stride + (sp.X-src.Rect.Min.X)*4 s0 := src.PixOffset(sp.X, sp.Y)
var ddelta, sdelta int var ddelta, sdelta int
if r.Min.Y <= sp.Y { if r.Min.Y <= sp.Y {
ddelta = dst.Stride ddelta = dst.Stride
...@@ -348,9 +348,6 @@ func drawNRGBASrc(dst *image.RGBA, r image.Rectangle, src *image.NRGBA, sp image ...@@ -348,9 +348,6 @@ func drawNRGBASrc(dst *image.RGBA, r image.Rectangle, src *image.NRGBA, sp image
func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Point) { func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Point) {
// An image.YCbCr is always fully opaque, and so if the mask is implicitly nil // An image.YCbCr is always fully opaque, and so if the mask is implicitly nil
// (i.e. fully opaque) then the op is effectively always Src. // (i.e. fully opaque) then the op is effectively always Src.
var (
yy, cb, cr uint8
)
x0 := (r.Min.X - dst.Rect.Min.X) * 4 x0 := (r.Min.X - dst.Rect.Min.X) * 4
x1 := (r.Max.X - dst.Rect.Min.X) * 4 x1 := (r.Max.X - dst.Rect.Min.X) * 4
y0 := r.Min.Y - dst.Rect.Min.Y y0 := r.Min.Y - dst.Rect.Min.Y
...@@ -359,12 +356,11 @@ func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Po ...@@ -359,12 +356,11 @@ func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Po
case image.YCbCrSubsampleRatio422: case image.YCbCrSubsampleRatio422:
for y, sy := y0, sp.Y; y != y1; y, sy = y+1, sy+1 { for y, sy := y0, sp.Y; y != y1; y, sy = y+1, sy+1 {
dpix := dst.Pix[y*dst.Stride:] dpix := dst.Pix[y*dst.Stride:]
for x, sx := x0, sp.X; x != x1; x, sx = x+4, sx+1 { yi := (sy-src.Rect.Min.Y)*src.YStride + (sp.X - src.Rect.Min.X)
i := sx / 2 ciBase := (sy-src.Rect.Min.Y)*src.CStride - src.Rect.Min.X/2
yy = src.Y[sy*src.YStride+sx] for x, sx := x0, sp.X; x != x1; x, sx, yi = x+4, sx+1, yi+1 {
cb = src.Cb[sy*src.CStride+i] ci := ciBase + sx/2
cr = src.Cr[sy*src.CStride+i] rr, gg, bb := color.YCbCrToRGB(src.Y[yi], src.Cb[ci], src.Cr[ci])
rr, gg, bb := color.YCbCrToRGB(yy, cb, cr)
dpix[x+0] = rr dpix[x+0] = rr
dpix[x+1] = gg dpix[x+1] = gg
dpix[x+2] = bb dpix[x+2] = bb
...@@ -374,12 +370,11 @@ func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Po ...@@ -374,12 +370,11 @@ func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Po
case image.YCbCrSubsampleRatio420: case image.YCbCrSubsampleRatio420:
for y, sy := y0, sp.Y; y != y1; y, sy = y+1, sy+1 { for y, sy := y0, sp.Y; y != y1; y, sy = y+1, sy+1 {
dpix := dst.Pix[y*dst.Stride:] dpix := dst.Pix[y*dst.Stride:]
for x, sx := x0, sp.X; x != x1; x, sx = x+4, sx+1 { yi := (sy-src.Rect.Min.Y)*src.YStride + (sp.X - src.Rect.Min.X)
i, j := sx/2, sy/2 ciBase := (sy/2-src.Rect.Min.Y/2)*src.CStride - src.Rect.Min.X/2
yy = src.Y[sy*src.YStride+sx] for x, sx := x0, sp.X; x != x1; x, sx, yi = x+4, sx+1, yi+1 {
cb = src.Cb[j*src.CStride+i] ci := ciBase + sx/2
cr = src.Cr[j*src.CStride+i] rr, gg, bb := color.YCbCrToRGB(src.Y[yi], src.Cb[ci], src.Cr[ci])
rr, gg, bb := color.YCbCrToRGB(yy, cb, cr)
dpix[x+0] = rr dpix[x+0] = rr
dpix[x+1] = gg dpix[x+1] = gg
dpix[x+2] = bb dpix[x+2] = bb
...@@ -390,11 +385,10 @@ func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Po ...@@ -390,11 +385,10 @@ func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Po
// Default to 4:4:4 subsampling. // Default to 4:4:4 subsampling.
for y, sy := y0, sp.Y; y != y1; y, sy = y+1, sy+1 { for y, sy := y0, sp.Y; y != y1; y, sy = y+1, sy+1 {
dpix := dst.Pix[y*dst.Stride:] dpix := dst.Pix[y*dst.Stride:]
for x, sx := x0, sp.X; x != x1; x, sx = x+4, sx+1 { yi := (sy-src.Rect.Min.Y)*src.YStride + (sp.X - src.Rect.Min.X)
yy = src.Y[sy*src.YStride+sx] ci := (sy-src.Rect.Min.Y)*src.CStride + (sp.X - src.Rect.Min.X)
cb = src.Cb[sy*src.CStride+sx] for x := x0; x != x1; x, yi, ci = x+4, yi+1, ci+1 {
cr = src.Cr[sy*src.CStride+sx] rr, gg, bb := color.YCbCrToRGB(src.Y[yi], src.Cb[ci], src.Cr[ci])
rr, gg, bb := color.YCbCrToRGB(yy, cb, cr)
dpix[x+0] = rr dpix[x+0] = rr
dpix[x+1] = gg dpix[x+1] = gg
dpix[x+2] = bb dpix[x+2] = bb
...@@ -405,9 +399,9 @@ func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Po ...@@ -405,9 +399,9 @@ func drawYCbCr(dst *image.RGBA, r image.Rectangle, src *image.YCbCr, sp image.Po
} }
func drawGlyphOver(dst *image.RGBA, r image.Rectangle, src *image.Uniform, mask *image.Alpha, mp image.Point) { func drawGlyphOver(dst *image.RGBA, r image.Rectangle, src *image.Uniform, mask *image.Alpha, mp image.Point) {
i0 := (r.Min.Y-dst.Rect.Min.Y)*dst.Stride + (r.Min.X-dst.Rect.Min.X)*4 i0 := dst.PixOffset(r.Min.X, r.Min.Y)
i1 := i0 + r.Dx()*4 i1 := i0 + r.Dx()*4
mi0 := (mp.Y-mask.Rect.Min.Y)*mask.Stride + mp.X - mask.Rect.Min.X mi0 := mask.PixOffset(mp.X, mp.Y)
sr, sg, sb, sa := src.RGBA() sr, sg, sb, sa := src.RGBA()
for y, my := r.Min.Y, mp.Y; y != r.Max.Y; y, my = y+1, my+1 { for y, my := r.Min.Y, mp.Y; y != r.Max.Y; y, my = y+1, my+1 {
for i, mi := i0, mi0; i < i1; i, mi = i+4, mi+1 { for i, mi := i0, mi0; i < i1; i, mi = i+4, mi+1 {
...@@ -451,7 +445,7 @@ func drawRGBA(dst *image.RGBA, r image.Rectangle, src image.Image, sp image.Poin ...@@ -451,7 +445,7 @@ func drawRGBA(dst *image.RGBA, r image.Rectangle, src image.Image, sp image.Poin
sx0 := sp.X + x0 - r.Min.X sx0 := sp.X + x0 - r.Min.X
mx0 := mp.X + x0 - r.Min.X mx0 := mp.X + x0 - r.Min.X
sx1 := sx0 + (x1 - x0) sx1 := sx0 + (x1 - x0)
i0 := (y0-dst.Rect.Min.Y)*dst.Stride + (x0-dst.Rect.Min.X)*4 i0 := dst.PixOffset(x0, y0)
di := dx * 4 di := dx * 4
for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy {
for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx { for i, sx, mx := i0, sx0, mx0; sx != sx1; i, sx, mx = i+di, sx+dx, mx+dx {
......
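The hand-rolled stride arithmetic in image/draw is replaced by the image types' PixOffset methods. A hedged sketch of the same index computation in client code; the rectangle and coordinates are illustrative.

package main

import (
    "fmt"
    "image"
    "image/color"
)

func main() {
    m := image.NewRGBA(image.Rect(10, 10, 20, 20))

    // PixOffset returns the index of the first byte (R) of the pixel at (x, y),
    // i.e. (y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4 for an RGBA image.
    i := m.PixOffset(12, 15)
    m.Pix[i+0] = 0xff // R
    m.Pix[i+3] = 0xff // A

    fmt.Println(m.At(12, 15) == color.RGBA{R: 0xff, A: 0xff})
}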
...@@ -112,7 +112,7 @@ func (r Rectangle) Add(p Point) Rectangle { ...@@ -112,7 +112,7 @@ func (r Rectangle) Add(p Point) Rectangle {
} }
} }
// Add returns the rectangle r translated by -p. // Sub returns the rectangle r translated by -p.
func (r Rectangle) Sub(p Point) Rectangle { func (r Rectangle) Sub(p Point) Rectangle {
return Rectangle{ return Rectangle{
Point{r.Min.X - p.X, r.Min.Y - p.Y}, Point{r.Min.X - p.X, r.Min.Y - p.Y},
......
...@@ -203,8 +203,7 @@ func (d *decoder) makeImg(h0, v0, mxx, myy int) { ...@@ -203,8 +203,7 @@ func (d *decoder) makeImg(h0, v0, mxx, myy int) {
return return
} }
var subsampleRatio image.YCbCrSubsampleRatio var subsampleRatio image.YCbCrSubsampleRatio
n := h0 * v0 switch h0 * v0 {
switch n {
case 1: case 1:
subsampleRatio = image.YCbCrSubsampleRatio444 subsampleRatio = image.YCbCrSubsampleRatio444
case 2: case 2:
...@@ -214,16 +213,8 @@ func (d *decoder) makeImg(h0, v0, mxx, myy int) { ...@@ -214,16 +213,8 @@ func (d *decoder) makeImg(h0, v0, mxx, myy int) {
default: default:
panic("unreachable") panic("unreachable")
} }
b := make([]byte, mxx*myy*(1*8*8*n+2*8*8)) m := image.NewYCbCr(image.Rect(0, 0, 8*h0*mxx, 8*v0*myy), subsampleRatio)
d.img3 = &image.YCbCr{ d.img3 = m.SubImage(image.Rect(0, 0, d.width, d.height)).(*image.YCbCr)
Y: b[mxx*myy*(0*8*8*n+0*8*8) : mxx*myy*(1*8*8*n+0*8*8)],
Cb: b[mxx*myy*(1*8*8*n+0*8*8) : mxx*myy*(1*8*8*n+1*8*8)],
Cr: b[mxx*myy*(1*8*8*n+1*8*8) : mxx*myy*(1*8*8*n+2*8*8)],
SubsampleRatio: subsampleRatio,
YStride: mxx * 8 * h0,
CStride: mxx * 8,
Rect: image.Rect(0, 0, d.width, d.height),
}
} }
// Specified in section B.2.3. // Specified in section B.2.3.
......
...@@ -223,8 +223,8 @@ func (d *decoder) decode(dst image.Image, ymin, ymax int) error { ...@@ -223,8 +223,8 @@ func (d *decoder) decode(dst image.Image, ymin, ymax int) error {
} }
case mRGB: case mRGB:
img := dst.(*image.RGBA) img := dst.(*image.RGBA)
min := (ymin-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4 min := img.PixOffset(0, ymin)
max := (ymax-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4 max := img.PixOffset(0, ymax)
var off int var off int
for i := min; i < max; i += 4 { for i := min; i < max; i += 4 {
img.Pix[i+0] = d.buf[off+0] img.Pix[i+0] = d.buf[off+0]
...@@ -235,16 +235,16 @@ func (d *decoder) decode(dst image.Image, ymin, ymax int) error { ...@@ -235,16 +235,16 @@ func (d *decoder) decode(dst image.Image, ymin, ymax int) error {
} }
case mNRGBA: case mNRGBA:
img := dst.(*image.NRGBA) img := dst.(*image.NRGBA)
min := (ymin-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4 min := img.PixOffset(0, ymin)
max := (ymax-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4 max := img.PixOffset(0, ymax)
if len(d.buf) != max-min { if len(d.buf) != max-min {
return FormatError("short data strip") return FormatError("short data strip")
} }
copy(img.Pix[min:max], d.buf) copy(img.Pix[min:max], d.buf)
case mRGBA: case mRGBA:
img := dst.(*image.RGBA) img := dst.(*image.RGBA)
min := (ymin-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4 min := img.PixOffset(0, ymin)
max := (ymax-img.Rect.Min.Y)*img.Stride - img.Rect.Min.X*4 max := img.PixOffset(0, ymax)
if len(d.buf) != max-min { if len(d.buf) != max-min {
return FormatError("short data strip") return FormatError("short data strip")
} }
......
...@@ -17,6 +17,18 @@ const ( ...@@ -17,6 +17,18 @@ const (
YCbCrSubsampleRatio420 YCbCrSubsampleRatio420
) )
func (s YCbCrSubsampleRatio) String() string {
switch s {
case YCbCrSubsampleRatio444:
return "YCbCrSubsampleRatio444"
case YCbCrSubsampleRatio422:
return "YCbCrSubsampleRatio422"
case YCbCrSubsampleRatio420:
return "YCbCrSubsampleRatio420"
}
return "YCbCrSubsampleRatioUnknown"
}
// YCbCr is an in-memory image of Y'CbCr colors. There is one Y sample per // YCbCr is an in-memory image of Y'CbCr colors. There is one Y sample per
// pixel, but each Cb and Cr sample can span one or more pixels. // pixel, but each Cb and Cr sample can span one or more pixels.
// YStride is the Y slice index delta between vertically adjacent pixels. // YStride is the Y slice index delta between vertically adjacent pixels.
...@@ -28,9 +40,7 @@ const ( ...@@ -28,9 +40,7 @@ const (
// For 4:2:2, CStride == YStride/2 && len(Cb) == len(Cr) == len(Y)/2. // For 4:2:2, CStride == YStride/2 && len(Cb) == len(Cr) == len(Y)/2.
// For 4:2:0, CStride == YStride/2 && len(Cb) == len(Cr) == len(Y)/4. // For 4:2:0, CStride == YStride/2 && len(Cb) == len(Cr) == len(Y)/4.
type YCbCr struct { type YCbCr struct {
Y []uint8 Y, Cb, Cr []uint8
Cb []uint8
Cr []uint8
YStride int YStride int
CStride int CStride int
SubsampleRatio YCbCrSubsampleRatio SubsampleRatio YCbCrSubsampleRatio
...@@ -49,39 +59,86 @@ func (p *YCbCr) At(x, y int) color.Color { ...@@ -49,39 +59,86 @@ func (p *YCbCr) At(x, y int) color.Color {
if !(Point{x, y}.In(p.Rect)) { if !(Point{x, y}.In(p.Rect)) {
return color.YCbCr{} return color.YCbCr{}
} }
yi := p.YOffset(x, y)
ci := p.COffset(x, y)
return color.YCbCr{
p.Y[yi],
p.Cb[ci],
p.Cr[ci],
}
}
// YOffset returns the index of the first element of Y that corresponds to
// the pixel at (x, y).
func (p *YCbCr) YOffset(x, y int) int {
return (y-p.Rect.Min.Y)*p.YStride + (x - p.Rect.Min.X)
}
// COffset returns the index of the first element of Cb or Cr that corresponds
// to the pixel at (x, y).
func (p *YCbCr) COffset(x, y int) int {
switch p.SubsampleRatio { switch p.SubsampleRatio {
case YCbCrSubsampleRatio422: case YCbCrSubsampleRatio422:
i := x / 2 return (y-p.Rect.Min.Y)*p.CStride + (x/2 - p.Rect.Min.X/2)
return color.YCbCr{
p.Y[y*p.YStride+x],
p.Cb[y*p.CStride+i],
p.Cr[y*p.CStride+i],
}
case YCbCrSubsampleRatio420: case YCbCrSubsampleRatio420:
i, j := x/2, y/2 return (y/2-p.Rect.Min.Y/2)*p.CStride + (x/2 - p.Rect.Min.X/2)
return color.YCbCr{
p.Y[y*p.YStride+x],
p.Cb[j*p.CStride+i],
p.Cr[j*p.CStride+i],
}
} }
// Default to 4:4:4 subsampling. // Default to 4:4:4 subsampling.
return color.YCbCr{ return (y-p.Rect.Min.Y)*p.CStride + (x - p.Rect.Min.X)
p.Y[y*p.YStride+x],
p.Cb[y*p.CStride+x],
p.Cr[y*p.CStride+x],
}
} }
// SubImage returns an image representing the portion of the image p visible // SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image. // through r. The returned value shares pixels with the original image.
func (p *YCbCr) SubImage(r Rectangle) Image { func (p *YCbCr) SubImage(r Rectangle) Image {
q := new(YCbCr) r = r.Intersect(p.Rect)
*q = *p // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
q.Rect = q.Rect.Intersect(r) // either r1 or r2 if the intersection is empty. Without explicitly checking for
return q // this, the Pix[i:] expression below can panic.
if r.Empty() {
return &YCbCr{
SubsampleRatio: p.SubsampleRatio,
}
}
yi := p.YOffset(r.Min.X, r.Min.Y)
ci := p.COffset(r.Min.X, r.Min.Y)
return &YCbCr{
Y: p.Y[yi:],
Cb: p.Cb[ci:],
Cr: p.Cr[ci:],
SubsampleRatio: p.SubsampleRatio,
YStride: p.YStride,
CStride: p.CStride,
Rect: r,
}
} }
func (p *YCbCr) Opaque() bool { func (p *YCbCr) Opaque() bool {
return true return true
} }
// NewYCbCr returns a new YCbCr with the given bounds and subsample ratio.
func NewYCbCr(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *YCbCr {
w, h, cw, ch := r.Dx(), r.Dy(), 0, 0
switch subsampleRatio {
case YCbCrSubsampleRatio422:
cw = (r.Max.X+1)/2 - r.Min.X/2
ch = h
case YCbCrSubsampleRatio420:
cw = (r.Max.X+1)/2 - r.Min.X/2
ch = (r.Max.Y+1)/2 - r.Min.Y/2
default:
// Default to 4:4:4 subsampling.
cw = w
ch = h
}
b := make([]byte, w*h+2*cw*ch)
return &YCbCr{
Y: b[:w*h],
Cb: b[w*h+0*cw*ch : w*h+1*cw*ch],
Cr: b[w*h+1*cw*ch : w*h+2*cw*ch],
SubsampleRatio: subsampleRatio,
YStride: w,
CStride: cw,
Rect: r,
}
}
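NewYCbCr and the YOffset/COffset helpers introduced above replace the manual buffer slicing that image/jpeg used to perform. A hedged sketch of the new API; the bounds and sample values are illustrative.

package main

import (
    "fmt"
    "image"
)

func main() {
    m := image.NewYCbCr(image.Rect(0, 0, 8, 6), image.YCbCrSubsampleRatio420)

    // YOffset indexes into m.Y; COffset indexes into m.Cb and m.Cr,
    // which are shared per 2x2 pixel block for 4:2:0 subsampling.
    yi := m.YOffset(3, 2)
    ci := m.COffset(3, 2)
    m.Y[yi], m.Cb[ci], m.Cr[ci] = 0x80, 0x40, 0xc0

    // SubImage shares the underlying buffers with m.
    sub := m.SubImage(image.Rect(2, 2, 6, 6)).(*image.YCbCr)
    fmt.Println(sub.Bounds(), m.At(3, 2) == sub.At(3, 2))
}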
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package image_test
import (
. "image"
"image/color"
"testing"
)
func TestYCbCr(t *testing.T) {
rects := []Rectangle{
Rect(0, 0, 16, 16),
Rect(1, 0, 16, 16),
Rect(0, 1, 16, 16),
Rect(1, 1, 16, 16),
Rect(1, 1, 15, 16),
Rect(1, 1, 16, 15),
Rect(1, 1, 15, 15),
Rect(2, 3, 14, 15),
Rect(7, 0, 7, 16),
Rect(0, 8, 16, 8),
Rect(0, 0, 10, 11),
Rect(5, 6, 16, 16),
Rect(7, 7, 8, 8),
Rect(7, 8, 8, 9),
Rect(8, 7, 9, 8),
Rect(8, 8, 9, 9),
Rect(7, 7, 17, 17),
Rect(8, 8, 17, 17),
Rect(9, 9, 17, 17),
Rect(10, 10, 17, 17),
}
subsampleRatios := []YCbCrSubsampleRatio{
YCbCrSubsampleRatio444,
YCbCrSubsampleRatio422,
YCbCrSubsampleRatio420,
}
deltas := []Point{
Pt(0, 0),
Pt(1000, 1001),
Pt(5001, -400),
Pt(-701, -801),
}
for _, r := range rects {
for _, subsampleRatio := range subsampleRatios {
for _, delta := range deltas {
testYCbCr(t, r, subsampleRatio, delta)
}
}
}
}
func testYCbCr(t *testing.T, r Rectangle, subsampleRatio YCbCrSubsampleRatio, delta Point) {
// Create a YCbCr image m, whose bounds are r translated by (delta.X, delta.Y).
r1 := r.Add(delta)
m := NewYCbCr(r1, subsampleRatio)
// Test that the image buffer is reasonably small even if (delta.X, delta.Y) is far from the origin.
if len(m.Y) > 100*100 {
t.Errorf("r=%v, subsampleRatio=%v, delta=%v: image buffer is too large",
r, subsampleRatio, delta)
return
}
// Initialize m's pixels. For 422 and 420 subsampling, some of the Cb and Cr elements
// will be set multiple times. That's OK. We just want to avoid a uniform image.
for y := r1.Min.Y; y < r1.Max.Y; y++ {
for x := r1.Min.X; x < r1.Max.X; x++ {
yi := m.YOffset(x, y)
ci := m.COffset(x, y)
m.Y[yi] = uint8(16*y + x)
m.Cb[ci] = uint8(y + 16*x)
m.Cr[ci] = uint8(y + 16*x)
}
}
// Make various sub-images of m.
for y0 := delta.Y + 3; y0 < delta.Y+7; y0++ {
for y1 := delta.Y + 8; y1 < delta.Y+13; y1++ {
for x0 := delta.X + 3; x0 < delta.X+7; x0++ {
for x1 := delta.X + 8; x1 < delta.X+13; x1++ {
subRect := Rect(x0, y0, x1, y1)
sub := m.SubImage(subRect).(*YCbCr)
// For each point in the sub-image's bounds, check that m.At(x, y) equals sub.At(x, y).
for y := sub.Rect.Min.Y; y < sub.Rect.Max.Y; y++ {
for x := sub.Rect.Min.X; x < sub.Rect.Max.X; x++ {
color0 := m.At(x, y).(color.YCbCr)
color1 := sub.At(x, y).(color.YCbCr)
if color0 != color1 {
t.Errorf("r=%v, subsampleRatio=%v, delta=%v, x=%d, y=%d, color0=%v, color1=%v",
r, subsampleRatio, delta, x, y, color0, color1)
return
}
}
}
}
}
}
}
}
...@@ -50,7 +50,7 @@ func ReadFile(filename string) ([]byte, error) { ...@@ -50,7 +50,7 @@ func ReadFile(filename string) ([]byte, error) {
// WriteFile writes data to a file named by filename. // WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm; // If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing. // otherwise WriteFile truncates it before writing.
func WriteFile(filename string, data []byte, perm uint32) error { func WriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil { if err != nil {
return err return err
......
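WriteFile's perm parameter changes from uint32 to os.FileMode; callers that pass an untyped octal constant such as 0644 keep compiling unchanged. A hedged sketch; the path is illustrative.

package main

import (
    "io/ioutil"
    "log"
    "os"
)

func main() {
    // The explicit conversion just makes the new parameter type visible;
    // a bare 0644 would also work.
    if err := ioutil.WriteFile("/tmp/example.txt", []byte("hello\n"), os.FileMode(0644)); err != nil {
        log.Fatal(err)
    }
    data, err := ioutil.ReadFile("/tmp/example.txt")
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("%q", data)
}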
...@@ -8,6 +8,7 @@ import ( ...@@ -8,6 +8,7 @@ import (
"log" "log"
"net" "net"
"testing" "testing"
"time"
) )
var serverAddr string var serverAddr string
...@@ -31,7 +32,7 @@ func startServer(done chan<- string) { ...@@ -31,7 +32,7 @@ func startServer(done chan<- string) {
log.Fatalf("net.ListenPacket failed udp :0 %v", e) log.Fatalf("net.ListenPacket failed udp :0 %v", e)
} }
serverAddr = c.LocalAddr().String() serverAddr = c.LocalAddr().String()
c.SetReadTimeout(100e6) // 100ms c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
go runSyslog(c, done) go runSyslog(c, done)
} }
......
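SetReadTimeout took a relative timeout in nanoseconds; the test now uses SetReadDeadline with an absolute time. A hedged sketch of the same pattern outside the test; the address and timeout are illustrative.

package main

import (
    "log"
    "net"
    "time"
)

func main() {
    c, err := net.ListenPacket("udp", "127.0.0.1:0")
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    // Deadlines are absolute points in time, not relative durations.
    c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))

    buf := make([]byte, 1024)
    if _, _, err := c.ReadFrom(buf); err != nil {
        log.Println("read failed, most likely a timeout:", err)
    }
}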
...@@ -107,7 +107,9 @@ func (r *Rand) Perm(n int) []int { ...@@ -107,7 +107,9 @@ func (r *Rand) Perm(n int) []int {
var globalRand = New(&lockedSource{src: NewSource(1)}) var globalRand = New(&lockedSource{src: NewSource(1)})
// Seed uses the provided seed value to initialize the generator to a deterministic state. // Seed uses the provided seed value to initialize the generator to a
// deterministic state. If Seed is not called, the generator behaves as
// if seeded by Seed(1).
func Seed(seed int64) { globalRand.Seed(seed) } func Seed(seed int64) { globalRand.Seed(seed) }
// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. // Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
......
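The expanded comment documents the implicit Seed(1) default. A small sketch of the resulting determinism; the seed value is arbitrary.

package main

import (
    "fmt"
    "math/rand"
)

func main() {
    // Without an explicit Seed call the generator behaves as if seeded
    // with Seed(1), so these values are the same on every run.
    fmt.Println(rand.Int63(), rand.Int63())

    rand.Seed(2012) // a different seed gives a different, still repeatable, sequence
    fmt.Println(rand.Int63(), rand.Int63())
}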