Commit 501699af by Ian Lance Taylor

libgo: Update to weekly.2012-02-22 release.

From-SVN: r184819
parent 34c5f21a
......@@ -4,6 +4,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test that the Go environment variables are present and accessible through
// package os and package runtime.
package main
import (
......@@ -12,18 +15,14 @@ import (
)
func main() {
ga, e0 := os.Getenverror("GOARCH")
if e0 != nil {
print("$GOARCH: ", e0.Error(), "\n")
os.Exit(1)
}
ga := os.Getenv("GOARCH")
if ga != runtime.GOARCH {
print("$GOARCH=", ga, "!= runtime.GOARCH=", runtime.GOARCH, "\n")
os.Exit(1)
}
xxx, e1 := os.Getenverror("DOES_NOT_EXIST")
if e1 != os.ENOENV {
print("$DOES_NOT_EXIST=", xxx, "; err = ", e1.Error(), "\n")
xxx := os.Getenv("DOES_NOT_EXIST")
if xxx != "" {
print("$DOES_NOT_EXIST=", xxx, "\n")
os.Exit(1)
}
}
......@@ -7,7 +7,7 @@
package main
import (
"os"
"errors"
"strconv"
)
......@@ -44,7 +44,7 @@ func main() {
}
mm := make(map[string]error)
trace = ""
mm["abc"] = os.EINVAL
mm["abc"] = errors.New("invalid")
*i(), mm[f()] = strconv.Atoi(h())
if mm["abc"] != nil || trace != "ifh" {
println("BUG1", mm["abc"], trace)
......
43cf9b39b647
96bd78e7d35e
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
......@@ -504,7 +504,7 @@ runtime1.c: $(srcdir)/runtime/runtime1.goc goc2c
mv -f $@.tmp $@
sema.c: $(srcdir)/runtime/sema.goc goc2c
./goc2c --gcc --go-prefix libgo_runtime $< > $@.tmp
./goc2c --gcc --go-prefix libgo_sync $< > $@.tmp
mv -f $@.tmp $@
sigqueue.c: $(srcdir)/runtime/sigqueue.goc goc2c
......@@ -847,6 +847,7 @@ go_sync_files = \
go/sync/cond.go \
go/sync/mutex.go \
go/sync/once.go \
go/sync/runtime.go \
go/sync/rwmutex.go \
go/sync/waitgroup.go
......@@ -878,6 +879,7 @@ go_time_files = \
go/time/tick.go \
go/time/time.go \
go/time/zoneinfo.go \
go/time/zoneinfo_read.go \
go/time/zoneinfo_unix.go
go_unicode_files = \
......@@ -1091,6 +1093,7 @@ go_exp_norm_files = \
go/exp/norm/composition.go \
go/exp/norm/forminfo.go \
go/exp/norm/input.go \
go/exp/norm/iter.go \
go/exp/norm/normalize.go \
go/exp/norm/readwriter.go \
go/exp/norm/tables.go \
......@@ -1132,7 +1135,8 @@ go_go_doc_files = \
go/go/doc/example.go \
go/go/doc/exports.go \
go/go/doc/filter.go \
go/go/doc/reader.go
go/go/doc/reader.go \
go/go/doc/synopsis.go
go_go_parser_files = \
go/go/parser/interface.go \
go/go/parser/parser.go
......@@ -1159,7 +1163,6 @@ go_hash_fnv_files = \
go_html_template_files = \
go/html/template/attr.go \
go/html/template/clone.go \
go/html/template/content.go \
go/html/template/context.go \
go/html/template/css.go \
......
......@@ -1157,6 +1157,7 @@ go_sync_files = \
go/sync/cond.go \
go/sync/mutex.go \
go/sync/once.go \
go/sync/runtime.go \
go/sync/rwmutex.go \
go/sync/waitgroup.go
......@@ -1182,6 +1183,7 @@ go_time_files = \
go/time/tick.go \
go/time/time.go \
go/time/zoneinfo.go \
go/time/zoneinfo_read.go \
go/time/zoneinfo_unix.go
go_unicode_files = \
......@@ -1427,6 +1429,7 @@ go_exp_norm_files = \
go/exp/norm/composition.go \
go/exp/norm/forminfo.go \
go/exp/norm/input.go \
go/exp/norm/iter.go \
go/exp/norm/normalize.go \
go/exp/norm/readwriter.go \
go/exp/norm/tables.go \
......@@ -1474,7 +1477,8 @@ go_go_doc_files = \
go/go/doc/example.go \
go/go/doc/exports.go \
go/go/doc/filter.go \
go/go/doc/reader.go
go/go/doc/reader.go \
go/go/doc/synopsis.go
go_go_parser_files = \
go/go/parser/interface.go \
......@@ -1508,7 +1512,6 @@ go_hash_fnv_files = \
go_html_template_files = \
go/html/template/attr.go \
go/html/template/clone.go \
go/html/template/content.go \
go/html/template/context.go \
go/html/template/css.go \
......@@ -4318,7 +4321,7 @@ runtime1.c: $(srcdir)/runtime/runtime1.goc goc2c
mv -f $@.tmp $@
sema.c: $(srcdir)/runtime/sema.goc goc2c
./goc2c --gcc --go-prefix libgo_runtime $< > $@.tmp
./goc2c --gcc --go-prefix libgo_sync $< > $@.tmp
mv -f $@.tmp $@
sigqueue.c: $(srcdir)/runtime/sigqueue.goc goc2c
......
......@@ -106,9 +106,12 @@ func (b *Reader) Peek(n int) ([]byte, error) {
if m > n {
m = n
}
err := b.readErr()
if m < n && err == nil {
err = ErrBufferFull
var err error
if m < n {
err = b.readErr()
if err == nil {
err = ErrBufferFull
}
}
return b.buf[b.r : b.r+m], err
}
......
......@@ -539,6 +539,27 @@ func TestPeek(t *testing.T) {
if _, err := buf.Peek(1); err != io.EOF {
t.Fatalf("want EOF got %v", err)
}
// Test for issue 3022, not exposing a reader's error on a successful Peek.
buf = NewReaderSize(dataAndEOFReader("abcd"), 32)
if s, err := buf.Peek(2); string(s) != "ab" || err != nil {
t.Errorf(`Peek(2) on "abcd", EOF = %q, %v; want "ab", nil`, string(s), err)
}
if s, err := buf.Peek(4); string(s) != "abcd" || err != nil {
t.Errorf(`Peek(4) on "abcd", EOF = %q, %v; want "abcd", nil`, string(s), err)
}
if n, err := buf.Read(p[0:5]); string(p[0:n]) != "abcd" || err != nil {
t.Fatalf("Read after peek = %q, %v; want abcd, EOF", p[0:n], err)
}
if n, err := buf.Read(p[0:1]); string(p[0:n]) != "" || err != io.EOF {
t.Fatalf(`second Read after peek = %q, %v; want "", EOF`, p[0:n], err)
}
}
type dataAndEOFReader string
func (r dataAndEOFReader) Read(p []byte) (int, error) {
return copy(p, r), io.EOF
}
func TestPeekThenUnreadRune(t *testing.T) {
......
......@@ -13,6 +13,7 @@ import (
// Compare returns an integer comparing the two byte arrays lexicographically.
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
// A nil argument is equivalent to an empty slice.
func Compare(a, b []byte) int {
m := len(a)
if m > len(b) {
......@@ -37,6 +38,7 @@ func Compare(a, b []byte) int {
}
// Equal returns a boolean reporting whether a == b.
// A nil argument is equivalent to an empty slice.
func Equal(a, b []byte) bool
func equalPortable(a, b []byte) bool {
......
......@@ -46,32 +46,39 @@ type BinOpTest struct {
i int
}
var comparetests = []BinOpTest{
{"", "", 0},
{"a", "", 1},
{"", "a", -1},
{"abc", "abc", 0},
{"ab", "abc", -1},
{"abc", "ab", 1},
{"x", "ab", 1},
{"ab", "x", -1},
{"x", "a", 1},
{"b", "x", -1},
var compareTests = []struct {
a, b []byte
i int
}{
{[]byte(""), []byte(""), 0},
{[]byte("a"), []byte(""), 1},
{[]byte(""), []byte("a"), -1},
{[]byte("abc"), []byte("abc"), 0},
{[]byte("ab"), []byte("abc"), -1},
{[]byte("abc"), []byte("ab"), 1},
{[]byte("x"), []byte("ab"), 1},
{[]byte("ab"), []byte("x"), -1},
{[]byte("x"), []byte("a"), 1},
{[]byte("b"), []byte("x"), -1},
// nil tests
{nil, nil, 0},
{[]byte(""), nil, 0},
{nil, []byte(""), 0},
{[]byte("a"), nil, 1},
{nil, []byte("a"), -1},
}
func TestCompare(t *testing.T) {
for _, tt := range comparetests {
a := []byte(tt.a)
b := []byte(tt.b)
cmp := Compare(a, b)
for _, tt := range compareTests {
cmp := Compare(tt.a, tt.b)
if cmp != tt.i {
t.Errorf(`Compare(%q, %q) = %v`, tt.a, tt.b, cmp)
}
eql := Equal(a, b)
eql := Equal(tt.a, tt.b)
if eql != (tt.i == 0) {
t.Errorf(`Equal(%q, %q) = %v`, tt.a, tt.b, eql)
}
eql = EqualPortable(a, b)
eql = EqualPortable(tt.a, tt.b)
if eql != (tt.i == 0) {
t.Errorf(`EqualPortable(%q, %q) = %v`, tt.a, tt.b, eql)
}
......
......@@ -11,18 +11,18 @@ import (
"os"
)
// Hello world!
// ExampleBuffer shows that the zero value of Buffer is ready to use:
// bytes are accumulated with Write and drained with WriteTo.
func ExampleBuffer() {
	var b Buffer // A Buffer needs no initialization.
	b.Write([]byte("Hello "))
	b.Write([]byte("world!"))
	b.WriteTo(os.Stdout)
	// Output: Hello world!
}
// Gophers rule!
// ExampleBuffer_reader uses a Buffer as the io.Reader side of a pipeline,
// feeding base64-encoded text through a decoder to os.Stdout.
func ExampleBuffer_reader() {
	// A Buffer can turn a string or a []byte into an io.Reader.
	buf := NewBufferString("R29waGVycyBydWxlIQ==")
	dec := base64.NewDecoder(base64.StdEncoding, buf)
	io.Copy(os.Stdout, dec)
	// Output: Gophers rule!
}
......@@ -306,6 +306,9 @@ func TestDeflateInflateString(t *testing.T) {
t.Error(err)
}
testToFromWithLimit(t, gold, test.label, test.limit)
if testing.Short() {
break
}
}
}
......@@ -363,6 +366,10 @@ func TestWriterDict(t *testing.T) {
// See http://code.google.com/p/go/issues/detail?id=2508
func TestRegression2508(t *testing.T) {
if testing.Short() {
t.Logf("test disabled with -short")
return
}
w, err := NewWriter(ioutil.Discard, 1)
if err != nil {
t.Fatalf("NewWriter: %v", err)
......
......@@ -57,11 +57,26 @@ func (pq *PriorityQueue) Pop() interface{} {
return item
}
// 99:seven 88:five 77:zero 66:nine 55:three 44:two 33:six 22:one 11:four 00:eight
func ExampleInterface() {
// The full code of this example, including the methods that implement
// heap.Interface, is in the file src/pkg/container/heap/example_test.go.
// update is not used by the example but shows how to take the top item from
// the queue, update its priority and value, and put it back.
// update replaces the value and priority of the item currently at the top
// of the queue. It is not used by the example but shows how to take the top
// item from the queue, modify it, and put it back.
func (pq *PriorityQueue) update(value string, priority int) {
	item := heap.Pop(pq).(*Item) // remove the highest-priority item
	item.value = value
	item.priority = priority
	heap.Push(pq, item) // re-insert so the heap invariant is restored
}
// changePriority is not used by the example but shows how to change the
// priority of an arbitrary item.
// changePriority changes the priority of an arbitrary item already in the
// queue, using the item's stored index to remove and re-insert it.
func (pq *PriorityQueue) changePriority(item *Item, priority int) {
	heap.Remove(pq, item.index) // detach the item wherever it sits in the heap
	item.priority = priority
	heap.Push(pq, item) // re-insert with the new priority
}
// This example pushes 10 items into a PriorityQueue and takes them out in
// order of priority.
func Example() {
const nItem = 10
// Random priorities for the items (a permutation of 0..9, times 11).
priorities := [nItem]int{
......@@ -85,21 +100,6 @@ func ExampleInterface() {
item := heap.Pop(&pq).(*Item)
fmt.Printf("%.2d:%s ", item.priority, item.value)
}
}
// update is not used by the example but shows how to take the top item from the queue,
// update its priority and value, and put it back.
func (pq *PriorityQueue) update(value string, priority int) {
item := heap.Pop(pq).(*Item)
item.value = value
item.priority = priority
heap.Push(pq, item)
}
// changePriority is not used by the example but shows how to change the priority of an arbitrary
// item.
func (pq *PriorityQueue) changePriority(item *Item, priority int) {
heap.Remove(pq, item.index)
item.priority = priority
heap.Push(pq, item)
// Output:
// 99:seven 88:five 77:zero 66:nine 55:three 44:two 33:six 22:one 11:four 00:eight
}
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package dsa implements the Digital Signature Algorithm, as defined in FIPS 186-3
// Package dsa implements the Digital Signature Algorithm, as defined in FIPS 186-3.
package dsa
import (
......
......@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package md5
package md5_test
import (
"crypto/md5"
"fmt"
"io"
"testing"
......@@ -52,7 +53,7 @@ var golden = []md5Test{
func TestGolden(t *testing.T) {
for i := 0; i < len(golden); i++ {
g := golden[i]
c := New()
c := md5.New()
for j := 0; j < 3; j++ {
if j < 2 {
io.WriteString(c, g.in)
......@@ -69,3 +70,11 @@ func TestGolden(t *testing.T) {
}
}
}
func ExampleNew() {
h := md5.New()
io.WriteString(h, "The fog is getting thicker!")
io.WriteString(h, "And Leon's getting laaarger!")
fmt.Printf("%x", h.Sum(nil))
// Output: e2c569be17396eca2a2e3c11578123ed
}
......@@ -4,9 +4,10 @@
// SHA1 hash algorithm. See RFC 3174.
package sha1
package sha1_test
import (
"crypto/sha1"
"fmt"
"io"
"testing"
......@@ -54,7 +55,7 @@ var golden = []sha1Test{
func TestGolden(t *testing.T) {
for i := 0; i < len(golden); i++ {
g := golden[i]
c := New()
c := sha1.New()
for j := 0; j < 3; j++ {
if j < 2 {
io.WriteString(c, g.in)
......@@ -71,3 +72,10 @@ func TestGolden(t *testing.T) {
}
}
}
func ExampleNew() {
h := sha1.New()
io.WriteString(h, "His money is twice tainted: 'taint yours and 'taint mine.")
fmt.Printf("% x", h.Sum(nil))
// Output: 59 7f 6a 54 00 10 f9 4c 15 d7 18 06 a9 9a 2c 87 10 e7 47 bd
}
......@@ -273,7 +273,7 @@ func (c *Conn) clientHandshake() error {
masterSecret, clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
keysFromPreMasterSecret(c.vers, preMasterSecret, hello.random, serverHello.random, suite.macLen, suite.keyLen, suite.ivLen)
clientCipher := suite.cipher(clientKey, clientIV, false /* not for reading */ )
clientCipher := suite.cipher(clientKey, clientIV, false /* not for reading */)
clientHash := suite.mac(c.vers, clientMAC)
c.out.prepareCipherSpec(c.vers, clientCipher, clientHash)
c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
......@@ -294,7 +294,7 @@ func (c *Conn) clientHandshake() error {
finishedHash.Write(finished.marshal())
c.writeRecord(recordTypeHandshake, finished.marshal())
serverCipher := suite.cipher(serverKey, serverIV, true /* for reading */ )
serverCipher := suite.cipher(serverKey, serverIV, true /* for reading */)
serverHash := suite.mac(c.vers, serverMAC)
c.in.prepareCipherSpec(c.vers, serverCipher, serverHash)
c.readRecord(recordTypeChangeCipherSpec)
......
......@@ -295,7 +295,7 @@ FindCipherSuite:
masterSecret, clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
keysFromPreMasterSecret(c.vers, preMasterSecret, clientHello.random, hello.random, suite.macLen, suite.keyLen, suite.ivLen)
clientCipher := suite.cipher(clientKey, clientIV, true /* for reading */ )
clientCipher := suite.cipher(clientKey, clientIV, true /* for reading */)
clientHash := suite.mac(c.vers, clientMAC)
c.in.prepareCipherSpec(c.vers, clientCipher, clientHash)
c.readRecord(recordTypeChangeCipherSpec)
......@@ -333,7 +333,7 @@ FindCipherSuite:
finishedHash.Write(clientFinished.marshal())
serverCipher := suite.cipher(serverKey, serverIV, false /* not for reading */ )
serverCipher := suite.cipher(serverKey, serverIV, false /* not for reading */)
serverHash := suite.mac(c.vers, serverMAC)
c.out.prepareCipherSpec(c.vers, serverCipher, serverHash)
c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
......
......@@ -17,8 +17,8 @@ import (
// subsetTypeArgs takes a slice of arguments from callers of the sql
// package and converts them into a slice of the driver package's
// "subset types".
func subsetTypeArgs(args []interface{}) ([]interface{}, error) {
out := make([]interface{}, len(args))
func subsetTypeArgs(args []interface{}) ([]driver.Value, error) {
out := make([]driver.Value, len(args))
for n, arg := range args {
var err error
out[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
......
......@@ -6,21 +6,20 @@
// drivers as used by package sql.
//
// Most code should use package sql.
//
// Drivers only need to be aware of a subset of Go's types. The sql package
// will convert all types into one of the following:
package driver
import "errors"
// A driver Value is a value that drivers must be able to handle.
// A Value is either nil or an instance of one of these types:
//
// int64
// float64
// bool
// nil
// []byte
// string [*] everywhere except from Rows.Next.
// time.Time
//
package driver
import "errors"
type Value interface{}
// Driver is the interface that must be implemented by a database
// driver.
......@@ -50,11 +49,9 @@ var ErrSkip = errors.New("driver: skip fast-path; continue as if unimplemented")
// first prepare a query, execute the statement, and then close the
// statement.
//
// All arguments are of a subset type as defined in the package docs.
//
// Exec may return ErrSkip.
type Execer interface {
Exec(query string, args []interface{}) (Result, error)
Exec(query string, args []Value) (Result, error)
}
// Conn is a connection to a database. It is not used concurrently
......@@ -127,18 +124,17 @@ type Stmt interface {
NumInput() int
// Exec executes a query that doesn't return rows, such
// as an INSERT or UPDATE. The args are all of a subset
// type as defined above.
Exec(args []interface{}) (Result, error)
// as an INSERT or UPDATE.
Exec(args []Value) (Result, error)
// Exec executes a query that may return rows, such as a
// SELECT. The args of all of a subset type as defined above.
Query(args []interface{}) (Rows, error)
// SELECT.
Query(args []Value) (Rows, error)
}
// ColumnConverter may be optionally implemented by Stmt if the
// statement is aware of its own columns' types and can
// convert from any type to a driver subset type.
// convert from any type to a driver Value.
type ColumnConverter interface {
// ColumnConverter returns a ValueConverter for the provided
// column index. If the type of a specific column isn't known
......@@ -162,12 +158,12 @@ type Rows interface {
// the provided slice. The provided slice will be the same
// size as the Columns() are wide.
//
// The dest slice may be populated with only with values
// of subset types defined above, but excluding string.
// The dest slice may be populated only with
// a driver Value type, but excluding string.
// All string values must be converted to []byte.
//
// Next should return io.EOF when there are no more rows.
Next(dest []interface{}) error
Next(dest []Value) error
}
// Tx is a transaction.
......@@ -190,18 +186,19 @@ func (v RowsAffected) RowsAffected() (int64, error) {
return int64(v), nil
}
// DDLSuccess is a pre-defined Result for drivers to return when a DDL
// command succeeds.
var DDLSuccess ddlSuccess
// ResultNoRows is a pre-defined Result for drivers to return when a DDL
// command (such as a CREATE TABLE) succeeds. It returns an error for both
// LastInsertId and RowsAffected.
var ResultNoRows noRows
type ddlSuccess struct{}
type noRows struct{}
var _ Result = ddlSuccess{}
var _ Result = noRows{}
func (ddlSuccess) LastInsertId() (int64, error) {
// LastInsertId always fails: a DDL statement produces no inserted row id.
func (noRows) LastInsertId() (int64, error) {
	return 0, errors.New("no LastInsertId available after DDL statement")
}
func (ddlSuccess) RowsAffected() (int64, error) {
// RowsAffected always fails: a DDL statement affects no countable rows.
func (noRows) RowsAffected() (int64, error) {
	return 0, errors.New("no RowsAffected available after DDL statement")
}
......@@ -17,28 +17,28 @@ import (
// driver package to provide consistent implementations of conversions
// between drivers. The ValueConverters have several uses:
//
// * converting from the subset types as provided by the sql package
// * converting from the Value types as provided by the sql package
// into a database table's specific column type and making sure it
// fits, such as making sure a particular int64 fits in a
// table's uint16 column.
//
// * converting a value as given from the database into one of the
// subset types.
// driver Value types.
//
// * by the sql package, for converting from a driver's subset type
// * by the sql package, for converting from a driver's Value type
// to a user's type in a scan.
type ValueConverter interface {
// ConvertValue converts a value to a restricted subset type.
ConvertValue(v interface{}) (interface{}, error)
// ConvertValue converts a value to a driver Value.
ConvertValue(v interface{}) (Value, error)
}
// SubsetValuer is the interface providing the SubsetValue method.
// Valuer is the interface providing the Value method.
//
// Types implementing SubsetValuer interface are able to convert
// themselves to one of the driver's allowed subset values.
type SubsetValuer interface {
// SubsetValue returns a driver parameter subset value.
SubsetValue() (interface{}, error)
// Types implementing Valuer interface are able to convert
// themselves to a driver Value.
type Valuer interface {
// Value returns a driver Value.
Value() (Value, error)
}
// Bool is a ValueConverter that converts input values to bools.
......@@ -59,7 +59,7 @@ var _ ValueConverter = boolType{}
func (boolType) String() string { return "Bool" }
func (boolType) ConvertValue(src interface{}) (interface{}, error) {
func (boolType) ConvertValue(src interface{}) (Value, error) {
switch s := src.(type) {
case bool:
return s, nil
......@@ -104,7 +104,7 @@ type int32Type struct{}
var _ ValueConverter = int32Type{}
func (int32Type) ConvertValue(v interface{}) (interface{}, error) {
func (int32Type) ConvertValue(v interface{}) (Value, error) {
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
......@@ -137,7 +137,7 @@ var String stringType
type stringType struct{}
func (stringType) ConvertValue(v interface{}) (interface{}, error) {
func (stringType) ConvertValue(v interface{}) (Value, error) {
switch v.(type) {
case string, []byte:
return v, nil
......@@ -151,7 +151,7 @@ type Null struct {
Converter ValueConverter
}
func (n Null) ConvertValue(v interface{}) (interface{}, error) {
func (n Null) ConvertValue(v interface{}) (Value, error) {
if v == nil {
return nil, nil
}
......@@ -164,28 +164,17 @@ type NotNull struct {
Converter ValueConverter
}
func (n NotNull) ConvertValue(v interface{}) (interface{}, error) {
// ConvertValue rejects nil input and otherwise delegates conversion to the
// wrapped Converter.
func (n NotNull) ConvertValue(v interface{}) (Value, error) {
	if v == nil {
		return nil, fmt.Errorf("nil value not allowed")
	}
	return n.Converter.ConvertValue(v)
}
// IsParameterSubsetType reports whether v is of a valid type for a
// parameter. These types are:
//
// int64
// float64
// bool
// nil
// []byte
// time.Time
// string
//
// This is the same list as IsScanSubsetType, with the addition of
// string.
func IsParameterSubsetType(v interface{}) bool {
if IsScanSubsetType(v) {
// IsValue reports whether v is a valid Value parameter type.
// Unlike IsScanValue, IsValue permits the string type.
func IsValue(v interface{}) bool {
if IsScanValue(v) {
return true
}
if _, ok := v.(string); ok {
......@@ -194,18 +183,9 @@ func IsParameterSubsetType(v interface{}) bool {
return false
}
// IsScanSubsetType reports whether v is of a valid type for a
// value populated by Rows.Next. These types are:
//
// int64
// float64
// bool
// nil
// []byte
// time.Time
//
// This is the same list as IsParameterSubsetType, without string.
func IsScanSubsetType(v interface{}) bool {
// IsScanValue reports whether v is a valid Value scan type.
// Unlike IsValue, IsScanValue does not permit the string type.
func IsScanValue(v interface{}) bool {
if v == nil {
return true
}
......@@ -221,7 +201,7 @@ func IsScanSubsetType(v interface{}) bool {
// ColumnConverter.
//
// DefaultParameterConverter returns the given value directly if
// IsSubsetType(value). Otherwise integer type are converted to
// IsValue(value). Otherwise integer types are converted to
// int64, floats to float64, and strings to []byte. Other types are
// an error.
var DefaultParameterConverter defaultConverter
......@@ -230,18 +210,18 @@ type defaultConverter struct{}
var _ ValueConverter = defaultConverter{}
func (defaultConverter) ConvertValue(v interface{}) (interface{}, error) {
if IsParameterSubsetType(v) {
func (defaultConverter) ConvertValue(v interface{}) (Value, error) {
if IsValue(v) {
return v, nil
}
if svi, ok := v.(SubsetValuer); ok {
sv, err := svi.SubsetValue()
if svi, ok := v.(Valuer); ok {
sv, err := svi.Value()
if err != nil {
return nil, err
}
if !IsParameterSubsetType(sv) {
return nil, fmt.Errorf("non-subset type %T returned from SubsetValue", sv)
if !IsValue(sv) {
return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
}
return sv, nil
}
......
......@@ -217,7 +217,7 @@ func (c *fakeConn) Close() error {
return nil
}
func checkSubsetTypes(args []interface{}) error {
func checkSubsetTypes(args []driver.Value) error {
for n, arg := range args {
switch arg.(type) {
case int64, float64, bool, nil, []byte, string, time.Time:
......@@ -228,7 +228,7 @@ func checkSubsetTypes(args []interface{}) error {
return nil
}
func (c *fakeConn) Exec(query string, args []interface{}) (driver.Result, error) {
func (c *fakeConn) Exec(query string, args []driver.Value) (driver.Result, error) {
// This is an optional interface, but it's implemented here
// just to check that all the args are of the proper types.
// ErrSkip is returned so the caller acts as if we didn't
......@@ -379,7 +379,7 @@ func (s *fakeStmt) Close() error {
var errClosed = errors.New("fakedb: statement has been closed")
func (s *fakeStmt) Exec(args []interface{}) (driver.Result, error) {
func (s *fakeStmt) Exec(args []driver.Value) (driver.Result, error) {
if s.closed {
return nil, errClosed
}
......@@ -392,12 +392,12 @@ func (s *fakeStmt) Exec(args []interface{}) (driver.Result, error) {
switch s.cmd {
case "WIPE":
db.wipe()
return driver.DDLSuccess, nil
return driver.ResultNoRows, nil
case "CREATE":
if err := db.createTable(s.table, s.colName, s.colType); err != nil {
return nil, err
}
return driver.DDLSuccess, nil
return driver.ResultNoRows, nil
case "INSERT":
return s.execInsert(args)
}
......@@ -405,7 +405,7 @@ func (s *fakeStmt) Exec(args []interface{}) (driver.Result, error) {
return nil, fmt.Errorf("unimplemented statement Exec command type of %q", s.cmd)
}
func (s *fakeStmt) execInsert(args []interface{}) (driver.Result, error) {
func (s *fakeStmt) execInsert(args []driver.Value) (driver.Result, error) {
db := s.c.db
if len(args) != s.placeholders {
panic("error in pkg db; should only get here if size is correct")
......@@ -441,7 +441,7 @@ func (s *fakeStmt) execInsert(args []interface{}) (driver.Result, error) {
return driver.RowsAffected(1), nil
}
func (s *fakeStmt) Query(args []interface{}) (driver.Rows, error) {
func (s *fakeStmt) Query(args []driver.Value) (driver.Rows, error) {
if s.closed {
return nil, errClosed
}
......@@ -548,7 +548,7 @@ func (rc *rowsCursor) Columns() []string {
return rc.cols
}
func (rc *rowsCursor) Next(dest []interface{}) error {
func (rc *rowsCursor) Next(dest []driver.Value) error {
if rc.closed {
return errors.New("fakedb: cursor is closed")
}
......
......@@ -62,8 +62,8 @@ func (ns *NullString) Scan(value interface{}) error {
return convertAssign(&ns.String, value)
}
// SubsetValue implements the driver SubsetValuer interface.
func (ns NullString) SubsetValue() (interface{}, error) {
// Value implements the driver Valuer interface.
func (ns NullString) Value() (driver.Value, error) {
if !ns.Valid {
return nil, nil
}
......@@ -88,8 +88,8 @@ func (n *NullInt64) Scan(value interface{}) error {
return convertAssign(&n.Int64, value)
}
// SubsetValue implements the driver SubsetValuer interface.
func (n NullInt64) SubsetValue() (interface{}, error) {
// Value implements the driver Valuer interface.
func (n NullInt64) Value() (driver.Value, error) {
if !n.Valid {
return nil, nil
}
......@@ -114,8 +114,8 @@ func (n *NullFloat64) Scan(value interface{}) error {
return convertAssign(&n.Float64, value)
}
// SubsetValue implements the driver SubsetValuer interface.
func (n NullFloat64) SubsetValue() (interface{}, error) {
// Value implements the driver Valuer interface.
func (n NullFloat64) Value() (driver.Value, error) {
if !n.Valid {
return nil, nil
}
......@@ -140,8 +140,8 @@ func (n *NullBool) Scan(value interface{}) error {
return convertAssign(&n.Bool, value)
}
// SubsetValue implements the driver SubsetValuer interface.
func (n NullBool) SubsetValue() (interface{}, error) {
// Value implements the driver Valuer interface.
func (n NullBool) Value() (driver.Value, error) {
if !n.Valid {
return nil, nil
}
......@@ -523,8 +523,13 @@ func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
}
defer tx.releaseConn()
sargs, err := subsetTypeArgs(args)
if err != nil {
return nil, err
}
if execer, ok := ci.(driver.Execer); ok {
resi, err := execer.Exec(query, args)
resi, err := execer.Exec(query, sargs)
if err == nil {
return result{resi}, nil
}
......@@ -539,11 +544,6 @@ func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
}
defer sti.Close()
sargs, err := subsetTypeArgs(args)
if err != nil {
return nil, err
}
resi, err := sti.Exec(sargs)
if err != nil {
return nil, err
......@@ -618,19 +618,21 @@ func (s *Stmt) Exec(args ...interface{}) (Result, error) {
return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(args))
}
sargs := make([]driver.Value, len(args))
// Convert args to subset types.
if cc, ok := si.(driver.ColumnConverter); ok {
for n, arg := range args {
// First, see if the value itself knows how to convert
// itself to a driver type. For example, a NullString
// struct changing into a string or nil.
if svi, ok := arg.(driver.SubsetValuer); ok {
sv, err := svi.SubsetValue()
if svi, ok := arg.(driver.Valuer); ok {
sv, err := svi.Value()
if err != nil {
return nil, fmt.Errorf("sql: argument index %d from SubsetValue: %v", n, err)
return nil, fmt.Errorf("sql: argument index %d from Value: %v", n, err)
}
if !driver.IsParameterSubsetType(sv) {
return nil, fmt.Errorf("sql: argument index %d: non-subset type %T returned from SubsetValue", n, sv)
if !driver.IsValue(sv) {
return nil, fmt.Errorf("sql: argument index %d: non-subset type %T returned from Value", n, sv)
}
arg = sv
}
......@@ -642,25 +644,25 @@ func (s *Stmt) Exec(args ...interface{}) (Result, error) {
// truncated), or that a nil can't go into a NOT NULL
// column before going across the network to get the
// same error.
args[n], err = cc.ColumnConverter(n).ConvertValue(arg)
sargs[n], err = cc.ColumnConverter(n).ConvertValue(arg)
if err != nil {
return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
}
if !driver.IsParameterSubsetType(args[n]) {
if !driver.IsValue(sargs[n]) {
return nil, fmt.Errorf("sql: driver ColumnConverter error converted %T to unsupported type %T",
arg, args[n])
arg, sargs[n])
}
}
} else {
for n, arg := range args {
args[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
sargs[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
if err != nil {
return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
}
}
}
resi, err := si.Exec(args)
resi, err := si.Exec(sargs)
if err != nil {
return nil, err
}
......@@ -829,7 +831,7 @@ type Rows struct {
rowsi driver.Rows
closed bool
lastcols []interface{}
lastcols []driver.Value
lasterr error
closeStmt *Stmt // if non-nil, statement to Close on close
}
......@@ -846,7 +848,7 @@ func (rs *Rows) Next() bool {
return false
}
if rs.lastcols == nil {
rs.lastcols = make([]interface{}, len(rs.rowsi.Columns()))
rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns()))
}
rs.lasterr = rs.rowsi.Next(rs.lastcols)
if rs.lasterr == io.EOF {
......
......@@ -31,8 +31,9 @@ type Data struct {
}
// New returns a new Data object initialized from the given parameters.
// Clients should typically use [TODO(rsc): method to be named later] instead of calling
// New directly.
// Rather than calling this function directly, clients should typically use
// the DWARF method of the File type of the appropriate package debug/elf,
// debug/macho, or debug/pe.
//
// The []byte arguments are the data from the corresponding debug section
// in the object file; for example, for an ELF object, abbrev is the contents of
......
......@@ -28,8 +28,13 @@ typedef struct my_struct {
volatile int vi;
char x : 1;
int y : 4;
int z[0];
long long array[40];
int zz[0];
} t_my_struct;
typedef struct my_struct1 {
int zz [1];
} t_my_struct1;
typedef union my_union {
volatile int vi;
char x : 1;
......@@ -65,7 +70,8 @@ t_func_void_of_char *a9;
t_func_void_of_void *a10;
t_func_void_of_ptr_char_dots *a11;
t_my_struct *a12;
t_my_union *a12a;
t_my_struct1 *a12a;
t_my_union *a12b;
t_my_enum *a13;
t_my_list *a14;
t_my_tree *a15;
......
......@@ -426,6 +426,8 @@ func (d *Data) Type(off Offset) (Type, error) {
t.StructName, _ = e.Val(AttrName).(string)
t.Incomplete = e.Val(AttrDeclaration) != nil
t.Field = make([]*StructField, 0, 8)
var lastFieldType Type
var lastFieldBitOffset int64
for kid := next(); kid != nil; kid = next() {
if kid.Tag == TagMember {
f := new(StructField)
......@@ -444,11 +446,32 @@ func (d *Data) Type(off Offset) (Type, error) {
goto Error
}
}
haveBitOffset := false
f.Name, _ = kid.Val(AttrName).(string)
f.ByteSize, _ = kid.Val(AttrByteSize).(int64)
f.BitOffset, _ = kid.Val(AttrBitOffset).(int64)
f.BitOffset, haveBitOffset = kid.Val(AttrBitOffset).(int64)
f.BitSize, _ = kid.Val(AttrBitSize).(int64)
t.Field = append(t.Field, f)
bito := f.BitOffset
if !haveBitOffset {
bito = f.ByteOffset * 8
}
if bito == lastFieldBitOffset && t.Kind != "union" {
// Last field was zero width. Fix array length.
// (DWARF writes out 0-length arrays as if they were 1-length arrays.)
zeroArray(lastFieldType)
}
lastFieldType = f.Type
lastFieldBitOffset = bito
}
}
if t.Kind != "union" {
b, ok := e.Val(AttrByteSize).(int64)
if ok && b*8 == lastFieldBitOffset {
// Final field must be zero width. Fix array length.
zeroArray(lastFieldType)
}
}
......@@ -579,3 +602,14 @@ Error:
delete(d.typeCache, off)
return nil, err
}
// zeroArray sets the Count of t — and of every nested array element
// type — to zero. DWARF writes out 0-length arrays as if they were
// 1-length arrays, so callers invoke this once a zero-width struct
// field reveals the true length.
func zeroArray(t Type) {
	for at, ok := t.(*ArrayType); ok; at, ok = t.(*ArrayType) {
		at.Count = 0
		t = at.Type
	}
}
......@@ -25,13 +25,22 @@ var typedefTests = map[string]string{
"t_func_void_of_char": "func(char) void",
"t_func_void_of_void": "func() void",
"t_func_void_of_ptr_char_dots": "func(*char, ...) void",
"t_my_struct": "struct my_struct {vi volatile int@0; x char@4 : 1@7; y int@4 : 4@27; array [40]long long int@8}",
"t_my_struct": "struct my_struct {vi volatile int@0; x char@4 : 1@7; y int@4 : 4@27; z [0]int@8; array [40]long long int@8; zz [0]int@328}",
"t_my_struct1": "struct my_struct1 {zz [1]int@0}",
"t_my_union": "union my_union {vi volatile int@0; x char@0 : 1@7; y int@0 : 4@28; array [40]long long int@0}",
"t_my_enum": "enum my_enum {e1=1; e2=2; e3=-5; e4=1000000000000000}",
"t_my_list": "struct list {val short int@0; next *t_my_list@8}",
"t_my_tree": "struct tree {left *struct tree@0; right *struct tree@8; val long long unsigned int@16}",
}
// As Apple converts gcc to a clang-based front end
// they keep breaking the DWARF output. This map lists the
// conversion from the correct answer to the answer Apple's
// toolchain currently produces, so the tests can accept either.
var machoBug = map[string]string{
	"func(*char, ...) void": "func(*char) void",
	"enum my_enum {e1=1; e2=2; e3=-5; e4=1000000000000000}": "enum my_enum {e1=1; e2=2; e3=-5; e4=-1530494976}",
}
func elfData(t *testing.T, name string) *Data {
f, err := elf.Open(name)
if err != nil {
......@@ -58,13 +67,13 @@ func machoData(t *testing.T, name string) *Data {
return d
}
func TestTypedefsELF(t *testing.T) { testTypedefs(t, elfData(t, "testdata/typedef.elf")) }
func TestTypedefsELF(t *testing.T) { testTypedefs(t, elfData(t, "testdata/typedef.elf"), "elf") }
func TestTypedefsMachO(t *testing.T) {
testTypedefs(t, machoData(t, "testdata/typedef.macho"))
testTypedefs(t, machoData(t, "testdata/typedef.macho"), "macho")
}
func testTypedefs(t *testing.T, d *Data) {
func testTypedefs(t *testing.T, d *Data, kind string) {
r := d.Reader()
seen := make(map[string]bool)
for {
......@@ -93,7 +102,7 @@ func testTypedefs(t *testing.T, d *Data) {
t.Errorf("multiple definitions for %s", t1.Name)
}
seen[t1.Name] = true
if typstr != want {
if typstr != want && (kind != "macho" || typstr != machoBug[want]) {
t.Errorf("%s:\n\thave %s\n\twant %s", t1.Name, typstr, want)
}
}
......
......@@ -6,15 +6,37 @@ package gosym
import (
"debug/elf"
"fmt"
"os"
"os/exec"
"runtime"
"strings"
"testing"
)
var pclinetestBinary string
func dotest() bool {
// For now, only works on ELF platforms.
// TODO: convert to work with new go tool
return false && runtime.GOOS == "linux" && runtime.GOARCH == "amd64"
if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
return false
}
if pclinetestBinary != "" {
return true
}
// This command builds pclinetest from pclinetest.asm;
// the resulting binary looks like it was built from pclinetest.s,
// but we have renamed it to keep it away from the go tool.
pclinetestBinary = os.TempDir() + "/pclinetest"
command := fmt.Sprintf("go tool 6a -o %s.6 pclinetest.asm && go tool 6l -E main -o %s %s.6",
pclinetestBinary, pclinetestBinary, pclinetestBinary)
cmd := exec.Command("sh", "-c", command)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
panic(err)
}
return true
}
func getTable(t *testing.T) *Table {
......@@ -149,7 +171,7 @@ func TestPCLine(t *testing.T) {
return
}
f, tab := crack("_test/pclinetest", t)
f, tab := crack(pclinetestBinary, t)
text := f.Section(".text")
textdat, err := text.Data()
if err != nil {
......@@ -163,10 +185,13 @@ func TestPCLine(t *testing.T) {
file, line, fn := tab.PCToLine(pc)
off := pc - text.Addr // TODO(rsc): should not need off; bug in 8g
wantLine += int(textdat[off])
t.Logf("off is %d", off)
if fn == nil {
t.Errorf("failed to get line of PC %#x", pc)
} else if len(file) < 12 || file[len(file)-12:] != "pclinetest.s" || line != wantLine || fn != sym {
t.Errorf("expected %s:%d (%s) at PC %#x, got %s:%d (%s)", "pclinetest.s", wantLine, sym.Name, pc, file, line, fn.Name)
} else if !strings.HasSuffix(file, "pclinetest.asm") {
t.Errorf("expected %s (%s) at PC %#x, got %s (%s)", "pclinetest.asm", sym.Name, pc, file, fn.Name)
} else if line != wantLine || fn != sym {
t.Errorf("expected :%d (%s) at PC %#x, got :%d (%s)", wantLine, sym.Name, pc, line, fn.Name)
}
}
......
......@@ -464,7 +464,7 @@ func allocate(rtyp reflect.Type, p uintptr, indir int) uintptr {
// decodeSingle decodes a top-level value that is not a struct and stores it through p.
// Such values are preceded by a zero, making them have the memory layout of a
// struct field (although with an illegal field number).
func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, basep uintptr) (err error) {
func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, basep uintptr) {
state := dec.newDecoderState(&dec.buf)
state.fieldnum = singletonField
delta := int(state.decodeUint())
......@@ -473,7 +473,7 @@ func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, basep uint
}
instr := &engine.instr[singletonField]
if instr.indir != ut.indir {
return errors.New("gob: internal error: inconsistent indirection")
errorf("internal error: inconsistent indirection instr %d ut %d", instr.indir, ut.indir)
}
ptr := unsafe.Pointer(basep) // offset will be zero
if instr.indir > 1 {
......@@ -481,10 +481,9 @@ func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, basep uint
}
instr.op(instr, state, ptr)
dec.freeDecoderState(state)
return nil
}
// decodeSingle decodes a top-level struct and stores it through p.
// decodeStruct decodes a top-level struct and stores it through p.
// Indir is for the value, not the type. At the time of the call it may
// differ from ut.indir, which was computed when the engine was built.
// This state cannot arise for decodeSingle, which is called directly
......@@ -839,11 +838,10 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
}
case reflect.Map:
name = "element of " + name
keyId := dec.wireType[wireId].MapT.Key
elemId := dec.wireType[wireId].MapT.Elem
keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), name, inProgress)
elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), "key of "+name, inProgress)
elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), "element of "+name, inProgress)
ovfl := overflow(name)
op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
up := unsafe.Pointer(p)
......@@ -1151,7 +1149,7 @@ func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEn
// getDecEnginePtr returns the engine for the specified type.
func (dec *Decoder) getDecEnginePtr(remoteId typeId, ut *userTypeInfo) (enginePtr **decEngine, err error) {
rt := ut.base
rt := ut.user
decoderMap, ok := dec.decoderCache[rt]
if !ok {
decoderMap = make(map[typeId]**decEngine)
......
......@@ -685,3 +685,54 @@ func TestSliceIncompatibility(t *testing.T) {
t.Error("expected compatibility error")
}
}
// Mutually recursive slices of structs caused problems.
type Bug3 struct {
	Num      int
	Children []*Bug3
}

// TestGobPtrSlices checks that a slice of pointers to a recursively
// defined struct type round-trips through Encode/Decode unchanged.
func TestGobPtrSlices(t *testing.T) {
	in := []*Bug3{
		&Bug3{1, nil},
		&Bug3{2, nil},
	}
	b := new(bytes.Buffer)
	err := NewEncoder(b).Encode(&in)
	if err != nil {
		t.Fatal("encode:", err)
	}
	var out []*Bug3
	err = NewDecoder(b).Decode(&out)
	if err != nil {
		t.Fatal("decode:", err)
	}
	if !reflect.DeepEqual(in, out) {
		// Fatalf, not Fatal: Fatal does not interpret format verbs,
		// so the original call would print the verbs literally.
		t.Fatalf("got %v; wanted %v", out, in)
	}
}
// getDecEnginePtr cached engine for ut.base instead of ut.user so we passed
// a *map and then tried to reuse its engine to decode the inner map.
func TestPtrToMapOfMap(t *testing.T) {
	// Register the concrete map type so it can travel inside interface{}.
	Register(make(map[string]interface{}))
	subdata := make(map[string]interface{})
	subdata["bar"] = "baz"
	data := make(map[string]interface{})
	data["foo"] = subdata
	b := new(bytes.Buffer)
	err := NewEncoder(b).Encode(data)
	if err != nil {
		t.Fatal("encode:", err)
	}
	// Decoding through a pointer is what exercised the bad cache key.
	var newData map[string]interface{}
	err = NewDecoder(b).Decode(&newData)
	if err != nil {
		t.Fatal("decode:", err)
	}
	if !reflect.DeepEqual(data, newData) {
		t.Fatalf("expected %v got %v", data, newData)
	}
}
......@@ -152,6 +152,10 @@ var idToType = make(map[typeId]gobType)
var builtinIdToType map[typeId]gobType // set in init() after builtins are established
func setTypeId(typ gobType) {
// When building recursive types, someone may get there before us.
if typ.id() != 0 {
return
}
nextId++
typ.setId(nextId)
idToType[nextId] = typ
......@@ -346,6 +350,11 @@ func newSliceType(name string) *sliceType {
func (s *sliceType) init(elem gobType) {
// Set our type id before evaluating the element's, in case it's our own.
setTypeId(s)
// See the comments about ids in newTypeObject. Only slices and
// structs have mutual recursion.
if elem.id() == 0 {
setTypeId(elem)
}
s.Elem = elem.id()
}
......@@ -503,6 +512,13 @@ func newTypeObject(name string, ut *userTypeInfo, rt reflect.Type) (gobType, err
if err != nil {
return nil, err
}
// Some mutually recursive types can cause us to be here while
// still defining the element. Fix the element type id here.
// We could do this more neatly by setting the id at the start of
// building every type, but that would break binary compatibility.
if gt.id() == 0 {
setTypeId(gt)
}
st.Field = append(st.Field, &fieldType{f.Name, gt.id()})
}
return st, nil
......
......@@ -496,6 +496,12 @@ func (d *decodeState) object(v reflect.Value) {
// Pretend this field doesn't exist.
continue
}
if sf.Anonymous {
// Pretend this field doesn't exist,
// so that we can do a good job with
// these in a later version.
continue
}
// First, tag match
tagName, _ := parseTag(tag)
if tagName == key {
......@@ -963,3 +969,11 @@ func unquoteBytes(s []byte) (t []byte, ok bool) {
}
return b[0:w], true
}
// The following is issue 3069.
// BUG(rsc): This package ignores anonymous (embedded) struct fields
// during encoding and decoding. A future version may assign meaning
// to them. To force an anonymous field to be ignored in all future
// versions of this package, use an explicit `json:"-"` tag in the struct
// definition.
......@@ -619,3 +619,32 @@ func TestRefUnmarshal(t *testing.T) {
t.Errorf("got %+v, want %+v", got, want)
}
}
// Test that anonymous fields are ignored.
// We may assign meaning to them later.
func TestAnonymous(t *testing.T) {
type S struct {
T
N int
}
data, err := Marshal(new(S))
if err != nil {
t.Fatalf("Marshal: %v", err)
}
want := `{"N":0}`
if string(data) != want {
t.Fatalf("Marshal = %#q, want %#q", string(data), want)
}
var s S
if err := Unmarshal([]byte(`{"T": 1, "T": {"Y": 1}, "N": 2}`), &s); err != nil {
t.Fatalf("Unmarshal: %v", err)
}
if s.N != 2 {
t.Fatal("Unmarshal: did not set N")
}
if s.T.Y != 0 {
t.Fatal("Unmarshal: did set T.Y")
}
}
......@@ -538,6 +538,11 @@ func encodeFields(t reflect.Type) []encodeField {
if f.PkgPath != "" {
continue
}
if f.Anonymous {
// We want to do a better job with these later,
// so for now pretend they don't exist.
continue
}
var ef encodeField
ef.i = i
ef.tag = f.Name
......
......@@ -57,35 +57,14 @@ const (
// if the field value is empty. The empty values are false, 0, any
// nil pointer or interface value, and any array, slice, map, or
// string of length zero.
// - a non-pointer anonymous struct field is handled as if the
// fields of its value were part of the outer struct.
//
// If a field uses a tag "a>b>c", then the element c will be nested inside
// parent elements a and b. Fields that appear next to each other that name
// the same parent will be enclosed in one XML element. For example:
// the same parent will be enclosed in one XML element.
//
// type Result struct {
// XMLName xml.Name `xml:"result"`
// Id int `xml:"id,attr"`
// FirstName string `xml:"person>name>first"`
// LastName string `xml:"person>name>last"`
// Age int `xml:"person>age"`
// Height float `xml:"person>height,omitempty"`
// Married bool `xml:"person>married"`
// }
//
// xml.Marshal(&Result{Id: 13, FirstName: "John", LastName: "Doe", Age: 42})
//
// would be marshalled as:
//
// <result>
// <person id="13">
// <name>
// <first>John</first>
// <last>Doe</last>
// </name>
// <age>42</age>
// <married>false</married>
// </person>
// </result>
// See MarshalIndent for an example.
//
// Marshal will return an error if asked to marshal a channel, function, or map.
func Marshal(v interface{}) ([]byte, error) {
......@@ -96,6 +75,22 @@ func Marshal(v interface{}) ([]byte, error) {
return b.Bytes(), nil
}
// MarshalIndent works like Marshal, but each XML element begins on a new
// indented line that starts with prefix and is followed by one or more
// copies of indent according to the nesting depth.
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
	var b bytes.Buffer
	enc := NewEncoder(&b)
	enc.prefix = prefix
	enc.indent = indent
	if err := enc.marshalValue(reflect.ValueOf(v), nil); err != nil {
		return nil, err
	}
	// The original dropped Flush's error. A bytes.Buffer never fails to
	// write, but check anyway rather than silently swallow the error.
	if err := enc.Flush(); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}
// An Encoder writes XML data to an output stream.
type Encoder struct {
printer
......@@ -103,7 +98,7 @@ type Encoder struct {
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{printer{bufio.NewWriter(w)}}
return &Encoder{printer{Writer: bufio.NewWriter(w)}}
}
// Encode writes the XML encoding of v to the stream.
......@@ -118,8 +113,14 @@ func (enc *Encoder) Encode(v interface{}) error {
type printer struct {
*bufio.Writer
indent string
prefix string
depth int
indentedIn bool
}
// marshalValue writes one or more XML elements representing val.
// If val was obtained from a struct field, finfo must have its details.
func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo) error {
if !val.IsValid() {
return nil
......@@ -177,6 +178,7 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo) error {
}
}
p.writeIndent(1)
p.WriteByte('<')
p.WriteString(name)
......@@ -216,6 +218,7 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo) error {
return err
}
p.writeIndent(-1)
p.WriteByte('<')
p.WriteByte('/')
p.WriteString(name)
......@@ -294,6 +297,7 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
if vf.Len() == 0 {
continue
}
p.writeIndent(0)
p.WriteString("<!--")
dashDash := false
dashLast := false
......@@ -352,6 +356,33 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
return nil
}
// writeIndent emits the newline and indentation between XML tokens when
// indentation was requested (via MarshalIndent or by setting prefix/indent
// directly). depthDelta is +1 when an open tag follows, -1 when a close
// tag follows, and 0 for tokens written at the current depth.
func (p *printer) writeIndent(depthDelta int) {
	// Fast exit: indentation disabled.
	if len(p.prefix) == 0 && len(p.indent) == 0 {
		return
	}
	if depthDelta < 0 {
		p.depth--
		if p.indentedIn {
			// The matching open tag was the last thing written, so keep
			// the close tag on the same line (empty element case).
			p.indentedIn = false
			return
		}
		p.indentedIn = false
	}
	p.WriteByte('\n')
	if len(p.prefix) > 0 {
		p.WriteString(p.prefix)
	}
	if len(p.indent) > 0 {
		for i := 0; i < p.depth; i++ {
			p.WriteString(p.indent)
		}
	}
	if depthDelta > 0 {
		p.depth++
		// Remember that we just indented in, so an immediately following
		// close tag can suppress its newline.
		p.indentedIn = true
	}
}
type parentStack struct {
*printer
stack []string
......@@ -367,20 +398,20 @@ func (s *parentStack) trim(parents []string) {
break
}
}
for i := len(s.stack) - 1; i >= split; i-- {
s.writeIndent(-1)
s.WriteString("</")
s.WriteString(s.stack[i])
s.WriteByte('>')
}
s.stack = parents[:split]
}
// push adds parent elements to the stack and writes open tags.
func (s *parentStack) push(parents []string) {
for i := 0; i < len(parents); i++ {
s.WriteString("<")
s.writeIndent(1)
s.WriteByte('<')
s.WriteString(parents[i])
s.WriteByte('>')
}
......
......@@ -25,58 +25,6 @@ import (
// slice, or string. Well-formed data that does not fit into v is
// discarded.
//
// For example, given these definitions:
//
// type Email struct {
// Where string `xml:",attr"`
// Addr string
// }
//
// type Result struct {
// XMLName xml.Name `xml:"result"`
// Name string
// Phone string
// Email []Email
// Groups []string `xml:"group>value"`
// }
//
// result := Result{Name: "name", Phone: "phone", Email: nil}
//
// unmarshalling the XML input
//
// <result>
// <email where="home">
// <addr>gre@example.com</addr>
// </email>
// <email where='work'>
// <addr>gre@work.com</addr>
// </email>
// <name>Grace R. Emlin</name>
// <group>
// <value>Friends</value>
// <value>Squash</value>
// </group>
// <address>123 Main Street</address>
// </result>
//
// via Unmarshal(data, &result) is equivalent to assigning
//
// r = Result{
// xml.Name{Local: "result"},
// "Grace R. Emlin", // name
// "phone", // no phone given
// []Email{
// Email{"home", "gre@example.com"},
// Email{"work", "gre@work.com"},
// },
// []string{"Friends", "Squash"},
// }
//
// Note that the field r.Phone has not been modified and
// that the XML <address> element was discarded. Also, the field
// Groups was assigned considering the element path provided in the
// field tag.
//
// Because Unmarshal uses the reflect package, it can only assign
// to exported (upper case) fields. Unmarshal uses a case-sensitive
// comparison to match XML element names to tag values and struct
......@@ -133,6 +81,9 @@ import (
// of the above rules and the struct has a field with tag ",any",
// unmarshal maps the sub-element to that struct field.
//
// * A non-pointer anonymous struct field is handled as if the
// fields of its value were part of the outer struct.
//
// * A struct field with tag "-" is never unmarshalled into.
//
// Unmarshal maps an XML element to a string or []byte by saving the
......
......@@ -5,29 +5,49 @@
package errors_test
import (
. "errors"
"errors"
"fmt"
"testing"
)
func TestNewEqual(t *testing.T) {
// Different allocations should not be equal.
if New("abc") == New("abc") {
if errors.New("abc") == errors.New("abc") {
t.Errorf(`New("abc") == New("abc")`)
}
if New("abc") == New("xyz") {
if errors.New("abc") == errors.New("xyz") {
t.Errorf(`New("abc") == New("xyz")`)
}
// Same allocation should be equal to itself (not crash).
err := New("jkl")
err := errors.New("jkl")
if err != err {
t.Errorf(`err != err`)
}
}
func TestErrorMethod(t *testing.T) {
err := New("abc")
err := errors.New("abc")
if err.Error() != "abc" {
t.Errorf(`New("abc").Error() = %q, want %q`, err.Error(), "abc")
}
}
func ExampleNew() {
err := errors.New("emit macho dwarf: elf header corrupted")
if err != nil {
fmt.Print(err)
}
// Output: emit macho dwarf: elf header corrupted
}
// The fmt package's Errorf function lets us use the package's formatting
// features to create descriptive error messages.
func ExampleNew_errorf() {
const name, id = "bimmler", 17
err := fmt.Errorf("user %q (id %d) not found", name, id)
if err != nil {
fmt.Print(err)
}
// Output: user "bimmler" (id 17) not found
}
......@@ -7,6 +7,7 @@
package inotify
import (
"io/ioutil"
"os"
"testing"
"time"
......@@ -16,16 +17,19 @@ func TestInotifyEvents(t *testing.T) {
// Create an inotify watcher instance and initialize it
watcher, err := NewWatcher()
if err != nil {
t.Fatalf("NewWatcher() failed: %s", err)
t.Fatalf("NewWatcher failed: %s", err)
}
t.Logf("NEEDS TO BE CONVERTED TO NEW GO TOOL") // TODO
return
dir, err := ioutil.TempDir("", "inotify")
if err != nil {
t.Fatalf("TempDir failed: %s", err)
}
defer os.RemoveAll(dir)
// Add a watch for "_test"
err = watcher.Watch("_test")
err = watcher.Watch(dir)
if err != nil {
t.Fatalf("Watcher.Watch() failed: %s", err)
t.Fatalf("Watch failed: %s", err)
}
// Receive errors on the error channel on a separate goroutine
......@@ -35,7 +39,7 @@ func TestInotifyEvents(t *testing.T) {
}
}()
const testFile string = "_test/TestInotifyEvents.testfile"
testFile := dir + "/TestInotifyEvents.testfile"
// Receive events on the event channel on a separate goroutine
eventstream := watcher.Event
......@@ -58,7 +62,7 @@ func TestInotifyEvents(t *testing.T) {
// This should add at least one event to the inotify event queue
_, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
t.Fatalf("creating test file failed: %s", err)
t.Fatalf("creating test file: %s", err)
}
// We expect this event to be received almost immediately, but let's wait 1 s to be sure
......@@ -95,7 +99,7 @@ func TestInotifyClose(t *testing.T) {
t.Fatal("double Close() test failed: second Close() call didn't return")
}
err := watcher.Watch("_test")
err := watcher.Watch(os.TempDir())
if err == nil {
t.Fatal("expected error on Watch() after Close(), got nil")
}
......
......@@ -66,6 +66,18 @@ func (rb *reorderBuffer) flush(out []byte) []byte {
return out
}
// flushCopy copies the normalized segment to buf and resets rb.
// It returns the number of bytes written to buf.
func (rb *reorderBuffer) flushCopy(buf []byte) int {
	n := 0
	for _, r := range rb.rune[:rb.nrune] {
		n += copy(buf[n:], rb.byte[r.pos:r.pos+r.size])
	}
	rb.reset()
	return n
}
// insertOrdered inserts a rune in the buffer, ordered by Canonical Combining Class.
// It returns false if the buffer is not large enough to hold the rune.
// It is used internally by insert and insertString only.
......@@ -96,32 +108,41 @@ func (rb *reorderBuffer) insertOrdered(info runeInfo) bool {
// insert inserts the given rune in the buffer ordered by CCC.
// It returns true if the buffer was large enough to hold the decomposed rune.
func (rb *reorderBuffer) insert(src input, i int, info runeInfo) bool {
if info.size == 3 {
if rune := src.hangul(i); rune != 0 {
return rb.decomposeHangul(rune)
}
if rune := src.hangul(i); rune != 0 {
return rb.decomposeHangul(rune)
}
if info.hasDecomposition() {
dcomp := info.decomposition()
rb.tmpBytes = inputBytes(dcomp)
for i := 0; i < len(dcomp); {
info = rb.f.info(&rb.tmpBytes, i)
pos := rb.nbyte
if !rb.insertOrdered(info) {
return false
}
end := i + int(info.size)
copy(rb.byte[pos:], dcomp[i:end])
i = end
}
} else {
// insertOrder changes nbyte
return rb.insertDecomposed(info.decomposition())
}
return rb.insertSingle(src, i, info)
}
// insertDecomposed inserts an entry in to the reorderBuffer for each rune
// in dcomp. dcomp must be a sequence of decomposed UTF-8-encoded runes.
// It returns false, leaving the buffer unchanged, if there is not enough
// room for all of dcomp.
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) bool {
	// Remember the current sizes so a failed insert can be rolled back.
	saveNrune, saveNbyte := rb.nrune, rb.nbyte
	rb.tmpBytes = inputBytes(dcomp)
	for i := 0; i < len(dcomp); {
		info := rb.f.info(&rb.tmpBytes, i)
		// insertOrdered advances rb.nbyte; record the write offset first.
		pos := rb.nbyte
		if !rb.insertOrdered(info) {
			// Out of room: undo any partial insertion.
			rb.nrune, rb.nbyte = saveNrune, saveNbyte
			return false
		}
		i += copy(rb.byte[pos:], dcomp[i:i+int(info.size)])
	}
	return true
}
// insertSingle inserts an entry in the reorderBuffer for the rune at
// position i. info is the runeInfo for the rune at position i.
func (rb *reorderBuffer) insertSingle(src input, i int, info runeInfo) bool {
	// Record the write offset first: insertOrdered advances rb.nbyte.
	start := rb.nbyte
	if !rb.insertOrdered(info) {
		return false
	}
	src.copySlice(rb.byte[start:], i, i+int(info.size))
	return true
}
......@@ -182,8 +203,12 @@ const (
jamoLVTCount = 19 * 21 * 28
)
// Caller must verify that len(b) >= 3.
const hangulUTF8Size = 3
func isHangul(b []byte) bool {
if len(b) < hangulUTF8Size {
return false
}
b0 := b[0]
if b0 < hangulBase0 {
return false
......@@ -202,8 +227,10 @@ func isHangul(b []byte) bool {
return b1 == hangulEnd1 && b[2] < hangulEnd2
}
// Caller must verify that len(b) >= 3.
func isHangulString(b string) bool {
if len(b) < hangulUTF8Size {
return false
}
b0 := b[0]
if b0 < hangulBase0 {
return false
......@@ -234,6 +261,22 @@ func isHangulWithoutJamoT(b []byte) bool {
return c < jamoLVTCount && c%jamoTCount == 0
}
// decomposeHangul writes the decomposed Hangul to buf and returns the number
// of bytes written. len(buf) should be at least 9.
func decomposeHangul(buf []byte, r rune) int {
	const jamoLen = 3 // UTF-8 length of a single Jamo rune
	s := r - hangulBase
	t := s % jamoTCount
	s /= jamoTCount
	utf8.EncodeRune(buf, jamoLBase+s/jamoVCount)
	utf8.EncodeRune(buf[jamoLen:], jamoVBase+s%jamoVCount)
	if t == 0 {
		// LV syllable: no trailing consonant Jamo.
		return 2 * jamoLen
	}
	utf8.EncodeRune(buf[2*jamoLen:], jamoTBase+t)
	return 3 * jamoLen
}
// decomposeHangul algorithmically decomposes a Hangul rune into
// its Jamo components.
// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
......
......@@ -47,14 +47,14 @@ func runTests(t *testing.T, name string, fm Form, f insertFunc, tests []TestCase
}
}
func TestFlush(t *testing.T) {
type flushFunc func(rb *reorderBuffer) []byte
func testFlush(t *testing.T, name string, fn flushFunc) {
rb := reorderBuffer{}
rb.init(NFC, nil)
out := make([]byte, 0)
out = rb.flush(out)
out := fn(&rb)
if len(out) != 0 {
t.Errorf("wrote bytes on flush of empty buffer. (len(out) = %d)", len(out))
t.Errorf("%s: wrote bytes on flush of empty buffer. (len(out) = %d)", name, len(out))
}
for _, r := range []rune("world!") {
......@@ -65,16 +65,32 @@ func TestFlush(t *testing.T) {
out = rb.flush(out)
want := "Hello world!"
if string(out) != want {
t.Errorf(`output after flush was "%s"; want "%s"`, string(out), want)
t.Errorf(`%s: output after flush was "%s"; want "%s"`, name, string(out), want)
}
if rb.nrune != 0 {
t.Errorf("flush: non-null size of info buffer (rb.nrune == %d)", rb.nrune)
t.Errorf("%s: non-null size of info buffer (rb.nrune == %d)", name, rb.nrune)
}
if rb.nbyte != 0 {
t.Errorf("flush: non-null size of byte buffer (rb.nbyte == %d)", rb.nbyte)
t.Errorf("%s: non-null size of byte buffer (rb.nbyte == %d)", name, rb.nbyte)
}
}
// flushF adapts reorderBuffer.flush to the flushFunc test signature.
func flushF(rb *reorderBuffer) []byte {
	return rb.flush([]byte{})
}
// flushCopyF adapts reorderBuffer.flushCopy to the flushFunc test signature.
func flushCopyF(rb *reorderBuffer) []byte {
	buf := make([]byte, MaxSegmentSize)
	return buf[:rb.flushCopy(buf)]
}
// TestFlush runs the shared flush test harness against both flush
// implementations: flush and flushCopy.
func TestFlush(t *testing.T) {
	testFlush(t, "flush", flushF)
	testFlush(t, "flushCopy", flushCopyF)
}
var insertTests = []TestCase{
{[]rune{'a'}, []rune{'a'}},
{[]rune{0x300}, []rune{0x300}},
......
......@@ -7,7 +7,7 @@ package norm
import "unicode/utf8"
type input interface {
skipASCII(p int) int
skipASCII(p, max int) int
skipNonStarter(p int) int
appendSlice(buf []byte, s, e int) []byte
copySlice(buf []byte, s, e int)
......@@ -18,8 +18,8 @@ type input interface {
type inputString string
func (s inputString) skipASCII(p int) int {
for ; p < len(s) && s[p] < utf8.RuneSelf; p++ {
// skipASCII returns the position of the first non-ASCII byte in s at or
// after p, never advancing past max.
func (s inputString) skipASCII(p, max int) int {
	for p < max && s[p] < utf8.RuneSelf {
		p++
	}
	return p
}
......@@ -59,8 +59,8 @@ func (s inputString) hangul(p int) rune {
type inputBytes []byte
func (s inputBytes) skipASCII(p int) int {
for ; p < len(s) && s[p] < utf8.RuneSelf; p++ {
// skipASCII returns the position of the first non-ASCII byte in s at or
// after p, never advancing past max.
func (s inputBytes) skipASCII(p, max int) int {
	for p < max && s[p] < utf8.RuneSelf {
		p++
	}
	return p
}
......
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
// MaxSegmentSize is the maximum size of a byte buffer needed to hold
// one normalized segment (exported alias for the internal limit).
const MaxSegmentSize = maxByteBufferSize

// An Iter iterates over a string or byte slice, while normalizing it
// to a given Form.
type Iter struct {
	rb       reorderBuffer
	info     runeInfo // first character saved from previous iteration
	next     iterFunc // implementation of next depends on form
	p        int      // current position in input source
	outStart int      // start of current segment in output buffer
	inStart  int      // start of current segment in input source
	maxp     int      // position in output buffer after which not to start a new segment
	maxseg   int      // for tracking an excess of combining characters
	tccc     uint8    // NOTE(review): not referenced in the code shown here; presumably a saved trailing combining class
	done     bool     // input exhausted; set when p reaches rb.nsrc
}

// iterFunc implements Next for one class of normal forms.
type iterFunc func(*Iter, []byte) int
// SetInput initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) SetInput(f Form, src []byte) {
	i.rb.init(f, src)
	// Pick the implementation matching the form's class.
	i.next = nextDecomposed
	if i.rb.f.composing {
		i.next = nextComposed
	}
	i.p = 0
	i.done = len(src) == 0
	if !i.done {
		i.info = i.rb.f.info(i.rb.src, i.p)
	}
}
// SetInputString initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) SetInputString(f Form, src string) {
i.rb.initString(f, src)
if i.rb.f.composing {
i.next = nextComposed
} else {
i.next = nextDecomposed
}
i.p = 0
if i.done = len(src) == 0; !i.done {
i.info = i.rb.f.info(i.rb.src, i.p)
}
}
// Pos returns the byte position at which the next call to Next will commence processing.
func (i *Iter) Pos() int {
	return i.p
}

// Done returns true if there is no more input to process.
func (i *Iter) Done() bool {
	return i.done
}
// Next writes f(i.input[i.Pos():n]...) to buffer buf, where n is the
// largest boundary of i.input such that the result fits in buf.
// It returns the number of bytes written to buf.
// len(buf) should be at least MaxSegmentSize.
// Done must be false before calling Next.
func (i *Iter) Next(buf []byte) int {
	// Dispatches to nextDecomposed or nextComposed, chosen by SetInput.
	return i.next(i, buf)
}

// initNext resets the per-call iteration state: the current segment
// starts at output position 0 and input position inStart; outn is the
// length of the caller's output buffer.
func (i *Iter) initNext(outn, inStart int) {
	i.outStart = 0
	i.inStart = inStart
	// No new segment may start after maxp, or a maximum-size segment
	// could overflow the output buffer (see setStart).
	i.maxp = outn - MaxSegmentSize
	i.maxseg = MaxSegmentSize
}
// setStart resets the start of the new segment to the given position.
// It returns true if there is not enough room for the new segment.
func (i *Iter) setStart(outp, inp int) bool {
	if outp > i.maxp {
		// A full-size segment starting here could overflow the output.
		return true
	}
	i.outStart, i.inStart = outp, inp
	i.maxseg = outp + MaxSegmentSize
	return false
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
func nextDecomposed(i *Iter, out []byte) int {
	var outp int
	i.initNext(len(out), i.p)
doFast:
	// Fast path: bytes needing no reordering are copied to out in runs.
	// inCopyStart/outCopyStart delimit input bytes not yet copied out.
	inCopyStart, outCopyStart := i.p, outp // invariant xCopyStart <= i.xStart
	for {
		if sz := int(i.info.size); sz <= 1 {
			// ASCII or illegal byte.  Either way, advance by 1.
			i.p++
			outp++
			// Consume as many further ASCII bytes as both the input and
			// the remaining output space allow.
			max := min(i.rb.nsrc, len(out)-outp+i.p)
			if np := i.rb.src.skipASCII(i.p, max); np > i.p {
				outp += np - i.p
				i.p = np
				if i.p >= i.rb.nsrc {
					break
				}
				// ASCII may combine with consecutive runes.
				if i.setStart(outp-1, i.p-1) {
					// No room for a new segment: back up one byte so the
					// last ASCII byte starts the next call instead.
					i.p--
					outp--
					i.info.size = 1
					break
				}
			}
		} else if d := i.info.decomposition(); d != nil {
			// Rune has a precomputed decomposition: flush the pending
			// copy run, then write the decomposition directly.
			i.rb.src.copySlice(out[outCopyStart:], inCopyStart, i.p)
			p := outp + len(d)
			if p > i.maxseg && i.setStart(outp, i.p) {
				return outp
			}
			copy(out[outp:], d)
			outp = p
			i.p += sz
			inCopyStart, outCopyStart = i.p, outp
		} else if r := i.rb.src.hangul(i.p); r != 0 {
			// Hangul syllables decompose algorithmically.
			i.rb.src.copySlice(out[outCopyStart:], inCopyStart, i.p)
			for {
				outp += decomposeHangul(out[outp:], r)
				i.p += hangulUTF8Size
				if r = i.rb.src.hangul(i.p); r == 0 {
					break
				}
				if i.setStart(outp, i.p) {
					return outp
				}
			}
			inCopyStart, outCopyStart = i.p, outp
		} else {
			// Rune is its own decomposition: account for its size now,
			// copy the bytes later as part of the pending run.
			p := outp + sz
			if p > i.maxseg && i.setStart(outp, i.p) {
				break
			}
			outp = p
			i.p += sz
		}
		if i.p >= i.rb.nsrc {
			break
		}
		prevCC := i.info.tccc
		i.info = i.rb.f.info(i.rb.src, i.p)
		if cc := i.info.ccc; cc == 0 {
			if i.setStart(outp, i.p) {
				break
			}
		} else if cc < prevCC {
			// Combining classes out of order: must reorder via the
			// slow path below.
			goto doNorm
		}
	}
	// Flush any remaining pending copy run.
	if inCopyStart != i.p {
		i.rb.src.copySlice(out[outCopyStart:], inCopyStart, i.p)
	}
	i.done = i.p >= i.rb.nsrc
	return outp
doNorm:
	// Insert what we have decomposed so far in the reorderBuffer.
	// As we will only reorder, there will always be enough room.
	i.rb.src.copySlice(out[outCopyStart:], inCopyStart, i.p)
	if !i.rb.insertDecomposed(out[i.outStart:outp]) {
		// Start over to prevent decompositions from crossing segment boundaries.
		// This is a rare occurrence.
		i.p = i.inStart
		i.info = i.rb.f.info(i.rb.src, i.p)
	}
	outp = i.outStart
	for {
		if !i.rb.insert(i.rb.src, i.p, i.info) {
			break
		}
		if i.p += int(i.info.size); i.p >= i.rb.nsrc {
			outp += i.rb.flushCopy(out[outp:])
			i.done = true
			return outp
		}
		i.info = i.rb.f.info(i.rb.src, i.p)
		if i.info.ccc == 0 {
			break
		}
	}
	// new segment or too many combining characters: exit normalization
	if outp += i.rb.flushCopy(out[outp:]); i.setStart(outp, i.p) {
		return outp
	}
	goto doFast
}
// nextComposed is the implementation of Next for forms NFC and NFKC.
func nextComposed(i *Iter, out []byte) int {
	var outp int
	i.initNext(len(out), i.p)
doFast:
	// Fast path: bytes are copied to out unchanged until a rune is seen
	// that may change under composition or needs reordering.
	inCopyStart, outCopyStart := i.p, outp // invariant xCopyStart <= i.xStart
	var prevCC uint8
	for {
		if !i.info.isYesC() {
			// Rune may change under composition: take the slow path.
			goto doNorm
		}
		if cc := i.info.ccc; cc == 0 {
			// Starter: a new segment may begin here.
			if i.setStart(outp, i.p) {
				break
			}
		} else if cc < prevCC {
			// Combining classes out of order: must reorder.
			goto doNorm
		}
		prevCC = i.info.tccc
		sz := int(i.info.size)
		if sz == 0 {
			sz = 1 // illegal rune: copy byte-by-byte
		}
		p := outp + sz
		if p > i.maxseg && i.setStart(outp, i.p) {
			break
		}
		outp = p
		i.p += sz
		// Consume as many further ASCII bytes as both the input and the
		// remaining output space allow.
		max := min(i.rb.nsrc, len(out)-outp+i.p)
		if np := i.rb.src.skipASCII(i.p, max); np > i.p {
			outp += np - i.p
			i.p = np
			if i.p >= i.rb.nsrc {
				break
			}
			// ASCII may combine with consecutive runes.
			if i.setStart(outp-1, i.p-1) {
				// No room for a new segment: back up one byte so the
				// last ASCII byte starts the next call instead.
				i.p--
				outp--
				i.info = runeInfo{size: 1}
				break
			}
		}
		if i.p >= i.rb.nsrc {
			break
		}
		i.info = i.rb.f.info(i.rb.src, i.p)
	}
	// Flush any remaining pending copy run.
	if inCopyStart != i.p {
		i.rb.src.copySlice(out[outCopyStart:], inCopyStart, i.p)
	}
	i.done = i.p >= i.rb.nsrc
	return outp
doNorm:
	// Slow path: rewind to the start of the current segment and feed it
	// through the reorderBuffer, composing before flushing.
	i.rb.src.copySlice(out[outCopyStart:], inCopyStart, i.inStart)
	outp, i.p = i.outStart, i.inStart
	i.info = i.rb.f.info(i.rb.src, i.p)
	for {
		if !i.rb.insert(i.rb.src, i.p, i.info) {
			break
		}
		if i.p += int(i.info.size); i.p >= i.rb.nsrc {
			i.rb.compose()
			outp += i.rb.flushCopy(out[outp:])
			i.done = true
			return outp
		}
		i.info = i.rb.f.info(i.rb.src, i.p)
		if i.info.boundaryBefore() {
			break
		}
	}
	i.rb.compose()
	if outp += i.rb.flushCopy(out[outp:]); i.setStart(outp, i.p) {
		return outp
	}
	goto doFast
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"strings"
"testing"
)
// iterBufSizes lists the destination buffer sizes with which the
// iterator tests are run, from exactly one segment up to many.
var iterBufSizes = []int{
	MaxSegmentSize,
	1.5 * MaxSegmentSize,
	2 * MaxSegmentSize,
	3 * MaxSegmentSize,
	100 * MaxSegmentSize,
}
// doIterNorm normalizes s according to f by pulling segments from an
// Iter into buf and returns the accumulated output bytes.
func doIterNorm(f Form, buf []byte, s string) []byte {
	acc := []byte{}
	var it Iter
	it.SetInputString(f, s)
	for !it.Done() {
		n := it.Next(buf)
		acc = append(acc, buf[:n]...)
	}
	return acc
}
// runIterTests normalizes each test input via doIterNorm for every
// buffer size in iterBufSizes and compares the result against the
// expected output. If norm is true, the expected output is first
// normalized with f (i.e. the table's out field is not yet in normal
// form).
func runIterTests(t *testing.T, name string, f Form, tests []AppendTest, norm bool) {
	for i, test := range tests {
		in := test.left + test.right
		gold := test.out
		if norm {
			gold = string(f.AppendString(nil, test.out))
		}
		for _, sz := range iterBufSizes {
			buf := make([]byte, sz)
			out := string(doIterNorm(f, buf, in))
			if len(out) != len(gold) {
				const msg = "%s:%d:%d: length is %d; want %d"
				t.Errorf(msg, name, i, sz, len(out), len(gold))
			}
			if out != gold {
				// Find first rune that differs and show context.
				ir := []rune(out)
				ig := []rune(gold)
				for j := 0; j < len(ir) && j < len(ig); j++ {
					if ir[j] == ig[j] {
						continue
					}
					// Back up three runes so the report includes
					// context before the first difference.
					if j -= 3; j < 0 {
						j = 0
					}
					// Print a window of up to seven runes starting at
					// the (clamped) position j.
					for e := j + 7; j < e && j < len(ir) && j < len(ig); j++ {
						const msg = "%s:%d:%d: runeAt(%d) = %U; want %U"
						t.Errorf(msg, name, i, sz, j, ir[j], ig[j])
					}
					break
				}
			}
		}
	}
}
// rep returns the rune r repeated n times as a string.
func rep(r rune, n int) string {
	var b strings.Builder
	for k := 0; k < n; k++ {
		b.WriteRune(r)
	}
	return b.String()
}
// iterTests holds round-trip cases applicable to both composing and
// decomposing forms.
var iterTests = []AppendTest{
	{"", ascii, ascii},
	{"", txt_all, txt_all},
	{"", "a" + rep(0x0300, MaxSegmentSize/2), "a" + rep(0x0300, MaxSegmentSize/2)},
}
// iterTestsD holds cases specific to the decomposing forms; each case
// exercises a segment-overflow or restart condition (see the per-case
// comments).
var iterTestsD = []AppendTest{
	{ // segment overflow on unchanged character
		"",
		"a" + rep(0x0300, MaxSegmentSize/2) + "\u0316",
		"a" + rep(0x0300, MaxSegmentSize/2-1) + "\u0316\u0300",
	},
	{ // segment overflow on unchanged character + start value
		"",
		"a" + rep(0x0300, MaxSegmentSize/2+maxCombiningChars+4) + "\u0316",
		"a" + rep(0x0300, MaxSegmentSize/2+maxCombiningChars) + "\u0316" + rep(0x300, 4),
	},
	{ // segment overflow on decomposition
		"",
		"a" + rep(0x0300, MaxSegmentSize/2-1) + "\u0340",
		"a" + rep(0x0300, MaxSegmentSize/2),
	},
	{ // segment overflow on decomposition + start value
		"",
		"a" + rep(0x0300, MaxSegmentSize/2-1) + "\u0340" + rep(0x300, maxCombiningChars+4) + "\u0320",
		"a" + rep(0x0300, MaxSegmentSize/2-1) + rep(0x300, maxCombiningChars+1) + "\u0320" + rep(0x300, 4),
	},
	{ // start value after ASCII overflow
		"",
		rep('a', MaxSegmentSize) + rep(0x300, maxCombiningChars+2) + "\u0320",
		rep('a', MaxSegmentSize) + rep(0x300, maxCombiningChars) + "\u0320\u0300\u0300",
	},
	{ // start value after Hangul overflow
		"",
		rep(0xAC00, MaxSegmentSize/6) + rep(0x300, maxCombiningChars+2) + "\u0320",
		strings.Repeat("\u1100\u1161", MaxSegmentSize/6) + rep(0x300, maxCombiningChars-1) + "\u0320" + rep(0x300, 3),
	},
	{ // start value after cc=0
		"",
		"您您" + rep(0x300, maxCombiningChars+4) + "\u0320",
		"您您" + rep(0x300, maxCombiningChars) + "\u0320" + rep(0x300, 4),
	},
	{ // start value after normalization
		"",
		"\u0300\u0320a" + rep(0x300, maxCombiningChars+4) + "\u0320",
		"\u0320\u0300a" + rep(0x300, maxCombiningChars) + "\u0320" + rep(0x300, 4),
	},
}
// iterTestsC holds cases specific to the composing forms (see the
// per-case comments).
var iterTestsC = []AppendTest{
	{ // ordering of non-composing combining characters
		"",
		"\u0305\u0316",
		"\u0316\u0305",
	},
	{ // segment overflow
		"",
		"a" + rep(0x0305, MaxSegmentSize/2+4) + "\u0316",
		"a" + rep(0x0305, MaxSegmentSize/2-1) + "\u0316" + rep(0x305, 5),
	},
}
func TestIterNextD(t *testing.T) {
runIterTests(t, "IterNextD1", NFKD, appendTests, true)
runIterTests(t, "IterNextD2", NFKD, iterTests, true)
runIterTests(t, "IterNextD3", NFKD, iterTestsD, false)
}
func TestIterNextC(t *testing.T) {
runIterTests(t, "IterNextC1", NFKC, appendTests, true)
runIterTests(t, "IterNextC2", NFKC, iterTests, true)
runIterTests(t, "IterNextC3", NFKC, iterTestsC, false)
}
// SegmentTest describes an input string and the sequence of segments
// the iterator is expected to produce for it.
type SegmentTest struct {
	in  string   // input to iterate over
	out []string // expected segments; a trailing "" marks expected end of iteration
}
// segmentTests lists inputs together with the exact segments expected
// from the iterator; the final "" entry marks the point at which the
// iterator must report Done.
var segmentTests = []SegmentTest{
	{rep('a', MaxSegmentSize), []string{rep('a', MaxSegmentSize), ""}},
	{rep('a', MaxSegmentSize+2), []string{rep('a', MaxSegmentSize-1), "aaa", ""}},
	{rep('a', MaxSegmentSize) + "\u0300aa", []string{rep('a', MaxSegmentSize-1), "a\u0300", "aa", ""}},
}
// Note that, by design, segmentation is equal for composing and decomposing forms.
func TestIterSegmentation(t *testing.T) {
segmentTest(t, "SegmentTestD", NFD, segmentTests)
segmentTest(t, "SegmentTestC", NFC, segmentTests)
}
// segmentTest checks that iterating over each test input with
// Iter.Next yields exactly the expected sequence of segments. An empty
// string in tt.out marks the position at which Done() must be true.
func segmentTest(t *testing.T, name string, f Form, tests []SegmentTest) {
	iter := Iter{}
	// Bug fix: range over the tests parameter. The original ranged over
	// the package-level segmentTests, silently ignoring its argument.
	for i, tt := range tests {
		buf := make([]byte, MaxSegmentSize)
		iter.SetInputString(f, tt.in)
		for j, seg := range tt.out {
			if seg == "" {
				// Sentinel entry: the iterator must be exhausted here.
				if !iter.Done() {
					n := iter.Next(buf)
					res := string(buf[:n])
					t.Errorf(`%s:%d:%d: expected Done()==true, found segment "%s"`, name, i, j, res)
				}
				continue
			}
			if iter.Done() {
				t.Errorf("%s:%d:%d: Done()==true, want false", name, i, j)
			}
			n := iter.Next(buf)
			// Normalize the expected segment before comparing.
			seg = f.String(seg)
			if res := string(buf[:n]); res != seg {
				// Also fixes a typo in the format string: a stray `"`
				// where `:` was intended.
				t.Errorf(`%s:%d:%d: segment was "%s" (%d); want "%s" (%d)`, name, i, j, res, len(res), seg, len(seg))
			}
		}
	}
}
......@@ -243,7 +243,7 @@ func quickSpan(rb *reorderBuffer, i int) int {
lastSegStart := i
src, n := rb.src, rb.nsrc
for i < n {
if j := src.skipASCII(i); i != j {
if j := src.skipASCII(i, n); i != j {
i = j
lastSegStart = i - 1
lastCC = 0
......@@ -448,11 +448,16 @@ func decomposeToLastBoundary(rb *reorderBuffer, buf []byte) []byte {
}
// Check that decomposition doesn't result in overflow.
if info.hasDecomposition() {
dcomp := info.decomposition()
for i := 0; i < len(dcomp); {
inf := rb.f.info(inputBytes(dcomp), i)
i += int(inf.size)
if isHangul(buf) {
i += int(info.size)
n++
} else {
dcomp := info.decomposition()
for i := 0; i < len(dcomp); {
inf := rb.f.info(inputBytes(dcomp), i)
i += int(inf.size)
n++
}
}
} else {
n++
......
......@@ -5,6 +5,7 @@
package norm
import (
"bytes"
"strings"
"testing"
)
......@@ -495,15 +496,40 @@ func TestAppend(t *testing.T) {
runAppendTests(t, "TestString", NFKC, stringF, appendTests)
}
func appendBench(f Form, in []byte) func() {
buf := make([]byte, 0, 4*len(in))
return func() {
f.Append(buf, in...)
}
}
func iterBench(f Form, in []byte) func() {
buf := make([]byte, 4*len(in))
iter := Iter{}
return func() {
iter.SetInput(f, in)
for !iter.Done() {
iter.Next(buf)
}
}
}
func appendBenchmarks(bm []func(), f Form, in []byte) []func() {
//bm = append(bm, appendBench(f, in))
bm = append(bm, iterBench(f, in))
return bm
}
func doFormBenchmark(b *testing.B, inf, f Form, s string) {
b.StopTimer()
in := inf.Bytes([]byte(s))
buf := make([]byte, 2*len(in))
b.SetBytes(int64(len(in)))
bm := appendBenchmarks(nil, f, in)
b.SetBytes(int64(len(in) * len(bm)))
b.StartTimer()
for i := 0; i < b.N; i++ {
buf = f.Append(buf[0:0], in...)
buf = buf[0:0]
for _, fn := range bm {
fn()
}
}
}
......@@ -549,17 +575,21 @@ func BenchmarkNormalizeHangulNFD2NFD(b *testing.B) {
doFormBenchmark(b, NFD, NFD, txt_kr)
}
var forms = []Form{NFC, NFD, NFKC, NFKD}
func doTextBenchmark(b *testing.B, s string) {
b.StopTimer()
b.SetBytes(int64(len(s)) * 4)
in := []byte(s)
var buf = make([]byte, 0, 2*len(in))
bm := []func(){}
for _, f := range forms {
bm = appendBenchmarks(bm, f, in)
}
b.SetBytes(int64(len(s) * len(bm)))
b.StartTimer()
for i := 0; i < b.N; i++ {
NFC.Append(buf, in...)
NFD.Append(buf, in...)
NFKC.Append(buf, in...)
NFKD.Append(buf, in...)
for _, f := range bm {
f()
}
}
}
......@@ -584,6 +614,11 @@ func BenchmarkJapanese(b *testing.B) {
func BenchmarkChinese(b *testing.B) {
doTextBenchmark(b, txt_cn)
}
func BenchmarkOverflow(b *testing.B) {
doTextBenchmark(b, overflow)
}
var overflow = string(bytes.Repeat([]byte("\u035D"), 4096)) + "\u035B"
// Tests sampled from the Canonical ordering tests (Part 2) of
// http://unicode.org/Public/UNIDATA/NormalizationTest.txt
......
......@@ -220,6 +220,17 @@ func cmpIsNormal(t *Test, name string, f norm.Form, test string, result, want bo
func doTest(t *Test, f norm.Form, gold, test string) {
result := f.Bytes([]byte(test))
cmpResult(t, "Bytes", f, gold, test, string(result))
sresult := f.String(test)
cmpResult(t, "String", f, gold, test, sresult)
buf := make([]byte, norm.MaxSegmentSize)
acc := []byte{}
i := norm.Iter{}
i.SetInputString(f, test)
for !i.Done() {
n := i.Next(buf)
acc = append(acc, buf[:n]...)
}
cmpResult(t, "Iter.Next", f, gold, test, string(acc))
for i := range test {
out := f.Append(f.Bytes([]byte(test[:i])), []byte(test[i:])...)
cmpResult(t, fmt.Sprintf(":Append:%d", i), f, gold, test, string(out))
......
......@@ -98,9 +98,9 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
buf = append(buf, socks5Version)
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
buf = append(buf, 2, /* num auth methods */ socks5AuthNone, socks5AuthPassword)
buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword)
} else {
buf = append(buf, 1, /* num auth methods */ socks5AuthNone)
buf = append(buf, 1 /* num auth methods */, socks5AuthNone)
}
if _, err = conn.Write(buf); err != nil {
......@@ -139,7 +139,7 @@ func (s *socks5) Dial(network, addr string) (net.Conn, error) {
}
buf = buf[:0]
buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */ )
buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */)
if ip := net.ParseIP(host); ip != nil {
if len(ip) == 4 {
......
......@@ -389,12 +389,12 @@ func (t *Terminal) Write(buf []byte) (n int, err error) {
// We have a prompt and possibly user input on the screen. We
// have to clear it first.
t.move(0, /* up */ 0, /* down */ t.cursorX, /* left */ 0 /* right */ )
t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)
t.cursorX = 0
t.clearLineToRight()
for t.cursorY > 0 {
t.move(1, /* up */ 0, 0, 0)
t.move(1 /* up */, 0, 0, 0)
t.cursorY--
t.clearLineToRight()
}
......
......@@ -7,6 +7,7 @@
package winfsnotify
import (
"io/ioutil"
"os"
"testing"
"time"
......@@ -115,7 +116,13 @@ func TestNotifyClose(t *testing.T) {
t.Fatal("double Close() test failed: second Close() call didn't return")
}
err := watcher.Watch("_test")
dir, err := ioutil.TempDir("", "wininotify")
if err != nil {
t.Fatalf("TempDir failed: %s", err)
}
defer os.RemoveAll(dir)
err = watcher.Watch(dir)
if err == nil {
t.Fatal("expected error on Watch() after Close(), got nil")
}
......
......@@ -2,28 +2,31 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Extract example functions from package ASTs.
// Extract example functions from file ASTs.
package doc
import (
"go/ast"
"go/printer"
"go/token"
"regexp"
"sort"
"strings"
"unicode"
"unicode/utf8"
)
type Example struct {
Name string // name of the item being demonstrated
Body *printer.CommentedNode // code
Output string // expected output
Name string // name of the item being exemplified
Doc string // example function doc string
Code ast.Node
Comments []*ast.CommentGroup
Output string // expected output
}
func Examples(pkg *ast.Package) []*Example {
func Examples(files ...*ast.File) []*Example {
var list []*Example
for _, file := range pkg.Files {
for _, file := range files {
hasTests := false // file contains tests or benchmarks
numDecl := 0 // number of non-import declarations in the file
var flist []*Example
......@@ -45,26 +48,54 @@ func Examples(pkg *ast.Package) []*Example {
if !isTest(name, "Example") {
continue
}
var doc string
if f.Doc != nil {
doc = f.Doc.Text()
}
flist = append(flist, &Example{
Name: name[len("Example"):],
Body: &printer.CommentedNode{
Node: f.Body,
Comments: file.Comments,
},
Output: f.Doc.Text(),
Name: name[len("Example"):],
Doc: doc,
Code: f.Body,
Comments: file.Comments,
Output: exampleOutput(f, file.Comments),
})
}
if !hasTests && numDecl > 1 && len(flist) == 1 {
// If this file only has one example function, some
// other top-level declarations, and no tests or
// benchmarks, use the whole file as the example.
flist[0].Body.Node = file
flist[0].Code = file
}
list = append(list, flist...)
}
sort.Sort(exampleByName(list))
return list
}
var outputPrefix = regexp.MustCompile(`(?i)^[[:space:]]*output:`)
func exampleOutput(fun *ast.FuncDecl, comments []*ast.CommentGroup) string {
// find the last comment in the function
var last *ast.CommentGroup
for _, cg := range comments {
if cg.Pos() < fun.Pos() {
continue
}
if cg.End() > fun.End() {
break
}
last = cg
}
if last != nil {
// test that it begins with the correct prefix
text := last.Text()
if loc := outputPrefix.FindStringIndex(text); loc != nil {
return strings.TrimSpace(text[loc[1]:])
}
}
return "" // no suitable comment found
}
// isTest tells whether name looks like a test, example, or benchmark.
// It is a Test (say) if there is a character after Test that is not a
// lower-case letter. (We don't want Testiness.)
......@@ -78,3 +109,9 @@ func isTest(name, prefix string) bool {
rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
return !unicode.IsLower(rune)
}
type exampleByName []*Example
func (s exampleByName) Len() int { return len(s) }
func (s exampleByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s exampleByName) Less(i, j int) bool { return s[i].Name < s[j].Name }
......@@ -439,8 +439,10 @@ func (r *reader) readFile(src *ast.File) {
// gets to (re-)use the declaration documentation
// if there's none associated with the spec itself
fake := &ast.GenDecl{
d.Doc, d.Pos(), token.TYPE, token.NoPos,
[]ast.Spec{s}, token.NoPos,
Doc: d.Doc,
TokPos: d.Pos(),
Tok: token.TYPE,
Specs: []ast.Spec{s},
}
r.readType(fake, s)
}
......@@ -460,7 +462,7 @@ func (r *reader) readFile(src *ast.File) {
// non-empty BUG comment; collect comment without BUG prefix
list := append([]*ast.Comment(nil), c.List...) // make a copy
list[0].Text = text[m[1]:]
r.bugs = append(r.bugs, (&ast.CommentGroup{list}).Text())
r.bugs = append(r.bugs, (&ast.CommentGroup{List: list}).Text())
}
}
}
......@@ -530,7 +532,7 @@ func customizeRecv(f *Func, recvTypeName string, embeddedIsPtr bool, level int)
_, origRecvIsPtr := newField.Type.(*ast.StarExpr)
var typ ast.Expr = ast.NewIdent(recvTypeName)
if !embeddedIsPtr && origRecvIsPtr {
typ = &ast.StarExpr{token.NoPos, typ}
typ = &ast.StarExpr{X: typ}
}
newField.Type = typ
......
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package doc
import "unicode"
// firstSentenceLen returns the length of the first sentence in s.
// The sentence ends after the first period followed by space and
// not preceded by exactly one uppercase letter.
//
func firstSentenceLen(s string) int {
	// window holds the three runes preceding the current one:
	// window[2] is the most recent, window[0] the oldest.
	var window [3]rune
	for i, q := range s {
		switch q {
		case '\n', '\r', '\t':
			q = ' ' // treat all whitespace alike
		}
		// A period ends the sentence when followed by a space, unless
		// it is preceded by exactly one uppercase letter (an initial,
		// as in "T. S. Eliot").
		if q == ' ' && window[2] == '.' && (!unicode.IsUpper(window[1]) || unicode.IsUpper(window[0])) {
			return i
		}
		window[0], window[1], window[2] = window[1], window[2], q
	}
	return len(s)
}
// Synopsis returns a cleaned version of the first sentence in s.
// That sentence ends after the first period followed by space and
// not preceded by exactly one uppercase letter. The result string
// has no \n, \r, or \t characters and uses only single spaces between
// words.
//
func Synopsis(s string) string {
	// Determine where the first sentence ends: after a '.' followed by
	// a space (with \n, \r, \t treated as spaces), unless the period is
	// preceded by exactly one uppercase letter (an initial such as the
	// "T." in "T. S. Eliot"). This is the same rule firstSentenceLen
	// implements, inlined here.
	end := len(s)
	var r3, r2, r1 rune // the three runes preceding the current one
	for i, q := range s {
		if q == '\n' || q == '\r' || q == '\t' {
			q = ' '
		}
		if q == ' ' && r1 == '.' && (!unicode.IsUpper(r2) || unicode.IsUpper(r3)) {
			end = i
			break
		}
		r3, r2, r1 = r2, r1, q
	}
	// Copy s[:end], mapping \n, \r, \t to spaces and collapsing runs of
	// spaces into one.
	var out []byte
	last := byte(' ')
	for i := 0; i < end; i++ {
		c := s[i]
		if c == '\n' || c == '\r' || c == '\t' {
			c = ' '
		}
		if c != ' ' || last != ' ' {
			out = append(out, c)
			last = c
		}
	}
	// Remove trailing blank, if any.
	if k := len(out); k > 0 && last == ' ' {
		out = out[:k-1]
	}
	return string(out)
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package doc
import "testing"
// tests pairs each input with the expected first-sentence length (fsl)
// and the expected Synopsis result (syn).
var tests = []struct {
	txt string // input
	fsl int    // expected firstSentenceLen(txt)
	syn string // expected Synopsis(txt)
}{
	{"", 0, ""},
	{"foo", 3, "foo"},
	{"foo.", 4, "foo."},
	{"foo.bar", 7, "foo.bar"},
	{" foo. ", 6, "foo."},
	{" foo\t bar.\n", 12, "foo bar."},
	{" foo\t bar.\n", 12, "foo bar."},
	{"a b\n\nc\r\rd\t\t", 12, "a b c d"},
	{"a b\n\nc\r\rd\t\t . BLA", 15, "a b c d ."},
	{"Package poems by T.S.Eliot. To rhyme...", 27, "Package poems by T.S.Eliot."},
	{"Package poems by T. S. Eliot. To rhyme...", 29, "Package poems by T. S. Eliot."},
	{"foo implements the foo ABI. The foo ABI is...", 27, "foo implements the foo ABI."},
	{"Package\nfoo. ..", 12, "Package foo."},
	{"P . Q.", 3, "P ."},
	{"P. Q. ", 8, "P. Q."},
	{"Package Καλημέρα κόσμε.", 36, "Package Καλημέρα κόσμε."},
	{"Package こんにちは 世界\n", 31, "Package こんにちは 世界"},
}
// TestSynopsis checks firstSentenceLen and Synopsis against the shared
// test table.
func TestSynopsis(t *testing.T) {
	for _, e := range tests {
		if got := firstSentenceLen(e.txt); got != e.fsl {
			t.Errorf("got fsl = %d; want %d for %q\n", got, e.fsl, e.txt)
		}
		if got := Synopsis(e.txt); got != e.syn {
			t.Errorf("got syn = %q; want %q for %q\n", got, e.syn, e.txt)
		}
	}
}
......@@ -16,7 +16,7 @@ var matchBenchmarks = flag.String("test.bench", "", "regular expression to selec
var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
// An internal type but exported because it is cross-package; part of the implementation
// of gotest.
// of go test.
type InternalBenchmark struct {
Name string
F func(b *B)
......@@ -213,7 +213,7 @@ func (r BenchmarkResult) String() string {
}
// An internal function but exported because it is cross-package; part of the implementation
// of gotest.
// of go test.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
// If no flag was specified, don't run benchmarks.
if len(*matchBenchmarks) == 0 {
......@@ -281,7 +281,7 @@ func (b *B) trimOutput() {
}
// Benchmark benchmarks a single function. Useful for creating
// custom benchmarks that do not use gotest.
// custom benchmarks that do not use go test.
func Benchmark(f func(b *B)) BenchmarkResult {
b := &B{
common: common{
......
......@@ -27,7 +27,7 @@ VARIABLES
// The short flag requests that tests run more quickly, but its functionality
// is provided by test writers themselves. The testing package is just its
// home. The all.bash installation script sets it to make installation more
// efficient, but by default the flag is off so a plain "gotest" will do a
// efficient, but by default the flag is off so a plain "go test" will do a
// full test of the package.
short = flag.Bool("test.short", false, "run smaller test suite to save time")
......
......@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package testing provides support for automated testing of Go packages.
// It is intended to be used in concert with the ``gotest'' utility, which automates
// It is intended to be used in concert with the ``go test'' utility, which automates
// execution of any function of the form
// func TestXxx(*testing.T)
// where Xxx can be any alphanumeric string (but the first letter must not be in
......@@ -12,7 +12,7 @@
//
// Functions of the form
// func BenchmarkXxx(*testing.B)
// are considered benchmarks, and are executed by gotest when the -test.bench
// are considered benchmarks, and are executed by go test when the -test.bench
// flag is provided.
//
// A sample benchmark function looks like this:
......@@ -53,7 +53,7 @@ var (
// The short flag requests that tests run more quickly, but its functionality
// is provided by test writers themselves. The testing package is just its
// home. The all.bash installation script sets it to make installation more
// efficient, but by default the flag is off so a plain "gotest" will do a
// efficient, but by default the flag is off so a plain "go test" will do a
// full test of the package.
short = flag.Bool("test.short", false, "run smaller test suite to save time")
......@@ -205,7 +205,7 @@ func (t *T) Parallel() {
}
// An internal type but exported because it is cross-package; part of the implementation
// of gotest.
// of go test.
type InternalTest struct {
Name string
F func(*T)
......@@ -227,7 +227,7 @@ func tRunner(t *T, test *InternalTest) {
}
// An internal function but exported because it is cross-package; part of the implementation
// of gotest.
// of go test.
func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {
flag.Parse()
parseCpuList()
......
......@@ -87,7 +87,6 @@ const (
commaSep // elements are separated by commas
commaTerm // list is optionally terminated by a comma
noIndent // no extra indentation in multi-line lists
periodSep // elements are separated by periods
)
// Sets multiLine to true if the identifier list spans multiple lines.
......@@ -133,7 +132,9 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
for i, x := range list {
if i > 0 {
if mode&commaSep != 0 {
p.print(token.COMMA)
// use position of expression following the comma as
// comma position for correct comment placement
p.print(x.Pos(), token.COMMA)
}
p.print(blank)
}
......@@ -213,14 +214,18 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
}
if i > 0 {
switch {
case mode&commaSep != 0:
needsLinebreak := prevLine < line && prevLine > 0 && line > 0
if mode&commaSep != 0 {
// use position of expression following the comma as
// comma position for correct comment placement, but
// only if the expression is on the same line
if !needsLinebreak {
p.print(x.Pos())
}
p.print(token.COMMA)
case mode&periodSep != 0:
p.print(token.PERIOD)
}
needsBlank := mode&periodSep == 0 // period-separated list elements don't need a blank
if prevLine < line && prevLine > 0 && line > 0 {
needsBlank := true
if needsLinebreak {
// lines are broken using newlines so comments remain aligned
// unless forceFF is set or there are multiple expressions on
// the same line in which case formfeed is used
......@@ -287,11 +292,18 @@ func (p *printer) parameters(fields *ast.FieldList, multiLine *bool) {
parLineBeg = parLineEnd
}
// separating "," if needed
needsLinebreak := 0 < prevLine && prevLine < parLineBeg
if i > 0 {
// use position of parameter following the comma as
// comma position for correct comma placement, but
// only if the next parameter is on the same line
if !needsLinebreak {
p.print(par.Pos())
}
p.print(token.COMMA)
}
// separator if needed (linebreak or blank)
if 0 < prevLine && prevLine < parLineBeg && p.linebreak(parLineBeg, 0, ws, true) {
if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) {
// break line if the opening "(" or previous parameter ended on a different line
ws = ignore
*multiLine = true
......@@ -316,7 +328,7 @@ func (p *printer) parameters(fields *ast.FieldList, multiLine *bool) {
// if the closing ")" is on a separate line from the last parameter,
// print an additional "," and line break
if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing {
p.print(",")
p.print(token.COMMA)
p.linebreak(closing, 0, ignore, true)
}
// unindent if we indented
......@@ -374,7 +386,7 @@ func (p *printer) isOneLineFieldList(list []*ast.Field) bool {
}
func (p *printer) setLineComment(text string) {
p.setComment(&ast.CommentGroup{[]*ast.Comment{{token.NoPos, text}}})
p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}})
}
func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) {
......@@ -397,6 +409,7 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
f := list[0]
for i, x := range f.Names {
if i > 0 {
// no comments so no need for comma position
p.print(token.COMMA, blank)
}
p.expr(x, ignoreMultiLine)
......@@ -668,63 +681,6 @@ func isBinary(expr ast.Expr) bool {
return ok
}
// If the expression contains one or more selector expressions, splits it into
// two expressions at the rightmost period. Writes entire expr to suffix when
// selector isn't found. Rewrites AST nodes for calls, index expressions and
// type assertions, all of which may be found in selector chains, to make them
// parts of the chain.
func splitSelector(expr ast.Expr) (body, suffix ast.Expr) {
switch x := expr.(type) {
case *ast.SelectorExpr:
body, suffix = x.X, x.Sel
return
case *ast.CallExpr:
body, suffix = splitSelector(x.Fun)
if body != nil {
suffix = &ast.CallExpr{suffix, x.Lparen, x.Args, x.Ellipsis, x.Rparen}
return
}
case *ast.IndexExpr:
body, suffix = splitSelector(x.X)
if body != nil {
suffix = &ast.IndexExpr{suffix, x.Lbrack, x.Index, x.Rbrack}
return
}
case *ast.SliceExpr:
body, suffix = splitSelector(x.X)
if body != nil {
suffix = &ast.SliceExpr{suffix, x.Lbrack, x.Low, x.High, x.Rbrack}
return
}
case *ast.TypeAssertExpr:
body, suffix = splitSelector(x.X)
if body != nil {
suffix = &ast.TypeAssertExpr{suffix, x.Type}
return
}
}
suffix = expr
return
}
// Convert an expression into an expression list split at the periods of
// selector expressions.
func selectorExprList(expr ast.Expr) (list []ast.Expr) {
// split expression
for expr != nil {
var suffix ast.Expr
expr, suffix = splitSelector(expr)
list = append(list, suffix)
}
// reverse list
for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
list[i], list[j] = list[j], list[i]
}
return
}
// Sets multiLine to true if the expression spans multiple lines.
func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
p.print(expr.Pos())
......@@ -798,8 +754,14 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
}
case *ast.SelectorExpr:
parts := selectorExprList(expr)
p.exprList(token.NoPos, parts, depth, periodSep, multiLine, token.NoPos)
p.expr1(x.X, token.HighestPrec, depth, multiLine)
p.print(token.PERIOD)
if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line {
p.print(indent, newline, x.Sel.Pos(), x.Sel, unindent)
*multiLine = true
} else {
p.print(x.Sel.Pos(), x.Sel)
}
case *ast.TypeAssertExpr:
p.expr1(x.X, token.HighestPrec, depth, multiLine)
......@@ -1180,7 +1142,9 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
p.print(token.FOR, blank)
p.expr(s.Key, multiLine)
if s.Value != nil {
p.print(token.COMMA, blank)
// use position of value following the comma as
// comma position for correct comment placement
p.print(s.Value.Pos(), token.COMMA, blank)
p.expr(s.Value, multiLine)
}
p.print(blank, s.TokPos, s.Tok, blank, token.RANGE, blank)
......
......@@ -686,9 +686,11 @@ func (p *printer) intersperseComments(next token.Position, tok token.Token) (wro
}
if last != nil {
if last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line {
// the last comment is a /*-style comment and the next item
// follows on the same line: separate with an extra blank
// if the last comment is a /*-style comment and the next item
// follows on the same line but is not a comma or a "closing"
// token, add an extra blank for separation
if last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line && tok != token.COMMA &&
tok != token.RPAREN && tok != token.RBRACK && tok != token.RBRACE {
p.writeByte(' ', 1)
}
// ensure that there is a line break after a //-style comment,
......
......@@ -283,10 +283,10 @@ func fibo(n int) {
t.Error("expected offset 1") // error in test
}
testComment(t, f, len(src), &ast.Comment{pos, "//-style comment"})
testComment(t, f, len(src), &ast.Comment{pos, "/*-style comment */"})
testComment(t, f, len(src), &ast.Comment{pos, "/*-style \n comment */"})
testComment(t, f, len(src), &ast.Comment{pos, "/*-style comment \n\n\n */"})
testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "//-style comment"})
testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment */"})
testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style \n comment */"})
testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment \n\n\n */"})
}
type visitor chan *ast.Ident
......
......@@ -405,16 +405,17 @@ func _() {
}
// Some interesting interspersed comments.
// See below for more common cases.
func _( /* this */ x /* is */ /* an */ int) {
}
func _( /* no params */ ) {}
func _( /* no params */) {}
func _() {
f( /* no args */ )
f( /* no args */)
}
func ( /* comment1 */ T /* comment2 */ ) _() {}
func ( /* comment1 */ T /* comment2 */) _() {}
func _() { /* one-line functions with comments are formatted as multi-line functions */
}
......@@ -425,7 +426,7 @@ func _() {
}
func _() {
_ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */ }
_ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
}
// Test cases from issue 1542:
......@@ -448,8 +449,9 @@ func _() {
_ = a
}
// Comments immediately adjacent to punctuation (for which the go/printer
// may only have estimated position information) must remain after the punctuation.
// Comments immediately adjacent to punctuation followed by a newline
// remain after the punctuation (looks better and permits alignment of
// comments).
func _() {
_ = T{
1, // comment after comma
......@@ -479,6 +481,35 @@ func _() {
}
}
// If there is no newline following punctuation, commas move before the punctuation.
// This way, commas interspersed in lists stay with the respective expression.
func f(x /* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
f(x /* comment */, y)
f(x, /* comment */
y)
f(
x, /* comment */
)
}
func g(
x int, /* comment */
) {
}
type _ struct {
a, b /* comment */, c int
}
type _ struct {
a, b /* comment */, c int
}
func _() {
for a /* comment */, b := range x {
}
}
// Print line directives correctly.
// The following is a legal line directive.
......
......@@ -411,6 +411,7 @@ func _() {
// Some interesting interspersed comments.
// See below for more common cases.
func _(/* this */x/* is *//* an */ int) {
}
......@@ -453,8 +454,9 @@ func _() {
_ = a
}
// Comments immediately adjacent to punctuation (for which the go/printer
// may only have estimated position information) must remain after the punctuation.
// Comments immediately adjacent to punctuation followed by a newline
// remain after the punctuation (looks better and permits alignment of
// comments).
func _() {
_ = T{
1, // comment after comma
......@@ -486,6 +488,31 @@ func _() {
}
}
// If there is no newline following punctuation, commas move before the punctuation.
// This way, commas interspersed in lists stay with the respective expression.
func f(x/* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
f(x /* comment */, y)
f(x /* comment */,
y)
f(
x /* comment */,
)
}
func g(
x int /* comment */,
) {}
type _ struct {
a, b /* comment */, c int
}
type _ struct { a, b /* comment */, c int }
func _() {
for a /* comment */, b := range x {
}
}
// Print line directives correctly.
......
......@@ -545,7 +545,7 @@ func _() {
// handle multiline argument list correctly
_ = new(T).
foo(
1).
1).
foo(2)
_ = new(T).foo(
......@@ -587,12 +587,12 @@ func _() {
_ = new(T).
Field.
Array[3+
4].
4].
Table["foo"].
Blob.(*Type).
Slices[1:4].
Method(1, 2,
3).
3).
Thingy
_ = a.b.c
......
......@@ -545,7 +545,7 @@ func _() {
// handle multiline argument list correctly
_ = new(T).
foo(
1).
1).
foo(2)
_ = new(T).foo(
......@@ -587,12 +587,12 @@ func _() {
_ = new(T).
Field.
Array[3+
4].
4].
Table["foo"].
Blob.(*Type).
Slices[1:4].
Method(1, 2,
3).
3).
Thingy
_ = a.b.c
......
......@@ -2,21 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package scanner implements a scanner for Go source text. Takes a []byte as
// source which can then be tokenized through repeated calls to the Scan
// function. Typical use:
//
// var s scanner.Scanner
// fset := token.NewFileSet() // position information is relative to fset
// file := fset.AddFile(filename, fset.Base(), len(src)) // register file
// s.Init(file, src, nil /* no error handler */, 0)
// for {
// pos, tok, lit := s.Scan()
// if tok == token.EOF {
// break
// }
// // do something here with pos, tok, and lit
// }
// Package scanner implements a scanner for Go source text.
// It takes a []byte as source which can then be tokenized
// through repeated calls to the Scan method.
//
package scanner
......
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"text/template/parse"
)
// clone clones a template Node.
func clone(n parse.Node) parse.Node {
switch t := n.(type) {
case *parse.ActionNode:
return cloneAction(t)
case *parse.IfNode:
b := new(parse.IfNode)
copyBranch(&b.BranchNode, &t.BranchNode)
return b
case *parse.ListNode:
return cloneList(t)
case *parse.RangeNode:
b := new(parse.RangeNode)
copyBranch(&b.BranchNode, &t.BranchNode)
return b
case *parse.TemplateNode:
return cloneTemplate(t)
case *parse.TextNode:
return cloneText(t)
case *parse.WithNode:
b := new(parse.WithNode)
copyBranch(&b.BranchNode, &t.BranchNode)
return b
}
panic("cloning " + n.String() + " is unimplemented")
}
// cloneAction returns a deep clone of n.
func cloneAction(n *parse.ActionNode) *parse.ActionNode {
// We use keyless fields because they won't compile if a field is added.
return &parse.ActionNode{n.NodeType, n.Line, clonePipe(n.Pipe)}
}
// cloneList returns a deep clone of n.
func cloneList(n *parse.ListNode) *parse.ListNode {
if n == nil {
return nil
}
// We use keyless fields because they won't compile if a field is added.
c := parse.ListNode{n.NodeType, make([]parse.Node, len(n.Nodes))}
for i, child := range n.Nodes {
c.Nodes[i] = clone(child)
}
return &c
}
// clonePipe returns a shallow clone of n.
// The escaper does not modify pipe descendants in place so there's no need to
// clone deeply.
func clonePipe(n *parse.PipeNode) *parse.PipeNode {
if n == nil {
return nil
}
// We use keyless fields because they won't compile if a field is added.
return &parse.PipeNode{n.NodeType, n.Line, n.Decl, n.Cmds}
}
// cloneTemplate returns a deep clone of n.
func cloneTemplate(n *parse.TemplateNode) *parse.TemplateNode {
// We use keyless fields because they won't compile if a field is added.
return &parse.TemplateNode{n.NodeType, n.Line, n.Name, clonePipe(n.Pipe)}
}
// cloneText clones the given node sharing its []byte.
func cloneText(n *parse.TextNode) *parse.TextNode {
// We use keyless fields because they won't compile if a field is added.
return &parse.TextNode{n.NodeType, n.Text}
}
// copyBranch clones src into dst.
func copyBranch(dst, src *parse.BranchNode) {
// We use keyless fields because they won't compile if a field is added.
*dst = parse.BranchNode{
src.NodeType,
src.Line,
clonePipe(src.Pipe),
cloneList(src.List),
cloneList(src.ElseList),
}
}
......@@ -7,86 +7,109 @@ package template
import (
"bytes"
"testing"
"text/template/parse"
)
// TestAddParseTree checks that a parse tree added to an existing template
// set is escaped according to the context in which it is invoked: the
// "b" template is spliced into an unquoted-attribute URL context, so the
// interpolated value must be URL-escaped there and HTML-escaped elsewhere.
func TestAddParseTree(t *testing.T) {
	root := Must(New("root").Parse(`{{define "a"}} {{.}} {{template "b"}} {{.}} "></a>{{end}}`))
	trees, err := parse.Parse("t", `{{define "b"}}<a href="{{end}}`, "", "", nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	added := Must(root.AddParseTree("b", trees["b"]))
	var buf bytes.Buffer
	if err := added.ExecuteTemplate(&buf, "a", "1>0"); err != nil {
		t.Fatal(err)
	}
	const want = ` 1&gt;0 <a href=" 1%3e0 "></a>`
	if got := buf.String(); got != want {
		t.Errorf("got %q want %q", got, want)
	}
}
func TestClone(t *testing.T) {
tests := []struct {
input, want, wantClone string
}{
{
`Hello, {{if true}}{{"<World>"}}{{end}}!`,
"Hello, <World>!",
"Hello, &lt;World&gt;!",
},
{
`Hello, {{if false}}{{.X}}{{else}}{{"<World>"}}{{end}}!`,
"Hello, <World>!",
"Hello, &lt;World&gt;!",
},
{
`Hello, {{with "<World>"}}{{.}}{{end}}!`,
"Hello, <World>!",
"Hello, &lt;World&gt;!",
},
{
`{{range .}}<p>{{.}}</p>{{end}}`,
"<p>foo</p><p><bar></p><p>baz</p>",
"<p>foo</p><p>&lt;bar&gt;</p><p>baz</p>",
},
{
`Hello, {{"<World>" | html}}!`,
"Hello, &lt;World&gt;!",
"Hello, &lt;World&gt;!",
},
{
`Hello{{if 1}}, World{{else}}{{template "d"}}{{end}}!`,
"Hello, World!",
"Hello, World!",
},
// The {{.}} will be executed with data "<i>*/" in different contexts.
// In the t0 template, it will be in a text context.
// In the t1 template, it will be in a URL context.
// In the t2 template, it will be in a JavaScript context.
// In the t3 template, it will be in a CSS context.
const tmpl = `{{define "a"}}{{template "lhs"}}{{.}}{{template "rhs"}}{{end}}`
b := new(bytes.Buffer)
// Create an incomplete template t0.
t0 := Must(New("t0").Parse(tmpl))
// Clone t0 as t1.
t1 := Must(t0.Clone())
Must(t1.Parse(`{{define "lhs"}} <a href=" {{end}}`))
Must(t1.Parse(`{{define "rhs"}} "></a> {{end}}`))
// Execute t1.
b.Reset()
if err := t1.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
t.Fatal(err)
}
if got, want := b.String(), ` <a href=" %3ci%3e*/ "></a> `; got != want {
t.Errorf("t1: got %q want %q", got, want)
}
// Clone t0 as t2.
t2 := Must(t0.Clone())
Must(t2.Parse(`{{define "lhs"}} <p onclick="javascript: {{end}}`))
Must(t2.Parse(`{{define "rhs"}} "></p> {{end}}`))
// Execute t2.
b.Reset()
if err := t2.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
t.Fatal(err)
}
if got, want := b.String(), ` <p onclick="javascript: &#34;\u003ci\u003e*/&#34; "></p> `; got != want {
t.Errorf("t2: got %q want %q", got, want)
}
for _, test := range tests {
s, err := New("s").Parse(test.input)
if err != nil {
t.Errorf("input=%q: unexpected parse error %v", test.input, err)
}
d, _ := New("d").Parse(test.input)
// Hack: just replace the root of the tree.
d.text.Root = cloneList(s.text.Root)
if want, got := s.text.Root.String(), d.text.Root.String(); want != got {
t.Errorf("want %q, got %q", want, got)
}
err = escapeTemplates(d, "d")
if err != nil {
t.Errorf("%q: failed to escape: %s", test.input, err)
continue
}
if want, got := "s", s.Name(); want != got {
t.Errorf("want %q, got %q", want, got)
continue
}
if want, got := "d", d.Name(); want != got {
t.Errorf("want %q, got %q", want, got)
continue
}
data := []string{"foo", "<bar>", "baz"}
var b bytes.Buffer
d.Execute(&b, data)
if got := b.String(); got != test.wantClone {
t.Errorf("input=%q: want %q, got %q", test.input, test.wantClone, got)
}
// Make sure escaping d did not affect s.
b.Reset()
s.text.Execute(&b, data)
if got := b.String(); got != test.want {
t.Errorf("input=%q: want %q, got %q", test.input, test.want, got)
}
// Clone t0 as t3, but do not execute t3 yet.
t3 := Must(t0.Clone())
Must(t3.Parse(`{{define "lhs"}} <style> {{end}}`))
Must(t3.Parse(`{{define "rhs"}} </style> {{end}}`))
// Complete t0.
Must(t0.Parse(`{{define "lhs"}} ( {{end}}`))
Must(t0.Parse(`{{define "rhs"}} ) {{end}}`))
// Clone t0 as t4. Redefining the "lhs" template should fail.
t4 := Must(t0.Clone())
if _, err := t4.Parse(`{{define "lhs"}} FAIL {{end}}`); err == nil {
t.Error(`redefine "lhs": got nil err want non-nil`)
}
// Execute t0.
b.Reset()
if err := t0.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
t.Fatal(err)
}
if got, want := b.String(), ` ( &lt;i&gt;*/ ) `; got != want {
t.Errorf("t0: got %q want %q", got, want)
}
// Clone t0. This should fail, as t0 has already executed.
if _, err := t0.Clone(); err == nil {
t.Error(`t0.Clone(): got nil err want non-nil`)
}
// Similarly, cloning sub-templates should fail.
if _, err := t0.Lookup("a").Clone(); err == nil {
t.Error(`t0.Lookup("a").Clone(): got nil err want non-nil`)
}
if _, err := t0.Lookup("lhs").Clone(); err == nil {
t.Error(`t0.Lookup("lhs").Clone(): got nil err want non-nil`)
}
// Execute t3.
b.Reset()
if err := t3.ExecuteTemplate(b, "a", "<i>*/"); err != nil {
t.Fatal(err)
}
if got, want := b.String(), ` <style> ZgotmplZ </style> `; got != want {
t.Errorf("t3: got %q want %q", got, want)
}
}
......@@ -85,6 +85,22 @@ func indirect(a interface{}) interface{} {
return v.Interface()
}
var (
	errorType       = reflect.TypeOf((*error)(nil)).Elem()
	fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
)

// indirectToStringerOrError walks down a chain of pointers and returns
// the first value it reaches that is a non-pointer, a nil pointer, or a
// value whose type implements fmt.Stringer or error. Stopping at a
// Stringer/error lets printing honor those methods even on pointer
// receivers.
func indirectToStringerOrError(a interface{}) interface{} {
	v := reflect.ValueOf(a)
	for {
		t := v.Type()
		if t.Implements(fmtStringerType) || t.Implements(errorType) {
			break
		}
		if v.Kind() != reflect.Ptr || v.IsNil() {
			break
		}
		v = v.Elem()
	}
	return v.Interface()
}
// stringify converts its arguments to a string and the type of the content.
// All pointers are dereferenced, as in the text/template package.
func stringify(args ...interface{}) (string, contentType) {
......@@ -107,7 +123,7 @@ func stringify(args ...interface{}) (string, contentType) {
}
}
for i, arg := range args {
args[i] = indirect(arg)
args[i] = indirectToStringerOrError(arg)
}
return fmt.Sprint(args...), contentTypePlain
}
......@@ -6,6 +6,7 @@ package template
import (
"bytes"
"fmt"
"strings"
"testing"
)
......@@ -219,3 +220,42 @@ func TestTypedContent(t *testing.T) {
}
}
}
// stringer is a fmt.Stringer with a pointer receiver, used to test that
// {{.}} prints values via their String method. Was issue 3073.
type stringer struct {
	v int
}

// String implements fmt.Stringer.
func (x *stringer) String() string {
	return fmt.Sprintf("string=%d", x.v)
}
// errorer is an error implementation with a pointer receiver, used to
// test that {{.}} prints values via their Error method.
type errorer struct {
	v int
}

// Error implements the error interface.
func (e *errorer) Error() string {
	return fmt.Sprintf("error=%d", e.v)
}
func TestStringer(t *testing.T) {
s := &stringer{3}
b := new(bytes.Buffer)
tmpl := Must(New("x").Parse("{{.}}"))
if err := tmpl.Execute(b, s); err != nil {
t.Fatal(err)
}
var expect = "string=3"
if b.String() != expect {
t.Errorf("expected %q got %q", expect, b.String())
}
e := &errorer{7}
b.Reset()
if err := tmpl.Execute(b, e); err != nil {
t.Fatal(err)
}
expect = "error=7"
if b.String() != expect {
t.Errorf("expected %q got %q", expect, b.String())
}
}
......@@ -17,11 +17,11 @@ Introduction
This package wraps package text/template so you can share its template API
to parse and execute HTML templates safely.
set, err := new(template.Set).Parse(...)
tmpl, err := template.New("name").Parse(...)
// Error checking elided
err = set.Execute(out, "Foo", data)
err = tmpl.Execute(out, "Foo", data)
If successful, set will now be injection-safe. Otherwise, err is an error
If successful, tmpl will now be injection-safe. Otherwise, err is an error
defined in the docs for ErrorCode.
HTML templates treat data values as plain text which should be encoded so they
......@@ -172,18 +172,18 @@ This package assumes that template authors are trusted, that Execute's data
parameter is not, and seeks to preserve the properties below in the face
of untrusted data:
Structure Preservation Property
Structure Preservation Property:
"... when a template author writes an HTML tag in a safe templating language,
the browser will interpret the corresponding portion of the output as a tag
regardless of the values of untrusted data, and similarly for other structures
such as attribute boundaries and JS and CSS string boundaries."
Code Effect Property
Code Effect Property:
"... only code specified by the template author should run as a result of
injecting the template output into a page and all code specified by the
template author should run as a result of the same."
Least Surprise Property
Least Surprise Property:
"A developer (or code reviewer) familiar with HTML, CSS, and JavaScript, who
knows that contextual autoescaping happens should be able to look at a {{.}}
and correctly infer what sanitization happens."
......
......@@ -46,30 +46,30 @@ func escapeTemplates(tmpl *Template, names ...string) error {
// funcMap maps command names to functions that render their inputs safe.
var funcMap = template.FuncMap{
"exp_template_html_attrescaper": attrEscaper,
"exp_template_html_commentescaper": commentEscaper,
"exp_template_html_cssescaper": cssEscaper,
"exp_template_html_cssvaluefilter": cssValueFilter,
"exp_template_html_htmlnamefilter": htmlNameFilter,
"exp_template_html_htmlescaper": htmlEscaper,
"exp_template_html_jsregexpescaper": jsRegexpEscaper,
"exp_template_html_jsstrescaper": jsStrEscaper,
"exp_template_html_jsvalescaper": jsValEscaper,
"exp_template_html_nospaceescaper": htmlNospaceEscaper,
"exp_template_html_rcdataescaper": rcdataEscaper,
"exp_template_html_urlescaper": urlEscaper,
"exp_template_html_urlfilter": urlFilter,
"exp_template_html_urlnormalizer": urlNormalizer,
"html_template_attrescaper": attrEscaper,
"html_template_commentescaper": commentEscaper,
"html_template_cssescaper": cssEscaper,
"html_template_cssvaluefilter": cssValueFilter,
"html_template_htmlnamefilter": htmlNameFilter,
"html_template_htmlescaper": htmlEscaper,
"html_template_jsregexpescaper": jsRegexpEscaper,
"html_template_jsstrescaper": jsStrEscaper,
"html_template_jsvalescaper": jsValEscaper,
"html_template_nospaceescaper": htmlNospaceEscaper,
"html_template_rcdataescaper": rcdataEscaper,
"html_template_urlescaper": urlEscaper,
"html_template_urlfilter": urlFilter,
"html_template_urlnormalizer": urlNormalizer,
}
// equivEscapers matches contextual escapers to equivalent template builtins.
var equivEscapers = map[string]string{
"exp_template_html_attrescaper": "html",
"exp_template_html_htmlescaper": "html",
"exp_template_html_nospaceescaper": "html",
"exp_template_html_rcdataescaper": "html",
"exp_template_html_urlescaper": "urlquery",
"exp_template_html_urlnormalizer": "urlquery",
"html_template_attrescaper": "html",
"html_template_htmlescaper": "html",
"html_template_nospaceescaper": "html",
"html_template_rcdataescaper": "html",
"html_template_urlescaper": "urlquery",
"html_template_urlnormalizer": "urlquery",
}
// escaper collects type inferences about templates and changes needed to make
......@@ -147,17 +147,17 @@ func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
case stateURL, stateCSSDqStr, stateCSSSqStr, stateCSSDqURL, stateCSSSqURL, stateCSSURL:
switch c.urlPart {
case urlPartNone:
s = append(s, "exp_template_html_urlfilter")
s = append(s, "html_template_urlfilter")
fallthrough
case urlPartPreQuery:
switch c.state {
case stateCSSDqStr, stateCSSSqStr:
s = append(s, "exp_template_html_cssescaper")
s = append(s, "html_template_cssescaper")
default:
s = append(s, "exp_template_html_urlnormalizer")
s = append(s, "html_template_urlnormalizer")
}
case urlPartQueryOrFrag:
s = append(s, "exp_template_html_urlescaper")
s = append(s, "html_template_urlescaper")
case urlPartUnknown:
return context{
state: stateError,
......@@ -167,27 +167,27 @@ func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
panic(c.urlPart.String())
}
case stateJS:
s = append(s, "exp_template_html_jsvalescaper")
s = append(s, "html_template_jsvalescaper")
// A slash after a value starts a div operator.
c.jsCtx = jsCtxDivOp
case stateJSDqStr, stateJSSqStr:
s = append(s, "exp_template_html_jsstrescaper")
s = append(s, "html_template_jsstrescaper")
case stateJSRegexp:
s = append(s, "exp_template_html_jsregexpescaper")
s = append(s, "html_template_jsregexpescaper")
case stateCSS:
s = append(s, "exp_template_html_cssvaluefilter")
s = append(s, "html_template_cssvaluefilter")
case stateText:
s = append(s, "exp_template_html_htmlescaper")
s = append(s, "html_template_htmlescaper")
case stateRCDATA:
s = append(s, "exp_template_html_rcdataescaper")
s = append(s, "html_template_rcdataescaper")
case stateAttr:
// Handled below in delim check.
case stateAttrName, stateTag:
c.state = stateAttrName
s = append(s, "exp_template_html_htmlnamefilter")
s = append(s, "html_template_htmlnamefilter")
default:
if isComment(c.state) {
s = append(s, "exp_template_html_commentescaper")
s = append(s, "html_template_commentescaper")
} else {
panic("unexpected state " + c.state.String())
}
......@@ -196,9 +196,9 @@ func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
case delimNone:
// No extra-escaping needed for raw text content.
case delimSpaceOrTagEnd:
s = append(s, "exp_template_html_nospaceescaper")
s = append(s, "html_template_nospaceescaper")
default:
s = append(s, "exp_template_html_attrescaper")
s = append(s, "html_template_attrescaper")
}
e.editActionNode(n, s)
return c
......@@ -260,22 +260,22 @@ func ensurePipelineContains(p *parse.PipeNode, s []string) {
// redundantFuncs[a][b] implies that funcMap[b](funcMap[a](x)) == funcMap[a](x)
// for all x.
var redundantFuncs = map[string]map[string]bool{
"exp_template_html_commentescaper": {
"exp_template_html_attrescaper": true,
"exp_template_html_nospaceescaper": true,
"exp_template_html_htmlescaper": true,
"html_template_commentescaper": {
"html_template_attrescaper": true,
"html_template_nospaceescaper": true,
"html_template_htmlescaper": true,
},
"exp_template_html_cssescaper": {
"exp_template_html_attrescaper": true,
"html_template_cssescaper": {
"html_template_attrescaper": true,
},
"exp_template_html_jsregexpescaper": {
"exp_template_html_attrescaper": true,
"html_template_jsregexpescaper": {
"html_template_attrescaper": true,
},
"exp_template_html_jsstrescaper": {
"exp_template_html_attrescaper": true,
"html_template_jsstrescaper": {
"html_template_attrescaper": true,
},
"exp_template_html_urlescaper": {
"exp_template_html_urlnormalizer": true,
"html_template_urlescaper": {
"html_template_urlnormalizer": true,
},
}
......@@ -505,7 +505,7 @@ func (e *escaper) escapeTree(c context, name string, line int) (context, string)
dt := e.template(dname)
if dt == nil {
dt = template.New(dname)
dt.Tree = &parse.Tree{Name: dname, Root: cloneList(t.Root)}
dt.Tree = &parse.Tree{Name: dname, Root: t.Root.CopyList()}
e.derived[dname] = dt
}
t = dt
......
......@@ -50,7 +50,7 @@ func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
// ExecuteTemplate applies the template associated with t that has the given
// name to the specified data object and writes the output to wr.
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
tmpl, err := t.lookupAndEscapeTemplate(wr, name)
tmpl, err := t.lookupAndEscapeTemplate(name)
if err != nil {
return err
}
......@@ -60,7 +60,7 @@ func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{})
// lookupAndEscapeTemplate guarantees that the template with the given name
// is escaped, or returns an error if it cannot be. It returns the named
// template.
func (t *Template) lookupAndEscapeTemplate(wr io.Writer, name string) (tmpl *Template, err error) {
func (t *Template) lookupAndEscapeTemplate(name string) (tmpl *Template, err error) {
t.nameSpace.mu.Lock()
defer t.nameSpace.mu.Unlock()
tmpl = t.set[name]
......@@ -106,14 +106,71 @@ func (t *Template) Parse(src string) (*Template, error) {
return t, nil
}
// AddParseTree is unimplemented.
func (t *Template) AddParseTree(name string, tree *parse.Tree) error {
return fmt.Errorf("html/template: AddParseTree unimplemented")
// AddParseTree creates a new template with the name and parse tree
// and associates it with t.
//
// It returns an error if t has already been executed.
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
t.nameSpace.mu.Lock()
defer t.nameSpace.mu.Unlock()
if t.escaped {
return nil, fmt.Errorf("html/template: cannot AddParseTree to %q after it has executed", t.Name())
}
text, err := t.text.AddParseTree(name, tree)
if err != nil {
return nil, err
}
ret := &Template{
false,
text,
t.nameSpace,
}
t.set[name] = ret
return ret, nil
}
// Clone is unimplemented.
func (t *Template) Clone(name string) error {
return fmt.Errorf("html/template: Clone unimplemented")
// Clone returns a duplicate of the template, including all associated
// templates. The actual representation is not copied, but the name space of
// associated templates is, so further calls to Parse in the copy will add
// templates to the copy but not to the original. Clone can be used to prepare
// common templates and use them with variant definitions for other templates
// by adding the variants after the clone is made.
//
// It returns an error if t has already been executed.
func (t *Template) Clone() (*Template, error) {
t.nameSpace.mu.Lock()
defer t.nameSpace.mu.Unlock()
if t.escaped {
return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name())
}
textClone, err := t.text.Clone()
if err != nil {
return nil, err
}
ret := &Template{
false,
textClone,
&nameSpace{
set: make(map[string]*Template),
},
}
for _, x := range textClone.Templates() {
name := x.Name()
src := t.set[name]
if src == nil || src.escaped {
return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name())
}
x.Tree = &parse.Tree{
Name: x.Tree.Name,
Root: x.Tree.Root.CopyList(),
}
ret.set[name] = &Template{
false,
x,
ret.nameSpace,
}
}
return ret, nil
}
// New allocates a new HTML template with the given name.
......
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This example demonstrates decoding a JPEG image and examining its pixels.
package image_test
import (
"fmt"
"image"
"log"
"os"
// Package image/jpeg is not used explicitly in the code below,
// but is imported for its initialization side-effect, which allows
// image.Decode to understand JPEG formatted images. Uncomment these
// two lines to also understand GIF and PNG images:
// _ "image/gif"
// _ "image/png"
_ "image/jpeg"
)
// Example decodes testdata/video-001.jpeg and prints a 16-bin histogram of
// its red, green, blue and alpha components. The trailing Output comment is
// compared against the actual output by "go test", so it must not change.
func Example() {
	// Open the file.
	file, err := os.Open("testdata/video-001.jpeg")
	if err != nil {
		log.Fatal(err)
	}
	// Close the file when this function returns, even on a decode error.
	defer file.Close()

	// Decode the image. The registered jpeg decoder (imported above for its
	// side effect) handles the format; the format name itself is discarded.
	m, _, err := image.Decode(file)
	if err != nil {
		log.Fatal(err)
	}
	bounds := m.Bounds()

	// Calculate a 16-bin histogram for m's red, green, blue and alpha components.
	//
	// An image's bounds do not necessarily start at (0, 0), so the two loops start
	// at bounds.Min.Y and bounds.Min.X. Looping over Y first and X second is more
	// likely to result in better memory access patterns than X first and Y second.
	var histogram [16][4]int
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			r, g, b, a := m.At(x, y).RGBA()
			// A color's RGBA method returns values in the range [0, 65535].
			// Shifting by 12 reduces this to the range [0, 15].
			histogram[r>>12][0]++
			histogram[g>>12][1]++
			histogram[b>>12][2]++
			histogram[a>>12][3]++
		}
	}

	// Print the results.
	fmt.Printf("%-14s %6s %6s %6s %6s\n", "bin", "red", "green", "blue", "alpha")
	for i, x := range histogram {
		fmt.Printf("0x%04x-0x%04x: %6d %6d %6d %6d\n", i<<12, (i+1)<<12-1, x[0], x[1], x[2], x[3])
	}
	// Output:
	// bin red green blue alpha
	// 0x0000-0x0fff: 471 819 7596 0
	// 0x1000-0x1fff: 576 2892 726 0
	// 0x2000-0x2fff: 1038 2330 943 0
	// 0x3000-0x3fff: 883 2321 1014 0
	// 0x4000-0x4fff: 501 1295 525 0
	// 0x5000-0x5fff: 302 962 242 0
	// 0x6000-0x6fff: 219 358 150 0
	// 0x7000-0x7fff: 352 281 192 0
	// 0x8000-0x8fff: 3688 216 246 0
	// 0x9000-0x9fff: 2277 237 283 0
	// 0xa000-0xafff: 971 254 357 0
	// 0xb000-0xbfff: 317 306 429 0
	// 0xc000-0xcfff: 203 402 401 0
	// 0xd000-0xdfff: 256 394 241 0
	// 0xe000-0xefff: 378 343 173 0
	// 0xf000-0xffff: 3018 2040 1932 15450
}
......@@ -50,6 +50,9 @@ func TestYCbCr(t *testing.T) {
testYCbCr(t, r, subsampleRatio, delta)
}
}
if testing.Short() {
break
}
}
}
......
......@@ -49,7 +49,7 @@ func TempFile(dir, prefix string) (f *os.File, err error) {
for i := 0; i < 10000; i++ {
name := filepath.Join(dir, prefix+nextSuffix())
f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if pe, ok := err.(*os.PathError); ok && pe.Err == os.EEXIST {
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
rand = reseed()
}
......@@ -76,7 +76,7 @@ func TempDir(dir, prefix string) (name string, err error) {
for i := 0; i < 10000; i++ {
try := filepath.Join(dir, prefix+nextSuffix())
err = os.Mkdir(try, 0700)
if pe, ok := err.(*os.PathError); ok && pe.Err == os.EEXIST {
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
rand = reseed()
}
......
......@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !windows,!plan9
// Package syslog provides a simple interface to the system log service. It
// can send messages to the syslog daemon using UNIX domain sockets, UDP, or
// TCP connections.
......
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !windows,!plan9
package syslog
import (
......
......@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !windows,!plan9
package syslog
import (
......
......@@ -512,6 +512,9 @@ func TestStringPowers(t *testing.T) {
t.Errorf("failed at %d ** %d in base %d: %s != %s", b, p, b, xs, xs2)
}
}
if b >= 3 && testing.Short() {
break
}
}
}
......
......@@ -49,9 +49,10 @@ func (r *Rand) Int() int {
}
// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func (r *Rand) Int63n(n int64) int64 {
if n <= 0 {
return 0
panic("invalid argument to Int63n")
}
max := int64((1 << 63) - 1 - (1<<63)%uint64(n))
v := r.Int63()
......@@ -62,9 +63,10 @@ func (r *Rand) Int63n(n int64) int64 {
}
// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func (r *Rand) Int31n(n int32) int32 {
if n <= 0 {
return 0
panic("invalid argument to Int31n")
}
max := int32((1 << 31) - 1 - (1<<31)%uint32(n))
v := r.Int31()
......@@ -75,7 +77,11 @@ func (r *Rand) Int31n(n int32) int32 {
}
// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func (r *Rand) Intn(n int) int {
if n <= 0 {
panic("invalid argument to Intn")
}
if n <= 1<<31-1 {
return int(r.Int31n(int32(n)))
}
......@@ -125,12 +131,15 @@ func Int31() int32 { return globalRand.Int31() }
func Int() int { return globalRand.Int() }
// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func Int63n(n int64) int64 { return globalRand.Int63n(n) }
// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func Int31n(n int32) int32 { return globalRand.Int31n(n) }
// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func Intn(n int) int { return globalRand.Intn(n) }
// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0).
......
......@@ -141,6 +141,9 @@ func TestNonStandardNormalValues(t *testing.T) {
for m := 0.5; m < mmax; m *= 2 {
for _, seed := range testSeeds {
testNormalDistribution(t, numTestSamples, m, sd, seed)
if testing.Short() {
break
}
}
}
}
......@@ -191,6 +194,9 @@ func TestNonStandardExponentialValues(t *testing.T) {
for rate := 0.05; rate < 10; rate *= 2 {
for _, seed := range testSeeds {
testExponentialDistribution(t, numTestSamples, rate, seed)
if testing.Short() {
break
}
}
}
}
......
......@@ -14,7 +14,7 @@ import (
)
// If an IPv6 tunnel is running, we can try dialing a real IPv6 address.
var ipv6 = flag.Bool("ipv6", false, "assume ipv6 tunnel is present")
var testIPv6 = flag.Bool("ipv6", false, "assume ipv6 tunnel is present")
// fd is already connected to the destination, port 80.
// Run an HTTP request to fetch the appropriate page.
......@@ -130,7 +130,7 @@ func TestDialGoogleIPv6(t *testing.T) {
return
}
// Only run tcp6 if the kernel will take it.
if !*ipv6 || !supportsIPv6 {
if !*testIPv6 || !supportsIPv6 {
return
}
......
......@@ -252,7 +252,9 @@ func (s *pollServer) Run() {
} else {
netfd := s.LookupFD(fd, mode)
if netfd == nil {
print("pollServer: unexpected wakeup for fd=", fd, " mode=", string(mode), "\n")
// This can happen because the WaitFD runs without
// holding s's lock, so there might be a pending wakeup
// for an fd that has been evicted. No harm done.
continue
}
s.WakeFD(netfd, mode, nil)
......@@ -506,7 +508,7 @@ func (fd *netFD) Write(p []byte) (int, error) {
}
defer fd.decref()
if fd.sysfile == nil {
return 0, os.EINVAL
return 0, syscall.EINVAL
}
var err error
......
......@@ -335,7 +335,7 @@ func (fd *netFD) Close() error {
func (fd *netFD) shutdown(how int) error {
if fd == nil || fd.sysfd == syscall.InvalidHandle {
return os.EINVAL
return syscall.EINVAL
}
err := syscall.Shutdown(fd.sysfd, how)
if err != nil {
......@@ -369,7 +369,7 @@ func (o *readOp) Name() string {
func (fd *netFD) Read(buf []byte) (int, error) {
if fd == nil {
return 0, os.EINVAL
return 0, syscall.EINVAL
}
fd.rio.Lock()
defer fd.rio.Unlock()
......@@ -378,7 +378,7 @@ func (fd *netFD) Read(buf []byte) (int, error) {
}
defer fd.decref()
if fd.sysfd == syscall.InvalidHandle {
return 0, os.EINVAL
return 0, syscall.EINVAL
}
var o readOp
o.Init(fd, buf, 'r')
......@@ -408,7 +408,7 @@ func (o *readFromOp) Name() string {
func (fd *netFD) ReadFrom(buf []byte) (n int, sa syscall.Sockaddr, err error) {
if fd == nil {
return 0, nil, os.EINVAL
return 0, nil, syscall.EINVAL
}
if len(buf) == 0 {
return 0, nil, nil
......@@ -447,7 +447,7 @@ func (o *writeOp) Name() string {
func (fd *netFD) Write(buf []byte) (int, error) {
if fd == nil {
return 0, os.EINVAL
return 0, syscall.EINVAL
}
fd.wio.Lock()
defer fd.wio.Unlock()
......@@ -478,7 +478,7 @@ func (o *writeToOp) Name() string {
func (fd *netFD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) {
if fd == nil {
return 0, os.EINVAL
return 0, syscall.EINVAL
}
if len(buf) == 0 {
return 0, nil
......@@ -490,7 +490,7 @@ func (fd *netFD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) {
}
defer fd.decref()
if fd.sysfd == syscall.InvalidHandle {
return 0, os.EINVAL
return 0, syscall.EINVAL
}
var o writeToOp
o.Init(fd, buf, 'w')
......@@ -578,10 +578,12 @@ func (fd *netFD) dup() (*os.File, error) {
return nil, os.NewSyscallError("dup", syscall.EWINDOWS)
}
var errNoSupport = errors.New("address family not supported")
func (fd *netFD) ReadMsg(p []byte, oob []byte) (n, oobn, flags int, sa syscall.Sockaddr, err error) {
return 0, 0, 0, nil, os.EAFNOSUPPORT
return 0, 0, 0, nil, errNoSupport
}
func (fd *netFD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err error) {
return 0, 0, os.EAFNOSUPPORT
return 0, 0, errNoSupport
}
......@@ -28,7 +28,7 @@ func newFileFD(f *os.File) (*netFD, error) {
switch sa.(type) {
default:
closesocket(fd)
return nil, os.EINVAL
return nil, syscall.EINVAL
case *syscall.SockaddrInet4:
family = syscall.AF_INET
if proto == syscall.SOCK_DGRAM {
......@@ -84,7 +84,7 @@ func FileConn(f *os.File) (c Conn, err error) {
return newIPConn(fd), nil
}
fd.Close()
return nil, os.EINVAL
return nil, syscall.EINVAL
}
// FileListener returns a copy of the network listener corresponding
......@@ -103,7 +103,7 @@ func FileListener(f *os.File) (l Listener, err error) {
return &UnixListener{fd, laddr.Name}, nil
}
fd.Close()
return nil, os.EINVAL
return nil, syscall.EINVAL
}
// FilePacketConn returns a copy of the packet network connection
......@@ -122,5 +122,5 @@ func FilePacketConn(f *os.File) (c PacketConn, err error) {
return newUnixConn(fd), nil
}
fd.Close()
return nil, os.EINVAL
return nil, syscall.EINVAL
}
......@@ -6,6 +6,7 @@ package net
import (
"os"
"syscall"
)
// FileConn returns a copy of the network connection corresponding to
......@@ -13,7 +14,7 @@ import (
// finished. Closing c does not affect f, and closing f does not
// affect c.
func FileConn(f *os.File) (c Conn, err error) {
return nil, os.EPLAN9
return nil, syscall.EPLAN9
}
// FileListener returns a copy of the network listener corresponding
......@@ -21,7 +22,7 @@ func FileConn(f *os.File) (c Conn, err error) {
// when finished. Closing c does not affect l, and closing l does not
// affect c.
func FileListener(f *os.File) (l Listener, err error) {
return nil, os.EPLAN9
return nil, syscall.EPLAN9
}
// FilePacketConn returns a copy of the packet network connection
......@@ -29,5 +30,5 @@ func FileListener(f *os.File) (l Listener, err error) {
// responsibility to close f when finished. Closing c does not affect
// f, and closing f does not affect c.
func FilePacketConn(f *os.File) (c PacketConn, err error) {
return nil, os.EPLAN9
return nil, syscall.EPLAN9
}
......@@ -34,7 +34,7 @@ var hosttests = []hostTest{
func TestLookupStaticHost(t *testing.T) {
p := hostsPath
hostsPath = "hosts_testdata"
hostsPath = "testdata/hosts"
for i := 0; i < len(hosttests); i++ {
tt := hosttests[i]
ips := lookupStaticHost(tt.host)
......
......@@ -128,6 +128,34 @@ var readSetCookiesTests = []struct {
Raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly",
}},
},
{
Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}},
[]*Cookie{{
Name: ".ASPXAUTH",
Value: "7E3AA",
Path: "/",
Expires: time.Date(2012, 3, 7, 14, 25, 6, 0, time.UTC),
RawExpires: "Wed, 07-Mar-2012 14:25:06 GMT",
HttpOnly: true,
Raw: ".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly",
}},
},
{
Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}},
[]*Cookie{{
Name: "ASP.NET_SessionId",
Value: "foo",
Path: "/",
HttpOnly: true,
Raw: "ASP.NET_SessionId=foo; path=/; HttpOnly",
}},
},
// TODO(bradfitz): users have reported seeing this in the
// wild, but do browsers handle it? RFC 6265 just says "don't
// do that" (section 3) and then never mentions header folding
// again.
// Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly, .ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}},
}
func toJSON(v interface{}) string {
......
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"fmt"
"io/ioutil"
"log"
"net/http"
)
// ExampleHijacker shows how a handler can take over the underlying
// TCP connection of an HTTP request via the http.Hijacker interface
// and speak a raw protocol on it.
func ExampleHijacker() {
	http.HandleFunc("/hijack", func(w http.ResponseWriter, r *http.Request) {
		hijacker, supported := w.(http.Hijacker)
		if !supported {
			http.Error(w, "webserver doesn't support hijacking", http.StatusInternalServerError)
			return
		}
		conn, rw, err := hijacker.Hijack()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// Don't forget to close the connection:
		defer conn.Close()
		rw.WriteString("Now we're speaking raw TCP. Say hi: ")
		rw.Flush()
		line, err := rw.ReadString('\n')
		if err != nil {
			log.Printf("error reading string: %v", err)
			return
		}
		fmt.Fprintf(rw, "You said: %q\nBye.\n", line)
		rw.Flush()
	})
}
// ExampleGet fetches a URL over HTTP and prints the response body.
func ExampleGet() {
	resp, err := http.Get("http://www.google.com/robots.txt")
	if err != nil {
		log.Fatal(err)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	fmt.Printf("%s", body)
}
......@@ -6,6 +6,7 @@ package http_test
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
......@@ -131,7 +132,7 @@ func TestFileServerCleans(t *testing.T) {
ch := make(chan string, 1)
fs := FileServer(&testFileSystem{func(name string) (File, error) {
ch <- name
return nil, os.ENOENT
return nil, errors.New("file does not exist")
}})
tests := []struct {
reqPath, openArg string
......@@ -398,11 +399,15 @@ func TestLinuxSendfile(t *testing.T) {
return
}
_, err = Get(fmt.Sprintf("http://%s/", ln.Addr()))
res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
if err != nil {
t.Errorf("http client error: %v", err)
return
t.Fatalf("http client error: %v", err)
}
_, err = io.Copy(ioutil.Discard, res.Body)
if err != nil {
t.Fatalf("client body read error: %v", err)
}
res.Body.Close()
// Force child to exit cleanly.
Get(fmt.Sprintf("http://%s/quit", ln.Addr()))
......
......@@ -13,12 +13,12 @@ import (
"net"
"net/http"
"net/textproto"
"os"
"sync"
)
var (
ErrPersistEOF = &http.ProtocolError{ErrorString: "persistent connection closed"}
ErrClosed = &http.ProtocolError{ErrorString: "connection closed by user"}
ErrPipeline = &http.ProtocolError{ErrorString: "pipeline error"}
)
......@@ -191,7 +191,7 @@ func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error {
}
if sc.c == nil { // connection closed by user in the meantime
defer sc.lk.Unlock()
return os.EBADF
return ErrClosed
}
c := sc.c
if sc.nread <= sc.nwritten {
......
......@@ -22,9 +22,9 @@
//
// go tool pprof http://localhost:6060/debug/pprof/profile
//
// Or to look at the thread creation profile:
// Or to view all available profiles:
//
// go tool pprof http://localhost:6060/debug/pprof/thread
// go tool pprof http://localhost:6060/debug/pprof/
//
// For a study of the facility in action, visit
//
......@@ -36,7 +36,9 @@ import (
"bufio"
"bytes"
"fmt"
"html/template"
"io"
"log"
"net/http"
"os"
"runtime"
......@@ -47,11 +49,10 @@ import (
)
func init() {
http.Handle("/debug/pprof/", http.HandlerFunc(Index))
http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline))
http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile))
http.Handle("/debug/pprof/heap", http.HandlerFunc(Heap))
http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol))
http.Handle("/debug/pprof/thread", http.HandlerFunc(Thread))
}
// Cmdline responds with the running program's
......@@ -62,20 +63,6 @@ func Cmdline(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, strings.Join(os.Args, "\x00"))
}
// Heap responds with the pprof-formatted heap profile.
// The package initialization registers it as /debug/pprof/heap.
func Heap(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
pprof.WriteHeapProfile(w)
}
// Thread responds with the pprof-formatted thread creation profile.
// The package initialization registers it as /debug/pprof/thread.
func Thread(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
pprof.WriteThreadProfile(w)
}
// Profile responds with the pprof-formatted cpu profile.
// The package initialization registers it as /debug/pprof/profile.
func Profile(w http.ResponseWriter, r *http.Request) {
......@@ -147,3 +134,61 @@ func Symbol(w http.ResponseWriter, r *http.Request) {
w.Write(buf.Bytes())
}
// Handler returns an HTTP handler that serves the named profile.
func Handler(name string) http.Handler {
return handler(name)
}
type handler string
func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
debug, _ := strconv.Atoi(r.FormValue("debug"))
p := pprof.Lookup(string(name))
if p == nil {
w.WriteHeader(404)
fmt.Fprintf(w, "Unknown profile: %s\n", name)
return
}
p.WriteTo(w, debug)
return
}
// Index responds with the pprof-formatted profile named by the request.
// For example, "/debug/pprof/heap" serves the "heap" profile.
// Index responds to a request for "/debug/pprof/" with an HTML page
// listing the available profiles.
func Index(w http.ResponseWriter, r *http.Request) {
	const prefix = "/debug/pprof/"
	if strings.HasPrefix(r.URL.Path, prefix) {
		// A non-empty suffix names a specific profile; delegate to
		// the per-profile handler.
		if name := r.URL.Path[len(prefix):]; name != "" {
			handler(name).ServeHTTP(w, r)
			return
		}
	}
	// Bare index request: render the HTML listing of all profiles.
	if err := indexTmpl.Execute(w, pprof.Profiles()); err != nil {
		log.Print(err)
	}
}
var indexTmpl = template.Must(template.New("index").Parse(`<html>
<head>
<title>/debug/pprof/</title>
</head>
/debug/pprof/<br>
<br>
<body>
profiles:<br>
<table>
{{range .}}
<tr><td align=right>{{.Count}}<td><a href="/debug/pprof/{{.Name}}?debug=1">{{.Name}}</a>
{{end}}
</table>
<br>
<a href="/debug/pprof/goroutine?debug=2">full goroutine stack dump</a><br>
</body>
</html>
`))
......@@ -186,7 +186,7 @@ func (r *Request) Cookies() []*Cookie {
return readCookies(r.Header, "")
}
var ErrNoCookie = errors.New("http: named cookied not present")
var ErrNoCookie = errors.New("http: named cookie not present")
// Cookie returns the named cookie provided in the request or
// ErrNoCookie if not found.
......@@ -486,7 +486,7 @@ func ReadRequest(b *bufio.Reader) (req *Request, err error) {
rawurl = "http://" + rawurl
}
if req.URL, err = url.ParseRequest(rawurl); err != nil {
if req.URL, err = url.ParseRequestURI(rawurl); err != nil {
return nil, err
}
......
......@@ -245,8 +245,7 @@ func TestServerTimeouts(t *testing.T) {
fmt.Fprintf(res, "req=%d", reqNum)
})
const second = 1000000000 /* nanos */
server := &Server{Handler: handler, ReadTimeout: 0.25 * second, WriteTimeout: 0.25 * second}
server := &Server{Handler: handler, ReadTimeout: 250 * time.Millisecond, WriteTimeout: 250 * time.Millisecond}
go server.Serve(l)
url := fmt.Sprintf("http://%s/", addr)
......@@ -277,7 +276,7 @@ func TestServerTimeouts(t *testing.T) {
if n != 0 || err != io.EOF {
t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF)
}
if latency < 200*time.Millisecond /* fudge from 0.25 above */ {
if latency < 200*time.Millisecond /* fudge from 250 ms above */ {
t.Errorf("got EOF after %s, want >= %s", latency, 200*time.Millisecond)
}
......
......@@ -12,7 +12,6 @@ package http
import (
"bufio"
"bytes"
"crypto/rand"
"crypto/tls"
"errors"
"fmt"
......@@ -985,6 +984,7 @@ type Server struct {
ReadTimeout time.Duration // maximum duration before timing out read of the request
WriteTimeout time.Duration // maximum duration before timing out write of the response
MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0
TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS
}
// ListenAndServe listens on the TCP network address srv.Addr and then
......@@ -1121,9 +1121,12 @@ func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
if addr == "" {
addr = ":https"
}
config := &tls.Config{
Rand: rand.Reader,
NextProtos: []string{"http/1.1"},
config := &tls.Config{}
if srv.TLSConfig != nil {
*config = *srv.TLSConfig
}
if config.NextProtos == nil {
config.NextProtos = []string{"http/1.1"}
}
var err error
......
......@@ -648,7 +648,7 @@ func TestTransportPersistConnLeak(t *testing.T) {
tr := &Transport{}
c := &Client{Transport: tr}
n0 := runtime.Goroutines()
n0 := runtime.NumGoroutine()
const numReq = 25
didReqCh := make(chan bool)
......@@ -669,7 +669,7 @@ func TestTransportPersistConnLeak(t *testing.T) {
<-gotReqCh
}
nhigh := runtime.Goroutines()
nhigh := runtime.NumGoroutine()
// Tell all handlers to unblock and reply.
for i := 0; i < numReq; i++ {
......@@ -685,7 +685,7 @@ func TestTransportPersistConnLeak(t *testing.T) {
time.Sleep(100 * time.Millisecond)
runtime.GC()
runtime.GC() // even more.
nfinal := runtime.Goroutines()
nfinal := runtime.NumGoroutine()
growth := nfinal - n0
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment