Commit 9af4cb95 by Ian Lance Taylor

libgo: Update to weekly.2012-01-27.

From-SVN: r183810
parent 6b6cd722
-9f2be4fbbf69
+1107a7d3cb07
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
@@ -18,7 +18,7 @@ import (
)
var (
-HeaderError = errors.New("invalid tar header")
+ErrHeader = errors.New("invalid tar header")
)
// A Reader provides sequential access to the contents of a tar archive.
@@ -123,13 +123,13 @@ func (tr *Reader) readHeader() *Header {
if bytes.Equal(header, zeroBlock[0:blockSize]) {
tr.err = io.EOF
} else {
-tr.err = HeaderError // zero block and then non-zero block
+tr.err = ErrHeader // zero block and then non-zero block
}
return nil
}
if !tr.verifyChecksum(header) {
-tr.err = HeaderError
+tr.err = ErrHeader
return nil
}
@@ -188,7 +188,7 @@ func (tr *Reader) readHeader() *Header {
}
if tr.err != nil {
-tr.err = HeaderError
+tr.err = ErrHeader
return nil
}
...
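The rename to tar.ErrHeader follows the Go 1 convention of Err-prefixed exported error variables. A minimal caller-side sketch, assuming a local archive named example.tar (the file name and loop are illustrative, not part of this commit):

// Example (illustrative): reading a tar stream and distinguishing a
// malformed header from ordinary end-of-archive.
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("example.tar") // hypothetical input file
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // clean end of archive
		}
		if err == tar.ErrHeader {
			fmt.Println("archive contains an invalid tar header")
			return
		}
		if err != nil {
			fmt.Println("read:", err)
			return
		}
		fmt.Println(hdr.Name)
	}
}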
@@ -240,31 +240,20 @@ func TestNonSeekable(t *testing.T) {
}
defer f.Close()
-// pipe the data in
-r, w, err := os.Pipe()
-if err != nil {
-t.Fatalf("Unexpected error %s", err)
-}
-go func() {
-rdbuf := make([]uint8, 1<<16)
-for {
-nr, err := f.Read(rdbuf)
-w.Write(rdbuf[0:nr])
-if err == io.EOF {
-break
-}
-}
-w.Close()
-}()
-tr := NewReader(r)
+type readerOnly struct {
+io.Reader
+}
+tr := NewReader(readerOnly{f})
nread := 0
for ; ; nread++ {
-hdr, err := tr.Next()
-if hdr == nil || err == io.EOF {
+_, err := tr.Next()
+if err == io.EOF {
break
}
+if err != nil {
+t.Fatalf("Unexpected error: %v", err)
+}
}
if nread != len(test.headers) {
...
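The rewritten test drops the os.Pipe plumbing and instead hides the *os.File's Seek method by embedding it in a struct that only promotes io.Reader, which forces the tar reader onto its non-seekable path. A small standalone sketch of the same wrapping trick, with made-up names:

// Example (illustrative): embedding only io.Reader strips every other
// method, so a type assertion such as r.(io.Seeker) inside a library
// fails and the streaming path is taken.
package main

import (
	"fmt"
	"io"
)

// fileLike stands in for *os.File: it has both Read and Seek.
type fileLike struct{}

func (fileLike) Read(p []byte) (int, error)                   { return 0, io.EOF }
func (fileLike) Seek(offset int64, whence int) (int64, error) { return 0, nil }

// readerOnly promotes only Read, hiding Seek and everything else.
type readerOnly struct {
	io.Reader
}

func pathTaken(r io.Reader) string {
	if _, ok := r.(io.Seeker); ok {
		return "seekable path"
	}
	return "streaming path"
}

func main() {
	var f fileLike
	fmt.Println(pathTaken(f))             // seekable path
	fmt.Println(pathTaken(readerOnly{f})) // streaming path
}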
@@ -17,9 +17,9 @@ import (
)
var (
-FormatError = errors.New("zip: not a valid zip file")
-UnsupportedMethod = errors.New("zip: unsupported compression algorithm")
-ChecksumError = errors.New("zip: checksum error")
+ErrFormat = errors.New("zip: not a valid zip file")
+ErrAlgorithm = errors.New("zip: unsupported compression algorithm")
+ErrChecksum = errors.New("zip: checksum error")
)
type Reader struct {
@@ -90,12 +90,12 @@ func (z *Reader) init(r io.ReaderAt, size int64) error {
// The count of files inside a zip is truncated to fit in a uint16.
// Gloss over this by reading headers until we encounter
-// a bad one, and then only report a FormatError or UnexpectedEOF if
+// a bad one, and then only report a ErrFormat or UnexpectedEOF if
// the file count modulo 65536 is incorrect.
for {
f := &File{zipr: r, zipsize: size}
err = readDirectoryHeader(f, buf)
-if err == FormatError || err == io.ErrUnexpectedEOF {
+if err == ErrFormat || err == io.ErrUnexpectedEOF {
break
}
if err != nil {
@@ -135,7 +135,7 @@ func (f *File) Open() (rc io.ReadCloser, err error) {
case Deflate:
rc = flate.NewReader(r)
default:
-err = UnsupportedMethod
+err = ErrAlgorithm
}
if rc != nil {
rc = &checksumReader{rc, crc32.NewIEEE(), f, r}
@@ -162,7 +162,7 @@ func (r *checksumReader) Read(b []byte) (n int, err error) {
}
}
if r.hash.Sum32() != r.f.CRC32 {
-err = ChecksumError
+err = ErrChecksum
}
return
}
@@ -176,7 +176,7 @@ func readFileHeader(f *File, r io.Reader) error {
}
c := binary.LittleEndian
if sig := c.Uint32(b[:4]); sig != fileHeaderSignature {
-return FormatError
+return ErrFormat
}
f.ReaderVersion = c.Uint16(b[4:6])
f.Flags = c.Uint16(b[6:8])
@@ -207,7 +207,7 @@ func (f *File) findBodyOffset() (int64, error) {
}
c := binary.LittleEndian
if sig := c.Uint32(b[:4]); sig != fileHeaderSignature {
-return 0, FormatError
+return 0, ErrFormat
}
filenameLen := int(c.Uint16(b[26:28]))
extraLen := int(c.Uint16(b[28:30]))
@@ -216,7 +216,7 @@ func (f *File) findBodyOffset() (int64, error) {
// readDirectoryHeader attempts to read a directory header from r.
// It returns io.ErrUnexpectedEOF if it cannot read a complete header,
-// and FormatError if it doesn't find a valid header signature.
+// and ErrFormat if it doesn't find a valid header signature.
func readDirectoryHeader(f *File, r io.Reader) error {
var b [directoryHeaderLen]byte
if _, err := io.ReadFull(r, b[:]); err != nil {
@@ -224,7 +224,7 @@ func readDirectoryHeader(f *File, r io.Reader) error {
}
c := binary.LittleEndian
if sig := c.Uint32(b[:4]); sig != directoryHeaderSignature {
-return FormatError
+return ErrFormat
}
f.CreatorVersion = c.Uint16(b[4:6])
f.ReaderVersion = c.Uint16(b[6:8])
@@ -280,7 +280,7 @@ func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error)
break
}
if i == 1 || bLen == size {
-return nil, FormatError
+return nil, ErrFormat
}
}
...
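With the renames, archive/zip callers compare against ErrFormat, ErrAlgorithm and ErrChecksum. A hedged sketch of typical caller-side handling; the archive name is made up, and zip.OpenReader is just the package's convenience opener:

// Example (illustrative, not part of this commit): opening a zip archive
// and reacting to the renamed error values.
package main

import (
	"archive/zip"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	r, err := zip.OpenReader("example.zip") // hypothetical archive
	if err == zip.ErrFormat {
		fmt.Println("not a zip file")
		return
	}
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer r.Close()

	for _, f := range r.File {
		rc, err := f.Open()
		if err == zip.ErrAlgorithm {
			fmt.Println(f.Name, "uses an unsupported compression method")
			continue
		}
		if err != nil {
			fmt.Println(f.Name, err)
			continue
		}
		// ErrChecksum only surfaces once the whole entry has been read.
		if _, err := io.Copy(ioutil.Discard, rc); err == zip.ErrChecksum {
			fmt.Println(f.Name, "is corrupt")
		}
		rc.Close()
	}
}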
@@ -70,7 +70,7 @@ var tests = []ZipTest{
},
},
{Name: "readme.zip"},
-{Name: "readme.notzip", Error: FormatError},
+{Name: "readme.notzip", Error: ErrFormat},
{
Name: "dd.zip",
File: []ZipTestFile{
@@ -131,7 +131,7 @@ func readTestZip(t *testing.T, zt ZipTest) {
}
// bail if file is not zip
-if err == FormatError {
+if err == ErrFormat {
return
}
defer func() {
@@ -184,8 +184,8 @@ func readTestZip(t *testing.T, zt ZipTest) {
}
var b bytes.Buffer
_, err = io.Copy(&b, r)
-if err != ChecksumError {
-t.Errorf("%s: copy error=%v, want %v", z.File[0].Name, err, ChecksumError)
+if err != ErrChecksum {
+t.Errorf("%s: copy error=%v, want %v", z.File[0].Name, err, ErrChecksum)
}
}
}
@@ -250,13 +250,9 @@ func readTestFile(t *testing.T, ft ZipTestFile, f *File) {
}
func testFileMode(t *testing.T, f *File, want os.FileMode) {
-mode, err := f.Mode()
+mode := f.Mode()
if want == 0 {
-if err == nil {
-t.Errorf("%s mode: got %v, want none", f.Name, mode)
-}
-} else if err != nil {
-t.Errorf("%s mode: %s", f.Name, err)
+t.Errorf("%s mode: got %v, want none", f.Name, mode)
} else if mode != want {
t.Errorf("%s mode: want %v, got %v", f.Name, want, mode)
}
@@ -268,8 +264,8 @@ func TestInvalidFiles(t *testing.T) {
// zeroes
_, err := NewReader(sliceReaderAt(b), size)
-if err != FormatError {
-t.Errorf("zeroes: error=%v, want %v", err, FormatError)
+if err != ErrFormat {
+t.Errorf("zeroes: error=%v, want %v", err, ErrFormat)
}
// repeated directoryEndSignatures
@@ -279,8 +275,8 @@ func TestInvalidFiles(t *testing.T) {
copy(b[i:i+4], sig)
}
_, err = NewReader(sliceReaderAt(b), size)
-if err != FormatError {
-t.Errorf("sigs: error=%v, want %v", err, FormatError)
+if err != ErrFormat {
+t.Errorf("sigs: error=%v, want %v", err, ErrFormat)
}
}
...
@@ -12,6 +12,7 @@ This package does not support ZIP64 or disk spanning.
package zip
import (
+"errors"
"os"
"time"
)
@@ -55,6 +56,38 @@ type FileHeader struct {
Comment string
}
// FileInfo returns an os.FileInfo for the FileHeader.
func (fh *FileHeader) FileInfo() os.FileInfo {
return headerFileInfo{fh}
}
// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
fh *FileHeader
}
func (fi headerFileInfo) Name() string { return fi.fh.Name }
func (fi headerFileInfo) Size() int64 { return int64(fi.fh.UncompressedSize) }
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.fh.ModTime() }
func (fi headerFileInfo) Mode() os.FileMode { return fi.fh.Mode() }
// FileInfoHeader creates a partially-populated FileHeader from an
// os.FileInfo.
func FileInfoHeader(fi os.FileInfo) (*FileHeader, error) {
size := fi.Size()
if size > (1<<32 - 1) {
return nil, errors.New("zip: file over 4GB")
}
fh := &FileHeader{
Name: fi.Name(),
UncompressedSize: uint32(size),
}
fh.SetModTime(fi.ModTime())
fh.SetMode(fi.Mode())
return fh, nil
}
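FileInfo and FileInfoHeader form a bridge between FileHeader and os.FileInfo, so a zip entry can be populated straight from os.Stat. A usage sketch, with illustrative file names and minimal error handling:

// Example (illustrative): building a zip entry header from the os.FileInfo
// of an existing file, then writing that file into an archive.
package main

import (
	"archive/zip"
	"io"
	"log"
	"os"
)

func main() {
	src, err := os.Open("input.txt") // hypothetical file
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	fi, err := src.Stat()
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("out.zip")
	if err != nil {
		log.Fatal(err)
	}
	zw := zip.NewWriter(out)

	hdr, err := zip.FileInfoHeader(fi) // carries name, size, mod time, mode
	if err != nil {
		log.Fatal(err)
	}
	w, err := zw.CreateHeader(hdr)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(w, src); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	out.Close()
}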
type directoryEnd struct {
diskNbr uint16 // unused
dirDiskNbr uint16 // unused
@@ -131,8 +164,7 @@ const (
)
// Mode returns the permission and mode bits for the FileHeader.
-// An error is returned in case the information is not available.
-func (h *FileHeader) Mode() (mode os.FileMode, err error) {
+func (h *FileHeader) Mode() (mode os.FileMode) {
switch h.CreatorVersion >> 8 {
case creatorUnix, creatorMacOSX:
mode = unixModeToFileMode(h.ExternalAttrs >> 16)
@@ -142,7 +174,7 @@ func (h *FileHeader) Mode() (mode os.FileMode, err error) {
if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' {
mode |= os.ModeDir
}
-return mode, nil
+return mode
}
// SetMode changes the permission and mode bits for the FileHeader.
...
@@ -129,7 +129,7 @@ func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
case Deflate:
fw.comp = flate.NewWriter(fw.compCount, 5)
default:
-return nil, UnsupportedMethod
+return nil, ErrAlgorithm
}
fw.rawCount = &countWriter{w: fw.comp}
...
@@ -10,6 +10,7 @@ import (
"bytes"
"fmt"
"io"
+"reflect"
"testing"
"time"
)
@@ -66,3 +67,22 @@ func TestModTime(t *testing.T) {
t.Errorf("times don't match: got %s, want %s", outTime, testTime)
}
}
func TestFileHeaderRoundTrip(t *testing.T) {
fh := &FileHeader{
Name: "foo.txt",
UncompressedSize: 987654321,
ModifiedTime: 1234,
ModifiedDate: 5678,
}
fi := fh.FileInfo()
fh2, err := FileInfoHeader(fi)
// Ignore these fields:
fh2.CreatorVersion = 0
fh2.ExternalAttrs = 0
if !reflect.DeepEqual(fh, fh2) {
t.Errorf("mismatch\n input=%#v\noutput=%#v\nerr=%v", fh, fh2, err)
}
}
@@ -33,6 +33,9 @@ const (
opRead // Any other read operation.
)
// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
var ErrTooLarge = errors.New("bytes.Buffer: too large")
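Write, WriteString, WriteByte, WriteRune and ReadFrom now panic with ErrTooLarge when the buffer cannot be grown, instead of crashing inside make. A caller that prefers an error can recover the panic at an API boundary; a minimal sketch, with a hypothetical helper name:

// Example (illustrative): converting the ErrTooLarge panic back into an
// ordinary error. writeAll is a made-up helper, not part of the package.
package main

import (
	"bytes"
	"fmt"
)

func writeAll(b *bytes.Buffer, p []byte) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if r == bytes.ErrTooLarge {
				err = bytes.ErrTooLarge
				return
			}
			panic(r) // some other panic; re-raise it
		}
	}()
	_, err = b.Write(p)
	return err
}

func main() {
	var b bytes.Buffer
	fmt.Println(writeAll(&b, []byte("hello"))) // <nil> on success
}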
// Bytes returns a slice of the contents of the unread portion of the buffer;
// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
// returned slice, the contents of the buffer will change provided there
@@ -68,8 +71,9 @@ func (b *Buffer) Truncate(n int) {
// b.Reset() is the same as b.Truncate(0).
func (b *Buffer) Reset() { b.Truncate(0) }
-// Grow buffer to guarantee space for n more bytes.
-// Return index where bytes should be written.
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) grow(n int) int {
m := b.Len()
// If buffer is empty, reset to recover space.
@@ -82,7 +86,7 @@ func (b *Buffer) grow(n int) int {
buf = b.bootstrap[0:]
} else {
// not enough space anywhere
-buf = make([]byte, 2*cap(b.buf)+n)
+buf = makeSlice(2*cap(b.buf) + n)
copy(buf, b.buf[b.off:])
}
b.buf = buf
@@ -94,6 +98,8 @@ func (b *Buffer) grow(n int) int {
// Write appends the contents of p to the buffer. The return
// value n is the length of p; err is always nil.
+// If the buffer becomes too large, Write will panic with
+// ErrTooLarge.
func (b *Buffer) Write(p []byte) (n int, err error) {
b.lastRead = opInvalid
m := b.grow(len(p))
@@ -102,6 +108,8 @@ func (b *Buffer) Write(p []byte) (n int, err error) {
// WriteString appends the contents of s to the buffer. The return
// value n is the length of s; err is always nil.
+// If the buffer becomes too large, WriteString will panic with
+// ErrTooLarge.
func (b *Buffer) WriteString(s string) (n int, err error) {
b.lastRead = opInvalid
m := b.grow(len(s))
@@ -118,6 +126,8 @@ const MinRead = 512
// The return value n is the number of bytes read.
// Any error except io.EOF encountered during the read
// is also returned.
+// If the buffer becomes too large, ReadFrom will panic with
+// ErrTooLarge.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
b.lastRead = opInvalid
// If buffer is empty, reset to recover space.
@@ -125,18 +135,16 @@ func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
b.Truncate(0)
}
for {
-if cap(b.buf)-len(b.buf) < MinRead {
-var newBuf []byte
-// can we get space without allocation?
-if b.off+cap(b.buf)-len(b.buf) >= MinRead {
-// reuse beginning of buffer
-newBuf = b.buf[0 : len(b.buf)-b.off]
-} else {
-// not enough space at end; put space on end
-newBuf = make([]byte, len(b.buf)-b.off, 2*(cap(b.buf)-b.off)+MinRead)
+if free := cap(b.buf) - len(b.buf); free < MinRead {
+// not enough space at end
+newBuf := b.buf
+if b.off+free < MinRead {
+// not enough space using beginning of buffer;
+// double buffer capacity
+newBuf = makeSlice(2*cap(b.buf) + MinRead)
}
copy(newBuf, b.buf[b.off:])
-b.buf = newBuf
+b.buf = newBuf[:len(b.buf)-b.off]
b.off = 0
}
m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
@@ -152,6 +160,18 @@ func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
return n, nil // err is EOF, so return nil explicitly
}
// makeSlice allocates a slice of size n. If the allocation fails, it panics
// with ErrTooLarge.
func makeSlice(n int) []byte {
// If the make fails, give a known error.
defer func() {
if recover() != nil {
panic(ErrTooLarge)
}
}()
return make([]byte, n)
}
// WriteTo writes data to w until the buffer is drained or an error
// occurs. The return value n is the number of bytes written; it always
// fits into an int, but it is int64 to match the io.WriterTo interface.
@@ -176,6 +196,8 @@ func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
// WriteByte appends the byte c to the buffer.
// The returned error is always nil, but is included
// to match bufio.Writer's WriteByte.
+// If the buffer becomes too large, WriteByte will panic with
+// ErrTooLarge.
func (b *Buffer) WriteByte(c byte) error {
b.lastRead = opInvalid
m := b.grow(1)
@@ -187,6 +209,8 @@ func (b *Buffer) WriteByte(c byte) error {
// code point r to the buffer, returning its length and
// an error, which is always nil but is included
// to match bufio.Writer's WriteRune.
+// If the buffer becomes too large, WriteRune will panic with
+// ErrTooLarge.
func (b *Buffer) WriteRune(r rune) (n int, err error) {
if r < utf8.RuneSelf {
b.WriteByte(byte(r))
...
@@ -27,10 +27,12 @@ const (
// stop things from getting too large.
maxFlateBlockTokens = 1 << 14
maxStoreBlockSize = 65535
-hashBits = 15
+hashBits = 17
hashSize = 1 << hashBits
hashMask = (1 << hashBits) - 1
hashShift = (hashBits + minMatchLength - 1) / minMatchLength
+skipNever = math.MaxInt32
)
type compressionLevel struct {
@@ -45,12 +47,12 @@ var levels = []compressionLevel{
{3, 0, 32, 32, 6},
// Levels 4-9 use increasingly more lazy matching
// and increasingly stringent conditions for "good enough".
-{4, 4, 16, 16, math.MaxInt32},
-{8, 16, 32, 32, math.MaxInt32},
-{8, 16, 128, 128, math.MaxInt32},
-{8, 32, 128, 256, math.MaxInt32},
-{32, 128, 258, 1024, math.MaxInt32},
-{32, 258, 258, 4096, math.MaxInt32},
+{4, 4, 16, 16, skipNever},
+{8, 16, 32, 32, skipNever},
+{8, 16, 128, 128, skipNever},
+{8, 32, 128, 256, skipNever},
+{32, 128, 258, 1024, skipNever},
+{32, 258, 258, 4096, skipNever},
}
type compressor struct {
@@ -68,9 +70,10 @@ type compressor struct {
// If hashHead[hashValue] is within the current window, then
// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
// with the same hash value.
chainHead int
hashHead []int
hashPrev []int
+hashOffset int
// input window: unprocessed data is window[index:windowEnd]
index int
@@ -79,9 +82,8 @@ type compressor struct {
blockStart int // window index where current tokens start
byteAvailable bool // if true, still need to process window[index-1].
-// queued output tokens: tokens[:ti]
+// queued output tokens
tokens []token
-ti int
// deflate state
length int
@@ -100,22 +102,9 @@ func (d *compressor) fillDeflate(b []byte) int {
if d.blockStart >= windowSize {
d.blockStart -= windowSize
} else {
-d.blockStart = math.MaxInt32
+d.blockStart = skipNever
}
-for i, h := range d.hashHead {
-v := h - windowSize
-if v < -1 {
-v = -1
-}
-d.hashHead[i] = v
-}
-for i, h := range d.hashPrev {
-v := h - windowSize
-if v < -1 {
-v = -1
-}
-d.hashPrev[i] = v
-}
+d.hashOffset += windowSize
}
n := copy(d.window[d.windowEnd:], b)
d.windowEnd += n
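Instead of rewriting every hashHead and hashPrev entry each time the window slides, the compressor now stores position+hashOffset and bumps the offset by windowSize on a slide; lookups subtract the offset, and entries from before the slide simply come out negative (hashOffset starts at 1, so a zeroed slot reads as "empty"). A tiny standalone sketch of the same idea, with illustrative names rather than the compressor's real fields:

// Example (illustrative): a table of positions that "ages out" old entries
// by shifting a single offset instead of rewriting every slot.
package main

import "fmt"

const windowSize = 8

type table struct {
	head   map[uint32]int // stores position + offset; 0 means "empty"
	offset int
}

func newTable() *table { return &table{head: make(map[uint32]int), offset: 1} }

func (t *table) insert(h uint32, pos int) { t.head[h] = pos + t.offset }

// lookup returns the stored position, or -1 if the slot is empty or the
// entry predates the last slide (its adjusted value is negative).
func (t *table) lookup(h uint32) int {
	if p := t.head[h] - t.offset; p >= 0 {
		return p
	}
	return -1
}

// slide is the O(1) replacement for walking the whole table.
func (t *table) slide() { t.offset += windowSize }

func main() {
	t := newTable()
	t.insert(42, 3)
	fmt.Println(t.lookup(42)) // 3
	t.slide()
	fmt.Println(t.lookup(42)) // -1: entry is from before the slide
}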
@@ -186,7 +175,7 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
// hashPrev[i & windowMask] has already been overwritten, so stop now.
break
}
-if i = d.hashPrev[i&windowMask]; i < minIndex || i < 0 {
+if i = d.hashPrev[i&windowMask] - d.hashOffset; i < minIndex || i < 0 {
break
}
}
@@ -205,13 +194,12 @@ func (d *compressor) initDeflate() {
d.hashHead = make([]int, hashSize)
d.hashPrev = make([]int, windowSize)
d.window = make([]byte, 2*windowSize)
-fillInts(d.hashHead, -1)
-d.tokens = make([]token, maxFlateBlockTokens, maxFlateBlockTokens+1)
+d.hashOffset = 1
+d.tokens = make([]token, 0, maxFlateBlockTokens+1)
d.length = minMatchLength - 1
d.offset = 0
d.byteAvailable = false
d.index = 0
-d.ti = 0
d.hash = 0
d.chainHead = -1
}
@@ -243,15 +231,14 @@ Loop:
// Flush current output block if any.
if d.byteAvailable {
// There is still one pending token that needs to be flushed
-d.tokens[d.ti] = literalToken(uint32(d.window[d.index-1]))
-d.ti++
+d.tokens = append(d.tokens, literalToken(uint32(d.window[d.index-1])))
d.byteAvailable = false
}
-if d.ti > 0 {
-if d.err = d.writeBlock(d.tokens[0:d.ti], d.index, false); d.err != nil {
+if len(d.tokens) > 0 {
+if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
return
}
-d.ti = 0
+d.tokens = d.tokens[:0]
}
break Loop
}
@@ -261,7 +248,7 @@ Loop:
d.hash = (d.hash<<hashShift + int(d.window[d.index+2])) & hashMask
d.chainHead = d.hashHead[d.hash]
d.hashPrev[d.index&windowMask] = d.chainHead
-d.hashHead[d.hash] = d.index
+d.hashHead[d.hash] = d.index + d.hashOffset
}
prevLength := d.length
prevOffset := d.offset
@@ -272,34 +259,33 @@ Loop:
minIndex = 0
}
-if d.chainHead >= minIndex &&
-(d.fastSkipHashing != 0 && lookahead > minMatchLength-1 ||
-d.fastSkipHashing == 0 && lookahead > prevLength && prevLength < d.lazy) {
-if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead, minMatchLength-1, lookahead); ok {
+if d.chainHead-d.hashOffset >= minIndex &&
+(d.fastSkipHashing != skipNever && lookahead > minMatchLength-1 ||
+d.fastSkipHashing == skipNever && lookahead > prevLength && prevLength < d.lazy) {
+if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
d.length = newLength
d.offset = newOffset
}
}
-if d.fastSkipHashing != 0 && d.length >= minMatchLength ||
-d.fastSkipHashing == 0 && prevLength >= minMatchLength && d.length <= prevLength {
+if d.fastSkipHashing != skipNever && d.length >= minMatchLength ||
+d.fastSkipHashing == skipNever && prevLength >= minMatchLength && d.length <= prevLength {
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
-if d.fastSkipHashing != 0 {
-d.tokens[d.ti] = matchToken(uint32(d.length-minMatchLength), uint32(d.offset-minOffsetSize))
+if d.fastSkipHashing != skipNever {
+d.tokens = append(d.tokens, matchToken(uint32(d.length-minMatchLength), uint32(d.offset-minOffsetSize)))
} else {
-d.tokens[d.ti] = matchToken(uint32(prevLength-minMatchLength), uint32(prevOffset-minOffsetSize))
+d.tokens = append(d.tokens, matchToken(uint32(prevLength-minMatchLength), uint32(prevOffset-minOffsetSize)))
}
-d.ti++
// Insert in the hash table all strings up to the end of the match.
// index and index-1 are already inserted. If there is not enough
// lookahead, the last two strings are not inserted into the hash
// table.
if d.length <= d.fastSkipHashing {
var newIndex int
-if d.fastSkipHashing != 0 {
+if d.fastSkipHashing != skipNever {
newIndex = d.index + d.length
} else {
-newIndex = prevLength - 1
+newIndex = d.index + prevLength - 1
}
for d.index++; d.index < newIndex; d.index++ {
if d.index < d.maxInsertIndex {
@@ -308,10 +294,10 @@ Loop:
// Our chain should point to the previous value.
d.hashPrev[d.index&windowMask] = d.hashHead[d.hash]
// Set the head of the hash chain to us.
-d.hashHead[d.hash] = d.index
+d.hashHead[d.hash] = d.index + d.hashOffset
}
}
-if d.fastSkipHashing == 0 {
+if d.fastSkipHashing == skipNever {
d.byteAvailable = false
d.length = minMatchLength - 1
}
@@ -323,30 +309,29 @@ Loop:
d.hash = (int(d.window[d.index])<<hashShift + int(d.window[d.index+1]))
}
}
-if d.ti == maxFlateBlockTokens {
+if len(d.tokens) == maxFlateBlockTokens {
// The block includes the current character
if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
return
}
-d.ti = 0
+d.tokens = d.tokens[:0]
}
} else {
-if d.fastSkipHashing != 0 || d.byteAvailable {
+if d.fastSkipHashing != skipNever || d.byteAvailable {
i := d.index - 1
-if d.fastSkipHashing != 0 {
+if d.fastSkipHashing != skipNever {
i = d.index
}
-d.tokens[d.ti] = literalToken(uint32(d.window[i]))
-d.ti++
-if d.ti == maxFlateBlockTokens {
+d.tokens = append(d.tokens, literalToken(uint32(d.window[i])))
+if len(d.tokens) == maxFlateBlockTokens {
if d.err = d.writeBlock(d.tokens, i+1, false); d.err != nil {
return
}
-d.ti = 0
+d.tokens = d.tokens[:0]
}
}
d.index++
-if d.fastSkipHashing == 0 {
+if d.fastSkipHashing == skipNever {
d.byteAvailable = true
}
}
...
@@ -225,10 +225,17 @@ func testSync(t *testing.T, level int, input []byte, name string) {
}
func testToFromWithLevel(t *testing.T, level int, input []byte, name string) error {
+return testToFromWithLevelAndLimit(t, level, input, name, -1)
+}
+func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name string, limit int) error {
buffer := bytes.NewBuffer(nil)
w := NewWriter(buffer, level)
w.Write(input)
w.Close()
+if limit > 0 && buffer.Len() > limit {
+t.Errorf("level: %d, len(compress(data)) = %d > limit = %d", level, buffer.Len(), limit)
+}
r := NewReader(buffer)
out, err := ioutil.ReadAll(r)
if err != nil {
@@ -244,12 +251,16 @@ func testToFromWithLevel(t *testing.T, level int, input []byte, name string) err
return nil
}
-func testToFrom(t *testing.T, input []byte, name string) {
+func testToFromWithLimit(t *testing.T, input []byte, name string, limit [10]int) {
for i := 0; i < 10; i++ {
-testToFromWithLevel(t, i, input, name)
+testToFromWithLevelAndLimit(t, i, input, name, limit[i])
}
}
+func testToFrom(t *testing.T, input []byte, name string) {
+testToFromWithLimit(t, input, name, [10]int{})
+}
func TestDeflateInflate(t *testing.T) {
for i, h := range deflateInflateTests {
testToFrom(t, h.in, fmt.Sprintf("#%d", i))
@@ -265,12 +276,33 @@ func TestReverseBits(t *testing.T) {
}
}
type deflateInflateStringTest struct {
filename string
label string
limit [10]int
}
var deflateInflateStringTests = []deflateInflateStringTest{
{
"../testdata/e.txt",
"2.718281828...",
[...]int{10013, 5065, 5096, 5115, 5093, 5079, 5079, 5079, 5079, 5079},
},
{
"../testdata/Mark.Twain-Tom.Sawyer.txt",
"Mark.Twain-Tom.Sawyer",
[...]int{407330, 187598, 180361, 172974, 169160, 163476, 160936, 160506, 160295, 160295},
},
}
func TestDeflateInflateString(t *testing.T) {
-gold, err := ioutil.ReadFile("../testdata/e.txt")
-if err != nil {
-t.Error(err)
-}
-testToFromWithLevel(t, 1, gold, "2.718281828...")
+for _, test := range deflateInflateStringTests {
+gold, err := ioutil.ReadFile(test.filename)
+if err != nil {
+t.Error(err)
+}
+testToFromWithLimit(t, gold, test.label, test.limit)
+}
}
func TestReaderDict(t *testing.T) {
...
@@ -193,15 +193,17 @@ func (w *huffmanBitWriter) writeBytes(bytes []byte) {
// numLiterals The number of literals in literalEncoding
// numOffsets The number of offsets in offsetEncoding
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int) {
-fillInt32s(w.codegenFreq, 0)
+for i := range w.codegenFreq {
+w.codegenFreq[i] = 0
+}
// Note that we are using codegen both as a temporary variable for holding
// a copy of the frequencies, and as the place where we put the result.
// This is fine because the output is always shorter than the input used
// so far.
codegen := w.codegen // cache
// Copy the concatenated code sizes to codegen. Put a marker at the end.
-copyUint8s(codegen[0:numLiterals], w.literalEncoding.codeBits)
-copyUint8s(codegen[numLiterals:numLiterals+numOffsets], w.offsetEncoding.codeBits)
+copy(codegen[0:numLiterals], w.literalEncoding.codeBits)
+copy(codegen[numLiterals:numLiterals+numOffsets], w.offsetEncoding.codeBits)
codegen[numLiterals+numOffsets] = badCode
size := codegen[0]
@@ -222,7 +224,10 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int) {
w.codegenFreq[size]++
count--
for count >= 3 {
-n := min(count, 6)
+n := 6
+if n > count {
+n = count
+}
codegen[outIndex] = 16
outIndex++
codegen[outIndex] = uint8(n - 3)
@@ -232,7 +237,10 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int) {
}
} else {
for count >= 11 {
-n := min(count, 138)
+n := 138
+if n > count {
+n = count
+}
codegen[outIndex] = 18
outIndex++
codegen[outIndex] = uint8(n - 11)
@@ -351,8 +359,12 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
if w.err != nil {
return
}
-fillInt32s(w.literalFreq, 0)
-fillInt32s(w.offsetFreq, 0)
+for i := range w.literalFreq {
+w.literalFreq[i] = 0
+}
+for i := range w.offsetFreq {
+w.offsetFreq[i] = 0
+}
n := len(tokens)
tokens = tokens[0 : n+1]
...
@@ -195,7 +195,9 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// The tree can't have greater depth than n - 1, no matter what. This
// saves a little bit of work in some small cases
-maxBits = minInt32(maxBits, n-1)
+if maxBits > n-1 {
+maxBits = n - 1
+}
// Create information about each of the levels.
// A bogus "Level 0" whose sole purpose is so that
...
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
func min(left int, right int) int {
if left < right {
return left
}
return right
}
func minInt32(left int32, right int32) int32 {
if left < right {
return left
}
return right
}
func max(left int, right int) int {
if left > right {
return left
}
return right
}
func fillInts(a []int, value int) {
for i := range a {
a[i] = value
}
}
func fillInt32s(a []int32, value int32) {
for i := range a {
a[i] = value
}
}
func fillBytes(a []byte, value byte) {
for i := range a {
a[i] = value
}
}
func fillInt8s(a []int8, value int8) {
for i := range a {
a[i] = value
}
}
func fillUint8s(a []uint8, value uint8) {
for i := range a {
a[i] = value
}
}
func copyInt8s(dst []int8, src []int8) int {
cnt := min(len(dst), len(src))
for i := 0; i < cnt; i++ {
dst[i] = src[i]
}
return cnt
}
func copyUint8s(dst []uint8, src []uint8) int {
cnt := min(len(dst), len(src))
for i := 0; i < cnt; i++ {
dst[i] = src[i]
}
return cnt
}
@@ -37,8 +37,8 @@ func makeReader(r io.Reader) flate.Reader {
return bufio.NewReader(r)
}
-var HeaderError = errors.New("invalid gzip header")
-var ChecksumError = errors.New("gzip checksum error")
+var ErrHeader = errors.New("invalid gzip header")
+var ErrChecksum = errors.New("gzip checksum error")
// The gzip file stores a header giving metadata about the compressed file.
// That header is exposed as the fields of the Compressor and Decompressor structs.
@@ -59,7 +59,7 @@ type Header struct {
// Only the first header is recorded in the Decompressor fields.
//
// Gzip files store a length and checksum of the uncompressed data.
-// The Decompressor will return a ChecksumError when Read
+// The Decompressor will return a ErrChecksum when Read
// reaches the end of the uncompressed data if it does not
// have the expected length or checksum. Clients should treat data
// returned by Read as tentative until they receive the successful
@@ -99,7 +99,7 @@ func (z *Decompressor) readString() (string, error) {
needconv := false
for i := 0; ; i++ {
if i >= len(z.buf) {
-return "", HeaderError
+return "", ErrHeader
}
z.buf[i], err = z.r.ReadByte()
if err != nil {
@@ -137,7 +137,7 @@ func (z *Decompressor) readHeader(save bool) error {
return err
}
if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
-return HeaderError
+return ErrHeader
}
z.flg = z.buf[3]
if save {
@@ -188,7 +188,7 @@ func (z *Decompressor) readHeader(save bool) error {
}
sum := z.digest.Sum32() & 0xFFFF
if n != sum {
-return HeaderError
+return ErrHeader
}
}
@@ -221,7 +221,7 @@ func (z *Decompressor) Read(p []byte) (n int, err error) {
crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8])
sum := z.digest.Sum32()
if sum != crc32 || isize != z.size {
-z.err = ChecksumError
+z.err = ErrChecksum
return 0, z.err
}
...
@@ -232,7 +232,7 @@ var gunzipTests = []gunzipTest{
0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00,
0x00, 0x00, 'g', 'a', 'r', 'b', 'a', 'g', 'e', '!', '!', '!',
},
-HeaderError,
+ErrHeader,
},
{ // has 1 non-empty fixed huffman block not enough header
"hello.txt",
@@ -260,7 +260,7 @@ var gunzipTests = []gunzipTest{
0x02, 0x00, 0xff, 0xff, 0xff, 0xff, 0x0c, 0x00,
0x00, 0x00,
},
-ChecksumError,
+ErrChecksum,
},
{ // has 1 non-empty fixed huffman block but corrupt size
"hello.txt",
@@ -274,7 +274,7 @@ var gunzipTests = []gunzipTest{
0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0xff, 0x00,
0x00, 0x00,
},
-ChecksumError,
+ErrChecksum,
},
}
...
@@ -34,9 +34,9 @@ import (
const zlibDeflate = 8
-var ChecksumError = errors.New("zlib checksum error")
-var HeaderError = errors.New("invalid zlib header")
-var DictionaryError = errors.New("invalid zlib dictionary")
+var ErrChecksum = errors.New("zlib checksum error")
+var ErrHeader = errors.New("invalid zlib header")
+var ErrDictionary = errors.New("invalid zlib dictionary")
type reader struct {
r flate.Reader
@@ -68,7 +68,7 @@ func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) {
}
h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
-return nil, HeaderError
+return nil, ErrHeader
}
if z.scratch[1]&0x20 != 0 {
_, err = io.ReadFull(z.r, z.scratch[0:4])
@@ -77,7 +77,7 @@ func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) {
}
checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
if checksum != adler32.Checksum(dict) {
-return nil, DictionaryError
+return nil, ErrDictionary
}
z.decompressor = flate.NewReaderDict(z.r, dict)
} else {
@@ -110,7 +110,7 @@ func (z *reader) Read(p []byte) (n int, err error) {
// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
if checksum != z.digest.Sum32() {
-z.err = ChecksumError
+z.err = ErrChecksum
return 0, z.err
}
return
...
@@ -45,14 +45,14 @@ var zlibTests = []zlibTest{
"",
[]byte{0x78, 0x9f, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01},
nil,
-HeaderError,
+ErrHeader,
},
{
"bad checksum",
"",
[]byte{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0xff},
nil,
-ChecksumError,
+ErrChecksum,
},
{
"not enough data",
@@ -95,7 +95,7 @@ var zlibTests = []zlibTest{
[]byte{
0x48, 0x65, 0x6c, 0x6c,
},
-DictionaryError,
+ErrDictionary,
},
}
...
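ErrDictionary is only reachable through NewReaderDict, which checks the caller's preset dictionary against the DICTID recorded in the stream. A round-trip sketch, assuming the writer-side constructor that takes a dictionary is NewWriterLevelDict (treat that name as an assumption for this particular weekly; the dictionary and payload are made up):

// Example (illustrative): compressing with a preset dictionary and
// decompressing with NewReaderDict; a mismatched dictionary would
// surface as zlib.ErrDictionary.
package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io/ioutil"
)

func main() {
	dict := []byte("common prefix shared by writer and reader") // illustrative
	var compressed bytes.Buffer

	w, err := zlib.NewWriterLevelDict(&compressed, zlib.DefaultCompression, dict)
	if err != nil {
		fmt.Println("writer:", err)
		return
	}
	w.Write([]byte("common prefix shared by writer and reader, then payload"))
	w.Close()

	r, err := zlib.NewReaderDict(&compressed, dict)
	if err != nil {
		fmt.Println("reader:", err) // zlib.ErrDictionary if dict is wrong
		return
	}
	out, _ := ioutil.ReadAll(r)
	r.Close()
	fmt.Printf("%s\n", out)
}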
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bcrypt
import "encoding/base64"
const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
var bcEncoding = base64.NewEncoding(alphabet)
func base64Encode(src []byte) []byte {
n := bcEncoding.EncodedLen(len(src))
dst := make([]byte, n)
bcEncoding.Encode(dst, src)
for dst[n-1] == '=' {
n--
}
return dst[:n]
}
func base64Decode(src []byte) ([]byte, error) {
numOfEquals := 4 - (len(src) % 4)
for i := 0; i < numOfEquals; i++ {
src = append(src, '=')
}
dst := make([]byte, bcEncoding.DecodedLen(len(src)))
n, err := bcEncoding.Decode(dst, src)
if err != nil {
return nil, err
}
return dst[:n], nil
}
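bcrypt's base64 uses a non-standard alphabet and strips the '=' padding, which is why encoding/base64 is wrapped here rather than used directly. A standalone round trip mirroring the two helpers above (the salt bytes are arbitrary):

// Example (illustrative): the same unpadded encode/decode round trip done
// directly with encoding/base64 and bcrypt's alphabet.
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
)

const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

var bcEncoding = base64.NewEncoding(alphabet)

func main() {
	salt := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}

	// Encode, then drop the trailing '=' padding, as base64Encode does.
	enc := []byte(bcEncoding.EncodeToString(salt))
	for len(enc) > 0 && enc[len(enc)-1] == '=' {
		enc = enc[:len(enc)-1]
	}
	fmt.Printf("encoded: %s\n", enc)

	// Re-pad to a multiple of 4 before decoding, roughly what base64Decode does.
	padded := append([]byte{}, enc...)
	for len(padded)%4 != 0 {
		padded = append(padded, '=')
	}
	dec := make([]byte, bcEncoding.DecodedLen(len(padded)))
	n, err := bcEncoding.Decode(dec, padded)
	if err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Println("round trip ok:", bytes.Equal(dec[:n], salt))
}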
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
package bcrypt
// The code is a port of Provos and Mazières's C implementation.
import (
"crypto/blowfish"
"crypto/rand"
"crypto/subtle"
"errors"
"fmt"
"io"
"strconv"
)
const (
MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
)
// The error returned from CompareHashAndPassword when a password and hash do
// not match.
var MismatchedHashAndPasswordError = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
// The error returned from CompareHashAndPassword when a hash is too short to
// be a bcrypt hash.
var HashTooShortError = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
// The error returned from CompareHashAndPassword when a hash was created with
// a bcrypt algorithm newer than this implementation.
type HashVersionTooNewError byte
func (hv HashVersionTooNewError) Error() string {
return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
}
// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
type InvalidHashPrefixError byte
func (ih InvalidHashPrefixError) Error() string {
return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
}
type InvalidCostError int
func (ic InvalidCostError) Error() string {
return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
}
const (
majorVersion = '2'
minorVersion = 'a'
maxSaltSize = 16
maxCryptedHashSize = 23
encodedSaltSize = 22
encodedHashSize = 31
minHashSize = 59
)
// magicCipherData is an IV for the 64 Blowfish encryption calls in
// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
var magicCipherData = []byte{
0x4f, 0x72, 0x70, 0x68,
0x65, 0x61, 0x6e, 0x42,
0x65, 0x68, 0x6f, 0x6c,
0x64, 0x65, 0x72, 0x53,
0x63, 0x72, 0x79, 0x44,
0x6f, 0x75, 0x62, 0x74,
}
type hashed struct {
hash []byte
salt []byte
cost uint32 // allowed range is MinCost to MaxCost
major byte
minor byte
}
// GenerateFromPassword returns the bcrypt hash of the password at the given
// cost. If the cost given is less than MinCost, the cost will be set to
// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
// to compare the returned hashed password with its cleartext version.
func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
p, err := newFromPassword(password, cost)
if err != nil {
return nil, err
}
return p.Hash(), nil
}
// CompareHashAndPassword compares a bcrypt hashed password with its possible
// plaintext equivalent. Note: Using bytes.Equal for this job is
// insecure. Returns nil on success, or an error on failure.
func CompareHashAndPassword(hashedPassword, password []byte) error {
p, err := newFromHash(hashedPassword)
if err != nil {
return err
}
otherHash, err := bcrypt(password, p.cost, p.salt)
if err != nil {
return err
}
otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
return nil
}
return MismatchedHashAndPasswordError
}
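Typical use of the two exported entry points: hash once, store the output, and compare at login time. A short sketch using the import path as this tree lays it out (crypto/bcrypt); the password literal is obviously illustrative:

// Example (illustrative): hashing a password and verifying it later.
package main

import (
	"crypto/bcrypt"
	"fmt"
)

func main() {
	password := []byte("correct horse battery staple")

	// Cost 10 (DefaultCost) means 1<<10 key-schedule rounds.
	hash, err := bcrypt.GenerateFromPassword(password, bcrypt.DefaultCost)
	if err != nil {
		fmt.Println("hash:", err)
		return
	}
	fmt.Printf("stored hash: %s\n", hash)

	// Later, at login time:
	if err := bcrypt.CompareHashAndPassword(hash, password); err != nil {
		fmt.Println("login rejected:", err)
		return
	}
	fmt.Println("login ok")
}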
func newFromPassword(password []byte, cost int) (*hashed, error) {
if cost < MinCost {
cost = DefaultCost
}
p := new(hashed)
p.major = majorVersion
p.minor = minorVersion
err := checkCost(cost)
if err != nil {
return nil, err
}
p.cost = uint32(cost)
unencodedSalt := make([]byte, maxSaltSize)
_, err = io.ReadFull(rand.Reader, unencodedSalt)
if err != nil {
return nil, err
}
p.salt = base64Encode(unencodedSalt)
hash, err := bcrypt(password, p.cost, p.salt)
if err != nil {
return nil, err
}
p.hash = hash
return p, err
}
func newFromHash(hashedSecret []byte) (*hashed, error) {
if len(hashedSecret) < minHashSize {
return nil, HashTooShortError
}
p := new(hashed)
n, err := p.decodeVersion(hashedSecret)
if err != nil {
return nil, err
}
hashedSecret = hashedSecret[n:]
n, err = p.decodeCost(hashedSecret)
if err != nil {
return nil, err
}
hashedSecret = hashedSecret[n:]
// The "+2" is here because we'll have to append at most 2 '=' to the salt
// when base64 decoding it in expensiveBlowfishSetup().
p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
copy(p.salt, hashedSecret[:encodedSaltSize])
hashedSecret = hashedSecret[encodedSaltSize:]
p.hash = make([]byte, len(hashedSecret))
copy(p.hash, hashedSecret)
return p, nil
}
func bcrypt(password []byte, cost uint32, salt []byte) ([]byte, error) {
cipherData := make([]byte, len(magicCipherData))
copy(cipherData, magicCipherData)
c, err := expensiveBlowfishSetup(password, cost, salt)
if err != nil {
return nil, err
}
for i := 0; i < 24; i += 8 {
for j := 0; j < 64; j++ {
c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
}
}
// Bug compatibility with C bcrypt implementations. We only encode 23 of
// the 24 bytes encrypted.
hsh := base64Encode(cipherData[:maxCryptedHashSize])
return hsh, nil
}
func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
csalt, err := base64Decode(salt)
if err != nil {
return nil, err
}
// Bug compatibility with C bcrypt implementations. They use the trailing
// NULL in the key string during expansion.
ckey := append(key, 0)
c, err := blowfish.NewSaltedCipher(ckey, csalt)
if err != nil {
return nil, err
}
rounds := 1 << cost
for i := 0; i < rounds; i++ {
blowfish.ExpandKey(ckey, c)
blowfish.ExpandKey(csalt, c)
}
return c, nil
}
func (p *hashed) Hash() []byte {
arr := make([]byte, 60)
arr[0] = '$'
arr[1] = p.major
n := 2
if p.minor != 0 {
arr[2] = p.minor
n = 3
}
arr[n] = '$'
n += 1
copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
n += 2
arr[n] = '$'
n += 1
copy(arr[n:], p.salt)
n += encodedSaltSize
copy(arr[n:], p.hash)
n += encodedHashSize
return arr[:n]
}
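Hash produces the usual modular-crypt layout: '$' plus version, '$' plus the two-digit cost, '$' plus the 22 salt characters immediately followed by the 31 hash characters, 60 bytes in all. A small sketch that slices the test vector from bcrypt_test.go below into those fields:

// Example (illustrative): the fields of a bcrypt string, using the test
// vector that appears in the tests.
package main

import "fmt"

func main() {
	h := "$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga"
	fmt.Println("version:", h[1:3])  // 2a
	fmt.Println("cost:   ", h[4:6])  // 10
	fmt.Println("salt:   ", h[7:29]) // 22 base64 characters
	fmt.Println("hash:   ", h[29:])  // 31 base64 characters
}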
func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
if sbytes[0] != '$' {
return -1, InvalidHashPrefixError(sbytes[0])
}
if sbytes[1] > majorVersion {
return -1, HashVersionTooNewError(sbytes[1])
}
p.major = sbytes[1]
n := 3
if sbytes[2] != '$' {
p.minor = sbytes[2]
n++
}
return n, nil
}
// sbytes should begin where decodeVersion left off.
func (p *hashed) decodeCost(sbytes []byte) (int, error) {
cost, err := strconv.Atoi(string(sbytes[0:2]))
if err != nil {
return -1, err
}
err = checkCost(cost)
if err != nil {
return -1, err
}
p.cost = uint32(cost)
return 3, nil
}
func (p *hashed) String() string {
return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
}
func checkCost(cost int) error {
if cost < MinCost || cost > MaxCost {
return InvalidCostError(cost)
}
return nil
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bcrypt
import (
"bytes"
"testing"
)
func TestBcryptingIsEasy(t *testing.T) {
pass := []byte("mypassword")
hp, err := GenerateFromPassword(pass, 0)
if err != nil {
t.Fatalf("GenerateFromPassword error: %s", err)
}
if CompareHashAndPassword(hp, pass) != nil {
t.Errorf("%v should hash %s correctly", hp, pass)
}
notPass := "notthepass"
err = CompareHashAndPassword(hp, []byte(notPass))
if err != MismatchedHashAndPasswordError {
t.Errorf("%v and %s should be mismatched", hp, notPass)
}
}
func TestBcryptingIsCorrect(t *testing.T) {
pass := []byte("allmine")
salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
expectedHash := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
hash, err := bcrypt(pass, 10, salt)
if err != nil {
t.Fatalf("bcrypt blew up: %v", err)
}
if !bytes.HasSuffix(expectedHash, hash) {
t.Errorf("%v should be the suffix of %v", hash, expectedHash)
}
h, err := newFromHash(expectedHash)
if err != nil {
t.Errorf("Unable to parse %s: %v", string(expectedHash), err)
}
// This is not the safe way to compare these hashes. We do this only for
// testing clarity. Use bcrypt.CompareHashAndPassword()
if err == nil && !bytes.Equal(expectedHash, h.Hash()) {
t.Errorf("Parsed hash %v should equal %v", h.Hash(), expectedHash)
}
}
func TestTooLongPasswordsWork(t *testing.T) {
salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
// One byte over the usual 56 byte limit that blowfish has
tooLongPass := []byte("012345678901234567890123456789012345678901234567890123456")
tooLongExpected := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C")
hash, err := bcrypt(tooLongPass, 10, salt)
if err != nil {
t.Fatalf("bcrypt blew up on long password: %v", err)
}
if !bytes.HasSuffix(tooLongExpected, hash) {
t.Errorf("%v should be the suffix of %v", hash, tooLongExpected)
}
}
type InvalidHashTest struct {
err error
hash []byte
}
var invalidTests = []InvalidHashTest{
{HashTooShortError, []byte("$2a$10$fooo")},
{HashTooShortError, []byte("$2a")},
{HashVersionTooNewError('3'), []byte("$3a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
{InvalidHashPrefixError('%'), []byte("%2a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
{InvalidCostError(32), []byte("$2a$32$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
}
func TestInvalidHashErrors(t *testing.T) {
check := func(name string, expected, err error) {
if err == nil {
t.Errorf("%s: Should have returned an error", name)
}
if err != nil && err != expected {
t.Errorf("%s gave err %v but should have given %v", name, err, expected)
}
}
for _, iht := range invalidTests {
_, err := newFromHash(iht.hash)
check("newFromHash", iht.err, err)
err = CompareHashAndPassword(iht.hash, []byte("anything"))
check("CompareHashAndPassword", iht.err, err)
}
}
func TestUnpaddedBase64Encoding(t *testing.T) {
original := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30}
encodedOriginal := []byte("XajjQvNhvvRt5GSeFk1xFe")
encoded := base64Encode(original)
if !bytes.Equal(encodedOriginal, encoded) {
t.Errorf("Encoded %v should have equaled %v", encoded, encodedOriginal)
}
decoded, err := base64Decode(encodedOriginal)
if err != nil {
t.Fatalf("base64Decode blew up: %s", err)
}
if !bytes.Equal(decoded, original) {
t.Errorf("Decoded %v should have equaled %v", decoded, original)
}
}
func TestCost(t *testing.T) {
if testing.Short() {
return
}
pass := []byte("mypassword")
for c := 0; c < MinCost; c++ {
p, _ := newFromPassword(pass, c)
if p.cost != uint32(DefaultCost) {
t.Errorf("newFromPassword should default costs below %d to %d, but was %d", MinCost, DefaultCost, p.cost)
}
}
p, _ := newFromPassword(pass, 14)
if p.cost != 14 {
t.Errorf("newFromPassword should default cost to 14, but was %d", p.cost)
}
hp, _ := newFromHash(p.Hash())
if p.cost != hp.cost {
t.Errorf("newFromHash should maintain the cost at %d, but was %d", p.cost, hp.cost)
}
_, err := newFromPassword(pass, 32)
if err == nil {
t.Fatalf("newFromPassword: should return a cost error")
}
if err != InvalidCostError(32) {
t.Errorf("newFromPassword: should return cost error, got %#v", err)
}
}
func TestCostReturnsWithLeadingZeroes(t *testing.T) {
hp, _ := newFromPassword([]byte("abcdefgh"), 7)
cost := hp.Hash()[4:7]
expected := []byte("07$")
if !bytes.Equal(expected, cost) {
t.Errorf("single digit costs in hash should have leading zeros: was %v instead of %v", cost, expected)
}
}
func TestMinorNotRequired(t *testing.T) {
noMinorHash := []byte("$2$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
h, err := newFromHash(noMinorHash)
if err != nil {
t.Fatalf("No minor hash blew up: %s", err)
}
if h.minor != 0 {
t.Errorf("Should leave minor version at 0, but was %d", h.minor)
}
if !bytes.Equal(noMinorHash, h.Hash()) {
t.Errorf("Should generate hash %v, but created %v", noMinorHash, h.Hash())
}
}
func BenchmarkEqual(b *testing.B) {
b.StopTimer()
passwd := []byte("somepasswordyoulike")
hash, _ := GenerateFromPassword(passwd, 10)
b.StartTimer()
for i := 0; i < b.N; i++ {
CompareHashAndPassword(hash, passwd)
}
}
func BenchmarkGeneration(b *testing.B) {
b.StopTimer()
passwd := []byte("mylongpassword1234")
b.StartTimer()
for i := 0; i < b.N; i++ {
GenerateFromPassword(passwd, 10)
}
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blowfish
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
// by the bcrypt package to reuse the Blowfish key schedule during its
// set up. It's unlikely that you need to use this directly.
func ExpandKey(key []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])&0x000000FF
j++
if j >= len(key) {
j = 0
}
}
c.p[i] ^= d
}
var l, r uint32
for i := 0; i < 18; i += 2 {
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
}
}
// This is similar to ExpandKey, but folds the salt during the key
// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
// and specializing it here is useful.
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
j := 0
expandedKey := make([]uint32, 18)
for i := 0; i < 18; i++ {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])&0x000000FF
j++
if j >= len(key) {
j = 0
}
}
expandedKey[i] = d
c.p[i] ^= d
}
j = 0
expandedSalt := make([]uint32, 18)
for i := 0; i < 18; i++ {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(salt[j])&0x000000FF
j++
if j >= len(salt) {
j = 0
}
}
expandedSalt[i] = d
}
var l, r uint32
for i := 0; i < 18; i += 2 {
l ^= expandedSalt[i&2]
r ^= expandedSalt[(i&2)+1]
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s0[i+2], c.s0[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s1[i+2], c.s1[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s2[i+2], c.s2[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s3[i+2], c.s3[i+3] = l, r
}
}
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[0]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
xr ^= c.p[17]
return xr, xl
}
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[17]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
xr ^= c.p[0]
return xr, xl
}
func zero(x []uint32) {
for i := range x {
x[i] = 0
}
}
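// Sketch of the bcrypt-style use of ExpandKey that the comment above alludes
// to (assumptions: this package is importable as "crypto/blowfish" in this
// tree, and the loop shape below is illustrative rather than the bcrypt
// package's exact code).
package main

import (
	"crypto/blowfish"
	"fmt"
)

func expensiveKeySetup(password, salt []byte, cost uint) (*blowfish.Cipher, error) {
	c, err := blowfish.NewSaltedCipher(password, salt)
	if err != nil {
		return nil, err
	}
	// Re-run the key schedule 2^cost times, alternating password and salt,
	// which is what makes bcrypt deliberately slow.
	for i := uint64(0); i < 1<<cost; i++ {
		blowfish.ExpandKey(password, c)
		blowfish.ExpandKey(salt, c)
	}
	return c, nil
}

func main() {
	c, err := expensiveKeySetup([]byte("password"), []byte("0123456789abcdef"), 6)
	fmt.Println(c != nil, err)
}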
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blowfish
import (
"testing"
)
type CryptTest struct {
key []byte
in []byte
out []byte
}
// Test vector values are from http://www.schneier.com/code/vectors.txt.
var encryptTests = []CryptTest{
{
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
{
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
[]byte{0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A}},
{
[]byte{0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
[]byte{0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2}},
{
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
[]byte{0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D}},
{
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
[]byte{0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96}},
{
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7}},
{
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
{
[]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D}},
{
[]byte{0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57},
[]byte{0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42},
[]byte{0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B}},
{
[]byte{0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E},
[]byte{0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA},
[]byte{0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0}},
{
[]byte{0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86},
[]byte{0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72},
[]byte{0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 0xA4}},
{
[]byte{0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E},
[]byte{0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A},
[]byte{0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB}},
{
[]byte{0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6},
[]byte{0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2},
[]byte{0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A}},
{
[]byte{0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE},
[]byte{0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A},
[]byte{0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18}},
{
[]byte{0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6},
[]byte{0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2},
[]byte{0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98}},
{
[]byte{0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE},
[]byte{0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A},
[]byte{0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5}},
{
[]byte{0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16},
[]byte{0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02},
[]byte{0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79}},
{
[]byte{0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F},
[]byte{0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A},
[]byte{0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3}},
{
[]byte{0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46},
[]byte{0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32},
[]byte{0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69}},
{
[]byte{0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E},
[]byte{0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 0xCA},
[]byte{0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B}},
{
[]byte{0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76},
[]byte{0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62},
[]byte{0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E}},
{
[]byte{0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07},
[]byte{0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2},
[]byte{0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD}},
{
[]byte{0x49, 0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F},
[]byte{0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA},
[]byte{0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19}},
{
[]byte{0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7},
[]byte{0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92},
[]byte{0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3}},
{
[]byte{0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF},
[]byte{0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A},
[]byte{0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5}},
{
[]byte{0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6},
[]byte{0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2},
[]byte{0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78}},
{
[]byte{0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF},
[]byte{0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A},
[]byte{0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01}},
{
[]byte{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2}},
{
[]byte{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E},
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE}},
{
[]byte{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE},
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D}},
{
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
[]byte{0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4}},
{
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC}},
{
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A}},
{
[]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
[]byte{0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A}},
}
func TestCipherEncrypt(t *testing.T) {
for i, tt := range encryptTests {
c, err := NewCipher(tt.key)
if err != nil {
t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
continue
}
ct := make([]byte, len(tt.out))
c.Encrypt(ct, tt.in)
for j, v := range ct {
if v != tt.out[j] {
t.Errorf("Cipher.Encrypt, test vector #%d: cipher-text[%d] = %#x, expected %#x", i, j, v, tt.out[j])
break
}
}
}
}
func TestCipherDecrypt(t *testing.T) {
for i, tt := range encryptTests {
c, err := NewCipher(tt.key)
if err != nil {
t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
continue
}
pt := make([]byte, len(tt.in))
c.Decrypt(pt, tt.out)
for j, v := range pt {
if v != tt.in[j] {
t.Errorf("Cipher.Decrypt, test vector #%d: plain-text[%d] = %#x, expected %#x", i, j, v, tt.in[j])
break
}
}
}
}
func TestSaltedCipherKeyLength(t *testing.T) {
var key []byte
for i := 0; i < 4; i++ {
_, err := NewSaltedCipher(key, []byte{'a'})
if err != KeySizeError(i) {
t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(i))
}
key = append(key, 'a')
}
// A 57-byte key. One over the typical blowfish restriction.
key = []byte("012345678901234567890123456789012345678901234567890123456")
_, err := NewSaltedCipher(key, []byte{'a'})
if err != nil {
t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
}
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
package blowfish
// The code is a port of Bruce Schneier's C implementation.
// See http://www.schneier.com/blowfish.html.
import "strconv"
// The Blowfish block size in bytes.
const BlockSize = 8
// A Cipher is an instance of Blowfish encryption using a particular key.
type Cipher struct {
p [18]uint32
s0, s1, s2, s3 [256]uint32
}
type KeySizeError int
func (k KeySizeError) Error() string {
return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
}
// NewCipher creates and returns a Cipher.
// The key argument should be the Blowfish key, 4 to 56 bytes.
func NewCipher(key []byte) (*Cipher, error) {
var result Cipher
k := len(key)
if k < 4 || k > 56 {
return nil, KeySizeError(k)
}
initCipher(key, &result)
ExpandKey(key, &result)
return &result, nil
}
// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
var result Cipher
k := len(key)
if k < 4 {
return nil, KeySizeError(k)
}
initCipher(key, &result)
expandKeyWithSalt(key, salt, &result)
return &result, nil
}
// BlockSize returns the Blowfish block size, 8 bytes.
// It is necessary to satisfy the Block interface in the
// package "crypto/cipher".
func (c *Cipher) BlockSize() int { return BlockSize }
// Encrypt encrypts the 8-byte buffer src using the key k
// and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = encryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
// Decrypt decrypts the 8-byte buffer src using the key k
// and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = decryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
// Reset zeros the key data, so that it will no longer
// appear in the process's memory.
func (c *Cipher) Reset() {
zero(c.p[0:])
zero(c.s0[0:])
zero(c.s1[0:])
zero(c.s2[0:])
zero(c.s3[0:])
}
func initCipher(key []byte, c *Cipher) {
copy(c.p[0:], p[0:])
copy(c.s0[0:], s0[0:])
copy(c.s1[0:], s1[0:])
copy(c.s2[0:], s2[0:])
copy(c.s3[0:], s3[0:])
}
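// A minimal sketch of the CBC usage that the Encrypt comment above recommends
// for data longer than one block (assumptions: this package is importable as
// "crypto/blowfish"; the key, zero IV, and message are illustrative values).
package main

import (
	"crypto/blowfish"
	"crypto/cipher"
	"fmt"
)

func main() {
	c, err := blowfish.NewCipher([]byte("some key")) // 8-byte key, within the 4..56 range
	if err != nil {
		panic(err)
	}
	iv := make([]byte, blowfish.BlockSize)   // all-zero IV, for illustration only
	plaintext := []byte("16 byte message.")  // length must be a multiple of BlockSize
	ciphertext := make([]byte, len(plaintext))
	cipher.NewCBCEncrypter(c, iv).CryptBlocks(ciphertext, plaintext)
	fmt.Printf("%x\n", ciphertext)
}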
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cast5
import (
"bytes"
"encoding/hex"
"testing"
)
// This test vector is taken from RFC 2144, App B.1.
// Since the other two test vectors are for reduced-round variants, we can't
// use them.
var basicTests = []struct {
key, plainText, cipherText string
}{
{
"0123456712345678234567893456789a",
"0123456789abcdef",
"238b4fe5847e44b2",
},
}
func TestBasic(t *testing.T) {
for i, test := range basicTests {
key, _ := hex.DecodeString(test.key)
plainText, _ := hex.DecodeString(test.plainText)
expected, _ := hex.DecodeString(test.cipherText)
c, err := NewCipher(key)
if err != nil {
t.Errorf("#%d: failed to create Cipher: %s", i, err)
continue
}
var cipherText [BlockSize]byte
c.Encrypt(cipherText[:], plainText)
if !bytes.Equal(cipherText[:], expected) {
t.Errorf("#%d: got:%x want:%x", i, cipherText, expected)
}
var plainTextAgain [BlockSize]byte
c.Decrypt(plainTextAgain[:], cipherText[:])
if !bytes.Equal(plainTextAgain[:], plainText) {
t.Errorf("#%d: got:%x want:%x", i, plainTextAgain, plainText)
}
}
}
// TestFull performs the test specified in RFC 2144, App B.2.
// However, due to the length of time taken, it's disabled here and a more
// limited version is included, below.
func TestFull(t *testing.T) {
// This is too slow for normal testing
return
a, b := iterate(1000000)
const expectedA = "eea9d0a249fd3ba6b3436fb89d6dca92"
const expectedB = "b2c95eb00c31ad7180ac05b8e83d696e"
if hex.EncodeToString(a) != expectedA {
t.Errorf("a: got:%x want:%s", a, expectedA)
}
if hex.EncodeToString(b) != expectedB {
t.Errorf("b: got:%x want:%s", b, expectedB)
}
}
func iterate(iterations int) ([]byte, []byte) {
const initValueHex = "0123456712345678234567893456789a"
initValue, _ := hex.DecodeString(initValueHex)
var a, b [16]byte
copy(a[:], initValue)
copy(b[:], initValue)
for i := 0; i < iterations; i++ {
c, _ := NewCipher(b[:])
c.Encrypt(a[:8], a[:8])
c.Encrypt(a[8:], a[8:])
c, _ = NewCipher(a[:])
c.Encrypt(b[:8], b[:8])
c.Encrypt(b[8:], b[8:])
}
return a[:], b[:]
}
func TestLimited(t *testing.T) {
a, b := iterate(1000)
const expectedA = "23f73b14b02a2ad7dfb9f2c35644798d"
const expectedB = "e5bf37eff14c456a40b21ce369370a9f"
if hex.EncodeToString(a) != expectedA {
t.Errorf("a: got:%x want:%s", a, expectedA)
}
if hex.EncodeToString(b) != expectedB {
t.Errorf("b: got:%x want:%s", b, expectedB)
}
}
...@@ -35,11 +35,11 @@ func (invalidPublicKeyError) Error() string { ...@@ -35,11 +35,11 @@ func (invalidPublicKeyError) Error() string {
return "crypto/dsa: invalid public key" return "crypto/dsa: invalid public key"
} }
// InvalidPublicKeyError results when a public key is not usable by this code. // ErrInvalidPublicKey results when a public key is not usable by this code.
// FIPS is quite strict about the format of DSA keys, but other code may be // FIPS is quite strict about the format of DSA keys, but other code may be
// less so. Thus, when using keys which may have been generated by other code, // less so. Thus, when using keys which may have been generated by other code,
// this error must be handled. // this error must be handled.
var InvalidPublicKeyError = invalidPublicKeyError(0) var ErrInvalidPublicKey error = invalidPublicKeyError(0)
// ParameterSizes is a enumeration of the acceptable bit lengths of the primes // ParameterSizes is a enumeration of the acceptable bit lengths of the primes
// in a set of DSA parameters. See FIPS 186-3, section 4.2. // in a set of DSA parameters. See FIPS 186-3, section 4.2.
...@@ -194,7 +194,7 @@ func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err err ...@@ -194,7 +194,7 @@ func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err err
n := priv.Q.BitLen() n := priv.Q.BitLen()
if n&7 != 0 { if n&7 != 0 {
err = InvalidPublicKeyError err = ErrInvalidPublicKey
return return
} }
n >>= 3 n >>= 3
......
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package md4 implements the MD4 hash algorithm as defined in RFC 1320.
package md4
import (
"crypto"
"hash"
)
func init() {
crypto.RegisterHash(crypto.MD4, New)
}
// The size of an MD4 checksum in bytes.
const Size = 16
// The blocksize of MD4 in bytes.
const BlockSize = 64
const (
_Chunk = 64
_Init0 = 0x67452301
_Init1 = 0xEFCDAB89
_Init2 = 0x98BADCFE
_Init3 = 0x10325476
)
// digest represents the partial evaluation of a checksum.
type digest struct {
s [4]uint32
x [_Chunk]byte
nx int
len uint64
}
func (d *digest) Reset() {
d.s[0] = _Init0
d.s[1] = _Init1
d.s[2] = _Init2
d.s[3] = _Init3
d.nx = 0
d.len = 0
}
// New returns a new hash.Hash computing the MD4 checksum.
func New() hash.Hash {
d := new(digest)
d.Reset()
return d
}
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
n := len(p)
if n > _Chunk-d.nx {
n = _Chunk - d.nx
}
for i := 0; i < n; i++ {
d.x[d.nx+i] = p[i]
}
d.nx += n
if d.nx == _Chunk {
_Block(d, d.x[0:])
d.nx = 0
}
p = p[n:]
}
n := _Block(d, p)
p = p[n:]
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
func (d0 *digest) Sum(in []byte) []byte {
// Make a copy of d0, so that caller can keep writing and summing.
d := new(digest)
*d = *d0
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
len := d.len
var tmp [64]byte
tmp[0] = 0x80
if len%64 < 56 {
d.Write(tmp[0 : 56-len%64])
} else {
d.Write(tmp[0 : 64+56-len%64])
}
// Length in bits.
len <<= 3
for i := uint(0); i < 8; i++ {
tmp[i] = byte(len >> (8 * i))
}
d.Write(tmp[0:8])
if d.nx != 0 {
panic("d.nx != 0")
}
for _, s := range d.s {
in = append(in, byte(s>>0))
in = append(in, byte(s>>8))
in = append(in, byte(s>>16))
in = append(in, byte(s>>24))
}
return in
}
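// Usage sketch (assumption: this package is importable as "crypto/md4"):
// compute an MD4 checksum incrementally and print it in hex.
package main

import (
	"crypto/md4"
	"fmt"
	"io"
)

func main() {
	h := md4.New()
	io.WriteString(h, "The quick brown fox ")
	io.WriteString(h, "jumps over the lazy dog")
	fmt.Printf("%x\n", h.Sum(nil)) // Size (16) bytes of digest, hex encoded
}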
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package md4
import (
"fmt"
"io"
"testing"
)
type md4Test struct {
out string
in string
}
var golden = []md4Test{
{"31d6cfe0d16ae931b73c59d7e0c089c0", ""},
{"bde52cb31de33e46245e05fbdbd6fb24", "a"},
{"ec388dd78999dfc7cf4632465693b6bf", "ab"},
{"a448017aaf21d8525fc10ae87aa6729d", "abc"},
{"41decd8f579255c5200f86a4bb3ba740", "abcd"},
{"9803f4a34e8eb14f96adba49064a0c41", "abcde"},
{"804e7f1c2586e50b49ac65db5b645131", "abcdef"},
{"752f4adfe53d1da0241b5bc216d098fc", "abcdefg"},
{"ad9daf8d49d81988590a6f0e745d15dd", "abcdefgh"},
{"1e4e28b05464316b56402b3815ed2dfd", "abcdefghi"},
{"dc959c6f5d6f9e04e4380777cc964b3d", "abcdefghij"},
{"1b5701e265778898ef7de5623bbe7cc0", "Discard medicine more than two years old."},
{"d7f087e090fe7ad4a01cb59dacc9a572", "He who has a shady past knows that nice guys finish last."},
{"a6f8fd6df617c72837592fc3570595c9", "I wouldn't marry him with a ten foot pole."},
{"c92a84a9526da8abc240c05d6b1a1ce0", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"},
{"f6013160c4dcb00847069fee3bb09803", "The days of the digital watch are numbered. -Tom Stoppard"},
{"2c3bb64f50b9107ed57640fe94bec09f", "Nepal premier won't resign."},
{"45b7d8a32c7806f2f7f897332774d6e4", "For every action there is an equal and opposite government program."},
{"b5b4f9026b175c62d7654bdc3a1cd438", "His money is twice tainted: 'taint yours and 'taint mine."},
{"caf44e80f2c20ce19b5ba1cab766e7bd", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"},
{"191fae6707f496aa54a6bce9f2ecf74d", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"},
{"9ddc753e7a4ccee6081cd1b45b23a834", "size: a.out: bad magic"},
{"8d050f55b1cadb9323474564be08a521", "The major problem is with sendmail. -Mark Horton"},
{"ad6e2587f74c3e3cc19146f6127fa2e3", "Give me a rock, paper and scissors and I will move the world. CCFestoon"},
{"1d616d60a5fabe85589c3f1566ca7fca", "If the enemy is within range, then so are you."},
{"aec3326a4f496a2ced65a1963f84577f", "It's well we cannot hear the screams/That we create in others' dreams."},
{"77b4fd762d6b9245e61c50bf6ebf118b", "You remind me of a TV show, but that's all right: I watch it anyway."},
{"e8f48c726bae5e516f6ddb1a4fe62438", "C is as portable as Stonehedge!!"},
{"a3a84366e7219e887423b01f9be7166e", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"},
{"a6b7aa35157e984ef5d9b7f32e5fbb52", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"},
{"75661f0545955f8f9abeeb17845f3fd6", "How can you write a big system without C++? -Paul Glick"},
}
func TestGolden(t *testing.T) {
for i := 0; i < len(golden); i++ {
g := golden[i]
c := New()
for j := 0; j < 3; j++ {
if j < 2 {
io.WriteString(c, g.in)
} else {
io.WriteString(c, g.in[0:len(g.in)/2])
c.Sum(nil)
io.WriteString(c, g.in[len(g.in)/2:])
}
s := fmt.Sprintf("%x", c.Sum(nil))
if s != g.out {
t.Fatalf("md4[%d](%s) = %s want %s", j, g.in, s, g.out)
}
c.Reset()
}
}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// MD4 block step.
// In its own file so that a faster assembly or C version
// can be substituted easily.
package md4
var shift1 = []uint{3, 7, 11, 19}
var shift2 = []uint{3, 5, 9, 13}
var shift3 = []uint{3, 9, 11, 15}
var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15}
var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15}
func _Block(dig *digest, p []byte) int {
a := dig.s[0]
b := dig.s[1]
c := dig.s[2]
d := dig.s[3]
n := 0
var X [16]uint32
for len(p) >= _Chunk {
aa, bb, cc, dd := a, b, c, d
j := 0
for i := 0; i < 16; i++ {
X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
j += 4
}
// If this needs to be made faster in the future,
// the usual trick is to unroll each of these
// loops by a factor of 4; that lets you replace
// the shift[] lookups with constants and,
// with suitable variable renaming in each
// unrolled body, delete the a, b, c, d = d, a, b, c
// (or you can let the optimizer do the renaming).
//
// The index variables are uint so that % by a power
// of two can be optimized easily by a compiler.
// Round 1.
for i := uint(0); i < 16; i++ {
x := i
s := shift1[i%4]
f := ((c ^ d) & b) ^ d
a += f + X[x]
a = a<<s | a>>(32-s)
a, b, c, d = d, a, b, c
}
// Round 2.
for i := uint(0); i < 16; i++ {
x := xIndex2[i]
s := shift2[i%4]
g := (b & c) | (b & d) | (c & d)
a += g + X[x] + 0x5a827999
a = a<<s | a>>(32-s)
a, b, c, d = d, a, b, c
}
// Round 3.
for i := uint(0); i < 16; i++ {
x := xIndex3[i]
s := shift3[i%4]
h := b ^ c ^ d
a += h + X[x] + 0x6ed9eba1
a = a<<s | a>>(32-s)
a, b, c, d = d, a, b, c
}
a += aa
b += bb
c += cc
d += dd
p = p[_Chunk:]
n += _Chunk
}
dig.s[0] = a
dig.s[1] = b
dig.s[2] = c
dig.s[3] = d
return n
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses
// are signed messages attesting to the validity of a certificate for a small
// period of time. This is used to manage revocation for X.509 certificates.
package ocsp
import (
"crypto"
"crypto/rsa"
_ "crypto/sha1"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"time"
)
var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1})
var idSHA1WithRSA = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 1, 5})
// These are internal structures that reflect the ASN.1 structure of an OCSP
// response. See RFC 2560, section 4.2.
const (
ocspSuccess = 0
ocspMalformed = 1
ocspInternalError = 2
ocspTryLater = 3
ocspSigRequired = 4
ocspUnauthorized = 5
)
type certID struct {
HashAlgorithm pkix.AlgorithmIdentifier
NameHash []byte
IssuerKeyHash []byte
SerialNumber asn1.RawValue
}
type responseASN1 struct {
Status asn1.Enumerated
Response responseBytes `asn1:"explicit,tag:0"`
}
type responseBytes struct {
ResponseType asn1.ObjectIdentifier
Response []byte
}
type basicResponse struct {
TBSResponseData responseData
SignatureAlgorithm pkix.AlgorithmIdentifier
Signature asn1.BitString
Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"`
}
type responseData struct {
Raw asn1.RawContent
Version int `asn1:"optional,default:1,explicit,tag:0"`
RequestorName pkix.RDNSequence `asn1:"optional,explicit,tag:1"`
KeyHash []byte `asn1:"optional,explicit,tag:2"`
ProducedAt time.Time
Responses []singleResponse
}
type singleResponse struct {
CertID certID
Good asn1.Flag `asn1:"explicit,tag:0,optional"`
Revoked revokedInfo `asn1:"explicit,tag:1,optional"`
Unknown asn1.Flag `asn1:"explicit,tag:2,optional"`
ThisUpdate time.Time
NextUpdate time.Time `asn1:"explicit,tag:0,optional"`
}
type revokedInfo struct {
RevocationTime time.Time
Reason int `asn1:"explicit,tag:0,optional"`
}
// This is the exposed reflection of the internal OCSP structures.
const (
// Good means that the certificate is valid.
Good = iota
// Revoked means that the certificate has been deliberately revoked.
Revoked = iota
// Unknown means that the OCSP responder doesn't know about the certificate.
Unknown = iota
// ServerFailed means that the OCSP responder failed to process the request.
ServerFailed = iota
)
// Response represents an OCSP response. See RFC 2560.
type Response struct {
// Status is one of {Good, Revoked, Unknown, ServerFailed}
Status int
SerialNumber []byte
ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time
RevocationReason int
Certificate *x509.Certificate
}
// ParseError results from an invalid OCSP response.
type ParseError string
func (p ParseError) Error() string {
return string(p)
}
// ParseResponse parses an OCSP response in DER form. It only supports
// responses for a single certificate and only those using RSA signatures.
// Non-RSA responses will result in an x509.UnsupportedAlgorithmError.
// Signature errors or parse failures will result in a ParseError.
func ParseResponse(bytes []byte) (*Response, error) {
var resp responseASN1
rest, err := asn1.Unmarshal(bytes, &resp)
if err != nil {
return nil, err
}
if len(rest) > 0 {
return nil, ParseError("trailing data in OCSP response")
}
ret := new(Response)
if resp.Status != ocspSuccess {
ret.Status = ServerFailed
return ret, nil
}
if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) {
return nil, ParseError("bad OCSP response type")
}
var basicResp basicResponse
rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp)
if err != nil {
return nil, err
}
if len(basicResp.Certificates) != 1 {
return nil, ParseError("OCSP response contains bad number of certificates")
}
if len(basicResp.TBSResponseData.Responses) != 1 {
return nil, ParseError("OCSP response contains bad number of responses")
}
ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes)
if err != nil {
return nil, err
}
if ret.Certificate.PublicKeyAlgorithm != x509.RSA || !basicResp.SignatureAlgorithm.Algorithm.Equal(idSHA1WithRSA) {
return nil, x509.UnsupportedAlgorithmError{}
}
hashType := crypto.SHA1
h := hashType.New()
pub := ret.Certificate.PublicKey.(*rsa.PublicKey)
h.Write(basicResp.TBSResponseData.Raw)
digest := h.Sum(nil)
signature := basicResp.Signature.RightAlign()
if rsa.VerifyPKCS1v15(pub, hashType, digest, signature) != nil {
return nil, ParseError("bad OCSP signature")
}
r := basicResp.TBSResponseData.Responses[0]
ret.SerialNumber = r.CertID.SerialNumber.Bytes
switch {
case bool(r.Good):
ret.Status = Good
case bool(r.Unknown):
ret.Status = Unknown
default:
ret.Status = Revoked
ret.RevokedAt = r.Revoked.RevocationTime
ret.RevocationReason = r.Revoked.Reason
}
ret.ProducedAt = basicResp.TBSResponseData.ProducedAt
ret.ThisUpdate = r.ThisUpdate
ret.NextUpdate = r.NextUpdate
return ret, nil
}
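// Usage sketch (assumptions: this package is importable as "crypto/ocsp" and
// derBytes already holds a DER-encoded response obtained from a responder;
// fetching it over HTTP is outside this package).
package main

import (
	"crypto/ocsp"
	"fmt"
)

func main() {
	var derBytes []byte // assumed to be filled in by the caller
	resp, err := ocsp.ParseResponse(derBytes)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	switch resp.Status {
	case ocsp.Good:
		fmt.Println("good; next update at", resp.NextUpdate)
	case ocsp.Revoked:
		fmt.Println("revoked at", resp.RevokedAt)
	case ocsp.Unknown:
		fmt.Println("responder does not know this certificate")
	default: // ocsp.ServerFailed
		fmt.Println("responder reported a failure")
	}
}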
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ocsp
import (
"bytes"
"encoding/hex"
"reflect"
"testing"
"time"
)
func TestOCSPDecode(t *testing.T) {
responseBytes, _ := hex.DecodeString(ocspResponseHex)
resp, err := ParseResponse(responseBytes)
if err != nil {
t.Error(err)
}
expected := Response{
Status: 0,
SerialNumber: []byte{0x1, 0xd0, 0xfa},
RevocationReason: 0,
ThisUpdate: time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC),
NextUpdate: time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC),
}
if !reflect.DeepEqual(resp.ThisUpdate, expected.ThisUpdate) {
t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, expected.ThisUpdate)
}
if !reflect.DeepEqual(resp.NextUpdate, expected.NextUpdate) {
t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, expected.NextUpdate)
}
if resp.Status != expected.Status {
t.Errorf("resp.Status: got %d, want %d", resp.Status, expected.Status)
}
if !bytes.Equal(resp.SerialNumber, expected.SerialNumber) {
t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, expected.SerialNumber)
}
if resp.RevocationReason != expected.RevocationReason {
t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, expected.RevocationReason)
}
}
// This OCSP response was taken from Thawte's public OCSP responder.
// To recreate:
// $ openssl s_client -tls1 -showcerts -servername www.google.com -connect www.google.com:443
// Copy and paste the first certificate into /tmp/cert.crt and the second into
// /tmp/intermediate.crt
// $ openssl ocsp -issuer /tmp/intermediate.crt -cert /tmp/cert.crt -url http://ocsp.thawte.com -resp_text -respout /tmp/ocsp.der
// Then hex encode the result:
// $ python -c 'print file("/tmp/ocsp.der", "r").read().encode("hex")'
const ocspResponseHex = "308206bc0a0100a08206b5308206b106092b0601050507300101048206a23082069e3081" +
"c9a14e304c310b300906035504061302494c31163014060355040a130d5374617274436f" +
"6d204c74642e312530230603550403131c5374617274436f6d20436c6173732031204f43" +
"5350205369676e6572180f32303130303730373137333531375a30663064303c30090605" +
"2b0e03021a050004146568874f40750f016a3475625e1f5c93e5a26d580414eb4234d098" +
"b0ab9ff41b6b08f7cc642eef0e2c45020301d0fa8000180f323031303037303731353031" +
"30355aa011180f32303130303730373138333531375a300d06092a864886f70d01010505" +
"000382010100ab557ff070d1d7cebbb5f0ec91a15c3fed22eb2e1b8244f1b84545f013a4" +
"fb46214c5e3fbfbebb8a56acc2b9db19f68fd3c3201046b3824d5ba689f99864328710cb" +
"467195eb37d84f539e49f859316b32964dc3e47e36814ce94d6c56dd02733b1d0802f7ff" +
"4eebdbbd2927dcf580f16cbc290f91e81b53cb365e7223f1d6e20a88ea064104875e0145" +
"672b20fc14829d51ca122f5f5d77d3ad6c83889c55c7dc43680ba2fe3cef8b05dbcabdc0" +
"d3e09aaf9725597f8c858c2fa38c0d6aed2e6318194420dd1a1137445d13e1c97ab47896" +
"17a4e08925f46f867b72e3a4dc1f08cb870b2b0717f7207faa0ac512e628a029aba7457a" +
"e63dcf3281e2162d9349a08204ba308204b6308204b23082039aa003020102020101300d" +
"06092a864886f70d010105050030818c310b300906035504061302494c31163014060355" +
"040a130d5374617274436f6d204c74642e312b3029060355040b13225365637572652044" +
"69676974616c204365727469666963617465205369676e696e6731383036060355040313" +
"2f5374617274436f6d20436c6173732031205072696d61727920496e7465726d65646961" +
"746520536572766572204341301e170d3037313032353030323330365a170d3132313032" +
"333030323330365a304c310b300906035504061302494c31163014060355040a130d5374" +
"617274436f6d204c74642e312530230603550403131c5374617274436f6d20436c617373" +
"2031204f435350205369676e657230820122300d06092a864886f70d0101010500038201" +
"0f003082010a0282010100b9561b4c45318717178084e96e178df2255e18ed8d8ecc7c2b" +
"7b51a6c1c2e6bf0aa3603066f132fe10ae97b50e99fa24b83fc53dd2777496387d14e1c3" +
"a9b6a4933e2ac12413d085570a95b8147414a0bc007c7bcf222446ef7f1a156d7ea1c577" +
"fc5f0facdfd42eb0f5974990cb2f5cefebceef4d1bdc7ae5c1075c5a99a93171f2b0845b" +
"4ff0864e973fcfe32f9d7511ff87a3e943410c90a4493a306b6944359340a9ca96f02b66" +
"ce67f028df2980a6aaee8d5d5d452b8b0eb93f923cc1e23fcccbdbe7ffcb114d08fa7a6a" +
"3c404f825d1a0e715935cf623a8c7b59670014ed0622f6089a9447a7a19010f7fe58f841" +
"29a2765ea367824d1c3bb2fda308530203010001a382015c30820158300c0603551d1301" +
"01ff04023000300b0603551d0f0404030203a8301e0603551d250417301506082b060105" +
"0507030906092b0601050507300105301d0603551d0e0416041445e0a36695414c5dd449" +
"bc00e33cdcdbd2343e173081a80603551d230481a030819d8014eb4234d098b0ab9ff41b" +
"6b08f7cc642eef0e2c45a18181a47f307d310b300906035504061302494c311630140603" +
"55040a130d5374617274436f6d204c74642e312b3029060355040b132253656375726520" +
"4469676974616c204365727469666963617465205369676e696e67312930270603550403" +
"13205374617274436f6d2043657274696669636174696f6e20417574686f726974798201" +
"0a30230603551d12041c301a8618687474703a2f2f7777772e737461727473736c2e636f" +
"6d2f302c06096086480186f842010d041f161d5374617274436f6d205265766f63617469" +
"6f6e20417574686f72697479300d06092a864886f70d01010505000382010100182d2215" +
"8f0fc0291324fa8574c49bb8ff2835085adcbf7b7fc4191c397ab6951328253fffe1e5ec" +
"2a7da0d50fca1a404e6968481366939e666c0a6209073eca57973e2fefa9ed1718e8176f" +
"1d85527ff522c08db702e3b2b180f1cbff05d98128252cf0f450f7dd2772f4188047f19d" +
"c85317366f94bc52d60f453a550af58e308aaab00ced33040b62bf37f5b1ab2a4f7f0f80" +
"f763bf4d707bc8841d7ad9385ee2a4244469260b6f2bf085977af9074796048ecc2f9d48" +
"a1d24ce16e41a9941568fec5b42771e118f16c106a54ccc339a4b02166445a167902e75e" +
"6d8620b0825dcd18a069b90fd851d10fa8effd409deec02860d26d8d833f304b10669b42"
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
// very similar to PEM except that it has an additional CRC checksum.
package armor
import (
"bufio"
"bytes"
"crypto/openpgp/errors"
"encoding/base64"
"io"
)
// A Block represents an OpenPGP armored structure.
//
// The encoded form is:
// -----BEGIN Type-----
// Headers
//
// base64-encoded Bytes
// '=' base64 encoded checksum
// -----END Type-----
// where Headers is a possibly empty sequence of Key: Value lines.
//
// Since the armored data can be very large, this package presents a streaming
// interface.
type Block struct {
Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE").
Header map[string]string // Optional headers.
Body io.Reader // A Reader from which the contents can be read
lReader lineReader
oReader openpgpReader
}
var ArmorCorrupt error = errors.StructuralError("armor invalid")
const crc24Init = 0xb704ce
const crc24Poly = 0x1864cfb
const crc24Mask = 0xffffff
// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
func crc24(crc uint32, d []byte) uint32 {
for _, b := range d {
crc ^= uint32(b) << 16
for i := 0; i < 8; i++ {
crc <<= 1
if crc&0x1000000 != 0 {
crc ^= crc24Poly
}
}
}
return crc
}
var armorStart = []byte("-----BEGIN ")
var armorEnd = []byte("-----END ")
var armorEndOfLine = []byte("-----")
// lineReader wraps a line based reader. It watches for the end of an armor
// block and records the expected CRC value.
type lineReader struct {
in *bufio.Reader
buf []byte
eof bool
crc uint32
}
func (l *lineReader) Read(p []byte) (n int, err error) {
if l.eof {
return 0, io.EOF
}
if len(l.buf) > 0 {
n = copy(p, l.buf)
l.buf = l.buf[n:]
return
}
line, isPrefix, err := l.in.ReadLine()
if err != nil {
return
}
if isPrefix {
return 0, ArmorCorrupt
}
if len(line) == 5 && line[0] == '=' {
// This is the checksum line
var expectedBytes [3]byte
var m int
m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:])
if m != 3 || err != nil {
return
}
l.crc = uint32(expectedBytes[0])<<16 |
uint32(expectedBytes[1])<<8 |
uint32(expectedBytes[2])
line, _, err = l.in.ReadLine()
if err != nil && err != io.EOF {
return
}
if !bytes.HasPrefix(line, armorEnd) {
return 0, ArmorCorrupt
}
l.eof = true
return 0, io.EOF
}
if len(line) > 64 {
return 0, ArmorCorrupt
}
n = copy(p, line)
bytesToSave := len(line) - n
if bytesToSave > 0 {
if cap(l.buf) < bytesToSave {
l.buf = make([]byte, 0, bytesToSave)
}
l.buf = l.buf[0:bytesToSave]
copy(l.buf, line[n:])
}
return
}
// openpgpReader passes Read calls to the underlying base64 decoder, but keeps
// a running CRC of the resulting data and checks the CRC against the value
// found by the lineReader at EOF.
type openpgpReader struct {
lReader *lineReader
b64Reader io.Reader
currentCRC uint32
}
func (r *openpgpReader) Read(p []byte) (n int, err error) {
n, err = r.b64Reader.Read(p)
r.currentCRC = crc24(r.currentCRC, p[:n])
if err == io.EOF {
if r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
return 0, ArmorCorrupt
}
}
return
}
// Decode reads a PGP armored block from the given Reader. It will ignore
// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
// given Reader is not usable after calling this function: an arbitrary amount
// of data may have been read past the end of the block.
func Decode(in io.Reader) (p *Block, err error) {
r, _ := bufio.NewReaderSize(in, 100)
var line []byte
ignoreNext := false
TryNextBlock:
p = nil
// Skip leading garbage
for {
ignoreThis := ignoreNext
line, ignoreNext, err = r.ReadLine()
if err != nil {
return
}
if ignoreNext || ignoreThis {
continue
}
line = bytes.TrimSpace(line)
if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
break
}
}
p = new(Block)
p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
p.Header = make(map[string]string)
nextIsContinuation := false
var lastKey string
// Read headers
for {
isContinuation := nextIsContinuation
line, nextIsContinuation, err = r.ReadLine()
if err != nil {
p = nil
return
}
if isContinuation {
p.Header[lastKey] += string(line)
continue
}
line = bytes.TrimSpace(line)
if len(line) == 0 {
break
}
i := bytes.Index(line, []byte(": "))
if i == -1 {
goto TryNextBlock
}
lastKey = string(line[:i])
p.Header[lastKey] = string(line[i+2:])
}
p.lReader.in = r
p.oReader.currentCRC = crc24Init
p.oReader.lReader = &p.lReader
p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
p.Body = &p.oReader
return
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armor
import (
"bytes"
"hash/adler32"
"io/ioutil"
"testing"
)
func TestDecodeEncode(t *testing.T) {
buf := bytes.NewBuffer([]byte(armorExample1))
result, err := Decode(buf)
if err != nil {
t.Error(err)
}
expectedType := "PGP SIGNATURE"
if result.Type != expectedType {
t.Errorf("result.Type: got:%s want:%s", result.Type, expectedType)
}
if len(result.Header) != 1 {
t.Errorf("len(result.Header): got:%d want:1", len(result.Header))
}
v, ok := result.Header["Version"]
if !ok || v != "GnuPG v1.4.10 (GNU/Linux)" {
t.Errorf("result.Header: got:%#v", result.Header)
}
contents, err := ioutil.ReadAll(result.Body)
if err != nil {
t.Error(err)
}
if adler32.Checksum(contents) != 0x27b144be {
t.Errorf("contents: got: %x", contents)
}
buf = bytes.NewBuffer(nil)
w, err := Encode(buf, result.Type, result.Header)
if err != nil {
t.Error(err)
}
_, err = w.Write(contents)
if err != nil {
t.Error(err)
}
w.Close()
if !bytes.Equal(buf.Bytes(), []byte(armorExample1)) {
t.Errorf("got: %s\nwant: %s", string(buf.Bytes()), armorExample1)
}
}
func TestLongHeader(t *testing.T) {
buf := bytes.NewBuffer([]byte(armorLongLine))
result, err := Decode(buf)
if err != nil {
t.Error(err)
return
}
value, ok := result.Header["Version"]
if !ok {
t.Errorf("missing Version header")
}
if value != longValueExpected {
t.Errorf("got: %s want: %s", value, longValueExpected)
}
}
const armorExample1 = `-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.10 (GNU/Linux)
iJwEAAECAAYFAk1Fv/0ACgkQo01+GMIMMbsYTwQAiAw+QAaNfY6WBdplZ/uMAccm
4g+81QPmTSGHnetSb6WBiY13kVzK4HQiZH8JSkmmroMLuGeJwsRTEL4wbjRyUKEt
p1xwUZDECs234F1xiG5enc5SGlRtP7foLBz9lOsjx+LEcA4sTl5/2eZR9zyFZqWW
TxRjs+fJCIFuo71xb1g=
=/teI
-----END PGP SIGNATURE-----`
const armorLongLine = `-----BEGIN PGP SIGNATURE-----
Version: 0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz
iQEcBAABAgAGBQJMtFESAAoJEKsQXJGvOPsVj40H/1WW6jaMXv4BW+1ueDSMDwM8
kx1fLOXbVM5/Kn5LStZNt1jWWnpxdz7eq3uiqeCQjmqUoRde3YbB2EMnnwRbAhpp
cacnAvy9ZQ78OTxUdNW1mhX5bS6q1MTEJnl+DcyigD70HG/yNNQD7sOPMdYQw0TA
byQBwmLwmTsuZsrYqB68QyLHI+DUugn+kX6Hd2WDB62DKa2suoIUIHQQCd/ofwB3
WfCYInXQKKOSxu2YOg2Eb4kLNhSMc1i9uKUWAH+sdgJh7NBgdoE4MaNtBFkHXRvv
okWuf3+xA9ksp1npSY/mDvgHijmjvtpRDe6iUeqfCn8N9u9CBg8geANgaG8+QA4=
=wfQG
-----END PGP SIGNATURE-----`
const longValueExpected = "0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz"
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armor
import (
"encoding/base64"
"io"
)
var armorHeaderSep = []byte(": ")
var blockEnd = []byte("\n=")
var newline = []byte("\n")
var armorEndOfLineOut = []byte("-----\n")
// writeSlices writes its arguments to the given Writer.
func writeSlices(out io.Writer, slices ...[]byte) (err error) {
for _, s := range slices {
_, err = out.Write(s)
if err != nil {
return err
}
}
return
}
// lineBreaker breaks data across several lines, all of the same byte length
// (except possibly the last). Lines are broken with a single '\n'.
type lineBreaker struct {
lineLength int
line []byte
used int
out io.Writer
haveWritten bool
}
func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
return &lineBreaker{
lineLength: lineLength,
line: make([]byte, lineLength),
used: 0,
out: out,
}
}
func (l *lineBreaker) Write(b []byte) (n int, err error) {
n = len(b)
if n == 0 {
return
}
if l.used == 0 && l.haveWritten {
_, err = l.out.Write([]byte{'\n'})
if err != nil {
return
}
}
if l.used+len(b) < l.lineLength {
l.used += copy(l.line[l.used:], b)
return
}
l.haveWritten = true
_, err = l.out.Write(l.line[0:l.used])
if err != nil {
return
}
excess := l.lineLength - l.used
l.used = 0
_, err = l.out.Write(b[0:excess])
if err != nil {
return
}
_, err = l.Write(b[excess:])
return
}
func (l *lineBreaker) Close() (err error) {
if l.used > 0 {
_, err = l.out.Write(l.line[0:l.used])
if err != nil {
return
}
}
return
}
// encoding keeps track of a running CRC24 over the data which has been written
// to it and outputs an OpenPGP checksum when closed, followed by an armor
// trailer.
//
// It's built into a stack of io.Writers:
// encoding -> base64 encoder -> lineBreaker -> out
type encoding struct {
out io.Writer
breaker *lineBreaker
b64 io.WriteCloser
crc uint32
blockType []byte
}
func (e *encoding) Write(data []byte) (n int, err error) {
e.crc = crc24(e.crc, data)
return e.b64.Write(data)
}
func (e *encoding) Close() (err error) {
err = e.b64.Close()
if err != nil {
return
}
e.breaker.Close()
var checksumBytes [3]byte
checksumBytes[0] = byte(e.crc >> 16)
checksumBytes[1] = byte(e.crc >> 8)
checksumBytes[2] = byte(e.crc)
var b64ChecksumBytes [4]byte
base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
}
// Encode returns a WriteCloser which will encode the data written to it in
// OpenPGP armor.
func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
bType := []byte(blockType)
err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
if err != nil {
return
}
for k, v := range headers {
err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
if err != nil {
return
}
}
_, err = out.Write(newline)
if err != nil {
return
}
e := &encoding{
out: out,
breaker: newLineBreaker(out, 64),
crc: crc24Init,
blockType: bType,
}
e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
return e, nil
}
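// Encoding sketch (assumption: this package is importable as
// "crypto/openpgp/armor"; the block type and header are illustrative values).
// Close must be called so that the "=" checksum line and the END trailer are
// written.
package main

import (
	"bytes"
	"crypto/openpgp/armor"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Version": "example"})
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello, armor"))
	w.Close()
	fmt.Println(buf.String())
}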
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import "hash"
// NewCanonicalTextHash reformats text written to it into the canonical
// form and then applies the hash h. See RFC 4880, section 5.2.1.
func NewCanonicalTextHash(h hash.Hash) hash.Hash {
return &canonicalTextHash{h, 0}
}
type canonicalTextHash struct {
h hash.Hash
s int
}
var newline = []byte{'\r', '\n'}
func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
start := 0
for i, c := range buf {
switch cth.s {
case 0:
if c == '\r' {
cth.s = 1
} else if c == '\n' {
cth.h.Write(buf[start:i])
cth.h.Write(newline)
start = i + 1
}
case 1:
cth.s = 0
}
}
cth.h.Write(buf[start:])
return len(buf), nil
}
func (cth *canonicalTextHash) Sum(in []byte) []byte {
return cth.h.Sum(in)
}
func (cth *canonicalTextHash) Reset() {
cth.h.Reset()
cth.s = 0
}
func (cth *canonicalTextHash) Size() int {
return cth.h.Size()
}
func (cth *canonicalTextHash) BlockSize() int {
return cth.h.BlockSize()
}
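// Sketch: hashing text in OpenPGP canonical form (line endings normalized to
// CRLF) by wrapping a SHA-1 hash (assumption: this package is importable as
// "crypto/openpgp").
package main

import (
	"crypto/openpgp"
	"crypto/sha1"
	"fmt"
)

func main() {
	h := openpgp.NewCanonicalTextHash(sha1.New())
	h.Write([]byte("line one\nline two\n")) // hashed as "line one\r\nline two\r\n"
	fmt.Printf("%x\n", h.Sum(nil))
}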
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import (
"bytes"
"testing"
)
type recordingHash struct {
buf *bytes.Buffer
}
func (r recordingHash) Write(b []byte) (n int, err error) {
return r.buf.Write(b)
}
func (r recordingHash) Sum(in []byte) []byte {
return append(in, r.buf.Bytes()...)
}
func (r recordingHash) Reset() {
panic("shouldn't be called")
}
func (r recordingHash) Size() int {
panic("shouldn't be called")
}
func (r recordingHash) BlockSize() int {
panic("shouldn't be called")
}
func testCanonicalText(t *testing.T, input, expected string) {
r := recordingHash{bytes.NewBuffer(nil)}
c := NewCanonicalTextHash(r)
c.Write([]byte(input))
result := c.Sum(nil)
if expected != string(result) {
t.Errorf("input: %x got: %x want: %x", input, result, expected)
}
}
func TestCanonicalText(t *testing.T) {
testCanonicalText(t, "foo\n", "foo\r\n")
testCanonicalText(t, "foo", "foo")
testCanonicalText(t, "foo\r\n", "foo\r\n")
testCanonicalText(t, "foo\r\nbar", "foo\r\nbar")
testCanonicalText(t, "foo\r\nbar\n\n", "foo\r\nbar\r\n\r\n")
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
// n. 4, 1985, pp. 469-472.
//
// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
// unsuitable for other protocols. RSA should be used in preference in any
// case.
package elgamal
import (
"crypto/rand"
"crypto/subtle"
"errors"
"io"
"math/big"
)
// PublicKey represents an ElGamal public key.
type PublicKey struct {
G, P, Y *big.Int
}
// PrivateKey represents an ElGamal private key.
type PrivateKey struct {
PublicKey
X *big.Int
}
// Encrypt encrypts the given message to the given public key. The result is a
// pair of integers. Errors can result from reading random, or because msg is
// too large to be encrypted to the public key.
func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
pLen := (pub.P.BitLen() + 7) / 8
if len(msg) > pLen-11 {
err = errors.New("elgamal: message too long")
return
}
// EM = 0x02 || PS || 0x00 || M
em := make([]byte, pLen-1)
em[0] = 2
ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
err = nonZeroRandomBytes(ps, random)
if err != nil {
return
}
em[len(em)-len(msg)-1] = 0
copy(mm, msg)
m := new(big.Int).SetBytes(em)
k, err := rand.Int(random, pub.P)
if err != nil {
return
}
c1 = new(big.Int).Exp(pub.G, k, pub.P)
s := new(big.Int).Exp(pub.Y, k, pub.P)
c2 = s.Mul(s, m)
c2.Mod(c2, pub.P)
return
}
// Decrypt takes two integers, resulting from an ElGamal encryption, and
// returns the plaintext of the message. An error can result only if the
// ciphertext is invalid. Users should keep in mind that this is a padding
// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
// Bleichenbacher, Advances in Cryptology (Crypto '98).
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
s := new(big.Int).Exp(c1, priv.X, priv.P)
s.ModInverse(s, priv.P)
s.Mul(s, c2)
s.Mod(s, priv.P)
em := s.Bytes()
firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
// The remainder of the plaintext must be a string of non-zero random
// octets, followed by a 0, followed by the message.
// lookingForIndex: 1 iff we are still looking for the zero.
// index: the offset of the first zero byte.
var lookingForIndex, index int
lookingForIndex = 1
for i := 1; i < len(em); i++ {
equals0 := subtle.ConstantTimeByteEq(em[i], 0)
index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
}
if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
return nil, errors.New("elgamal: decryption error")
}
return em[index+1:], nil
}
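// Illustrative worked example, not part of the original source: with
// pLen = 128 and a 16-byte message, the decrypted block is laid out as
//
//	em[0] = 0x02, em[1:110] = non-zero padding, em[110] = 0x00, em[111:] = msg
//
// The constant-time loop above records the offset of the first zero byte
// (index) without branching on secret data, and rejecting index < 9 enforces
// the minimum of eight non-zero padding bytes required by PKCS#1 v1.5.
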
// nonZeroRandomBytes fills the given slice with non-zero random octets.
func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
_, err = io.ReadFull(rand, s)
if err != nil {
return
}
for i := 0; i < len(s); i++ {
for s[i] == 0 {
_, err = io.ReadFull(rand, s[i:i+1])
if err != nil {
return
}
}
}
return
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package elgamal
import (
"bytes"
"crypto/rand"
"math/big"
"testing"
)
// This is the 1024-bit MODP group from RFC 5114, section 2.1:
const primeHex = "B10B8F96A080E01DDE92DE5EAE5D54EC52C99FBCFB06A3C69A6A9DCA52D23B616073E28675A23D189838EF1E2EE652C013ECB4AEA906112324975C3CD49B83BFACCBDD7D90C4BD7098488E9C219A73724EFFD6FAE5644738FAA31A4FF55BCCC0A151AF5F0DC8B4BD45BF37DF365C1A65E68CFDA76D4DA708DF1FB2BC2E4A4371"
const generatorHex = "A4D1CBD5C3FD34126765A442EFB99905F8104DD258AC507FD6406CFF14266D31266FEA1E5C41564B777E690F5504F213160217B4B01B886A5E91547F9E2749F4D7FBD7D3B9A92EE1909D0D2263F80A76A6A24C087A091F531DBF0A0169B6A28AD662A4D18E73AFA32D779D5918D08BC8858F4DCEF97C2A24855E6EEB22B3B2E5"
func fromHex(hex string) *big.Int {
n, ok := new(big.Int).SetString(hex, 16)
if !ok {
panic("failed to parse hex number")
}
return n
}
func TestEncryptDecrypt(t *testing.T) {
priv := &PrivateKey{
PublicKey: PublicKey{
G: fromHex(generatorHex),
P: fromHex(primeHex),
},
X: fromHex("42"),
}
priv.Y = new(big.Int).Exp(priv.G, priv.X, priv.P)
message := []byte("hello world")
c1, c2, err := Encrypt(rand.Reader, &priv.PublicKey, message)
if err != nil {
t.Errorf("error encrypting: %s", err)
}
message2, err := Decrypt(priv, c1, c2)
if err != nil {
t.Errorf("error decrypting: %s", err)
}
if !bytes.Equal(message2, message) {
t.Errorf("decryption failed, got: %x, want: %x", message2, message)
}
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errors contains common error types for the OpenPGP packages.
package errors
import (
"strconv"
)
// A StructuralError is returned when OpenPGP data is found to be syntactically
// invalid.
type StructuralError string
func (s StructuralError) Error() string {
return "OpenPGP data invalid: " + string(s)
}
// UnsupportedError indicates that, although the OpenPGP data is valid, it
// makes use of currently unimplemented features.
type UnsupportedError string
func (s UnsupportedError) Error() string {
return "OpenPGP feature unsupported: " + string(s)
}
// InvalidArgumentError indicates that the caller is in error and passed an
// incorrect value.
type InvalidArgumentError string
func (i InvalidArgumentError) Error() string {
return "OpenPGP argument invalid: " + string(i)
}
// SignatureError indicates that a syntactically valid signature failed to
// validate.
type SignatureError string
func (b SignatureError) Error() string {
return "OpenPGP signature invalid: " + string(b)
}
type keyIncorrectError int
func (ki keyIncorrectError) Error() string {
return "the given key was incorrect"
}
var KeyIncorrectError = keyIncorrectError(0)
type unknownIssuerError int
func (unknownIssuerError) Error() string {
return "signature make by unknown entity"
}
var UnknownIssuerError = unknownIssuerError(0)
type UnknownPacketTypeError uint8
func (upte UnknownPacketTypeError) Error() string {
return "unknown OpenPGP packet type: " + strconv.Itoa(int(upte))
}
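// Illustrative sketch, not part of the original libgo sources: how a caller
// of the OpenPGP packages might distinguish these error types. The describe
// function and its wording are hypothetical; KeyIncorrectError and
// UnknownIssuerError are sentinel values and are compared with == instead.
package errors

func describe(err error) string {
	switch err.(type) {
	case StructuralError:
		return "the OpenPGP data is malformed"
	case UnsupportedError:
		return "the OpenPGP data uses an unimplemented feature"
	case InvalidArgumentError:
		return "the caller passed an incorrect value"
	case SignatureError:
		return "a signature failed to validate"
	case UnknownPacketTypeError:
		return "an unknown packet type was seen (callers usually skip it)"
	default:
		return err.Error()
	}
}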
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"compress/flate"
"compress/zlib"
"crypto/openpgp/errors"
"io"
"strconv"
)
// Compressed represents a compressed OpenPGP packet. The decompressed contents
// will contain more OpenPGP packets. See RFC 4880, section 5.6.
type Compressed struct {
Body io.Reader
}
func (c *Compressed) parse(r io.Reader) error {
var buf [1]byte
_, err := readFull(r, buf[:])
if err != nil {
return err
}
switch buf[0] {
case 1:
c.Body = flate.NewReader(r)
case 2:
c.Body, err = zlib.NewReader(r)
default:
err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
}
return err
}
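// Illustrative sketch, not part of the original sources: the decompressed
// Body contains further OpenPGP packets, so a typical caller pushes it onto
// the packet Reader defined later in this package and keeps calling Next.
// The handleCompressed helper is hypothetical.
package packet

func handleCompressed(packets *Reader, c *Compressed) {
	// After Push, subsequent calls to packets.Next read the packets inside
	// the compressed packet before returning to the outer stream.
	packets.Push(c.Body)
}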
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"encoding/hex"
"io"
"io/ioutil"
"testing"
)
func TestCompressed(t *testing.T) {
packet, err := Read(readerFromHex(compressedHex))
if err != nil {
t.Errorf("failed to read Compressed: %s", err)
return
}
c, ok := packet.(*Compressed)
if !ok {
t.Error("didn't find Compressed packet")
return
}
contents, err := ioutil.ReadAll(c.Body)
if err != nil && err != io.EOF {
t.Error(err)
return
}
expected, _ := hex.DecodeString(compressedExpectedHex)
if !bytes.Equal(expected, contents) {
t.Errorf("got:%x want:%x", contents, expected)
}
}
const compressedHex = "a3013b2d90c4e02b72e25f727e5e496a5e49b11e1700"
const compressedExpectedHex = "cb1062004d14c8fe636f6e74656e74732e0a"
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto/openpgp/elgamal"
"crypto/openpgp/errors"
"crypto/rand"
"crypto/rsa"
"encoding/binary"
"io"
"math/big"
"strconv"
)
const encryptedKeyVersion = 3
// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
// section 5.1.
type EncryptedKey struct {
KeyId uint64
Algo PublicKeyAlgorithm
CipherFunc CipherFunction // only valid after a successful Decrypt
Key []byte // only valid after a successful Decrypt
encryptedMPI1, encryptedMPI2 []byte
}
func (e *EncryptedKey) parse(r io.Reader) (err error) {
var buf [10]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
if buf[0] != encryptedKeyVersion {
return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
}
e.KeyId = binary.BigEndian.Uint64(buf[1:9])
e.Algo = PublicKeyAlgorithm(buf[9])
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
e.encryptedMPI1, _, err = readMPI(r)
case PubKeyAlgoElGamal:
e.encryptedMPI1, _, err = readMPI(r)
if err != nil {
return
}
e.encryptedMPI2, _, err = readMPI(r)
}
_, err = consumeAll(r)
return
}
func checksumKeyMaterial(key []byte) uint16 {
var checksum uint16
for _, v := range key {
checksum += uint16(v)
}
return checksum
}
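// Illustrative worked example, not part of the original source: for the
// session key {1, 2, 3, 4} the checksum is 1+2+3+4 = 10 = 0x000a, so the key
// block built by SerializeEncryptedKey below is
//
//	cipher byte || 01 02 03 04 || 00 0a
//
// and Decrypt recomputes the same sum over e.Key to detect corruption.
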
// Decrypt decrypts an encrypted session key with the given private key. The
// private key must have been decrypted first.
func (e *EncryptedKey) Decrypt(priv *PrivateKey) error {
var err error
var b []byte
// TODO(agl): use session key decryption routines here to avoid
// padding oracle attacks.
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
b, err = rsa.DecryptPKCS1v15(rand.Reader, priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1)
case PubKeyAlgoElGamal:
c1 := new(big.Int).SetBytes(e.encryptedMPI1)
c2 := new(big.Int).SetBytes(e.encryptedMPI2)
b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
default:
err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
}
if err != nil {
return err
}
e.CipherFunc = CipherFunction(b[0])
e.Key = b[1 : len(b)-2]
expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
checksum := checksumKeyMaterial(e.Key)
if checksum != expectedChecksum {
return errors.StructuralError("EncryptedKey checksum incorrect")
}
return nil
}
// SerializeEncryptedKey serializes an encrypted key packet to w that contains
// key, encrypted to pub.
func SerializeEncryptedKey(w io.Writer, rand io.Reader, pub *PublicKey, cipherFunc CipherFunction, key []byte) error {
var buf [10]byte
buf[0] = encryptedKeyVersion
binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
buf[9] = byte(pub.PubKeyAlgo)
keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */ )
keyBlock[0] = byte(cipherFunc)
copy(keyBlock[1:], key)
checksum := checksumKeyMaterial(key)
keyBlock[1+len(key)] = byte(checksum >> 8)
keyBlock[1+len(key)+1] = byte(checksum)
switch pub.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
return serializeEncryptedKeyRSA(w, rand, buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
case PubKeyAlgoElGamal:
return serializeEncryptedKeyElGamal(w, rand, buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}
return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}
func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
if err != nil {
return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
}
packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
if err != nil {
return err
}
_, err = w.Write(header[:])
if err != nil {
return err
}
return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
}
func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
if err != nil {
return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
}
packetLen := 10 /* header length */
packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
if err != nil {
return err
}
_, err = w.Write(header[:])
if err != nil {
return err
}
err = writeBig(w, c1)
if err != nil {
return err
}
return writeBig(w, c2)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"fmt"
"math/big"
"testing"
)
func bigFromBase10(s string) *big.Int {
b, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("bigFromBase10 failed")
}
return b
}
var encryptedKeyPub = rsa.PublicKey{
E: 65537,
N: bigFromBase10("115804063926007623305902631768113868327816898845124614648849934718568541074358183759250136204762053879858102352159854352727097033322663029387610959884180306668628526686121021235757016368038585212410610742029286439607686208110250133174279811431933746643015923132833417396844716207301518956640020862630546868823"),
}
var encryptedKeyRSAPriv = &rsa.PrivateKey{
PublicKey: encryptedKeyPub,
D: bigFromBase10("32355588668219869544751561565313228297765464314098552250409557267371233892496951383426602439009993875125222579159850054973310859166139474359774543943714622292329487391199285040721944491839695981199720170366763547754915493640685849961780092241140181198779299712578774460837139360803883139311171713302987058393"),
}
var encryptedKeyPriv = &PrivateKey{
PublicKey: PublicKey{
PubKeyAlgo: PubKeyAlgoRSA,
},
PrivateKey: encryptedKeyRSAPriv,
}
func TestDecryptingEncryptedKey(t *testing.T) {
const encryptedKeyHex = "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8"
const expectedKeyHex = "d930363f7e0308c333b9618617ea728963d8df993665ae7be1092d4926fd864b"
p, err := Read(readerFromHex(encryptedKeyHex))
if err != nil {
t.Errorf("error from Read: %s", err)
return
}
ek, ok := p.(*EncryptedKey)
if !ok {
t.Errorf("didn't parse an EncryptedKey, got %#v", p)
return
}
if ek.KeyId != 0x2a67d68660df41c7 || ek.Algo != PubKeyAlgoRSA {
t.Errorf("unexpected EncryptedKey contents: %#v", ek)
return
}
err = ek.Decrypt(encryptedKeyPriv)
if err != nil {
t.Errorf("error from Decrypt: %s", err)
return
}
if ek.CipherFunc != CipherAES256 {
t.Errorf("unexpected EncryptedKey contents: %#v", ek)
return
}
keyHex := fmt.Sprintf("%x", ek.Key)
if keyHex != expectedKeyHex {
t.Errorf("bad key, got %s want %x", keyHex, expectedKeyHex)
}
}
func TestEncryptingEncryptedKey(t *testing.T) {
key := []byte{1, 2, 3, 4}
const expectedKeyHex = "01020304"
const keyId = 42
pub := &PublicKey{
PublicKey: &encryptedKeyPub,
KeyId: keyId,
PubKeyAlgo: PubKeyAlgoRSAEncryptOnly,
}
buf := new(bytes.Buffer)
err := SerializeEncryptedKey(buf, rand.Reader, pub, CipherAES128, key)
if err != nil {
t.Errorf("error writing encrypted key packet: %s", err)
}
p, err := Read(buf)
if err != nil {
t.Errorf("error from Read: %s", err)
return
}
ek, ok := p.(*EncryptedKey)
if !ok {
t.Errorf("didn't parse an EncryptedKey, got %#v", p)
return
}
if ek.KeyId != keyId || ek.Algo != PubKeyAlgoRSAEncryptOnly {
t.Errorf("unexpected EncryptedKey contents: %#v", ek)
return
}
err = ek.Decrypt(encryptedKeyPriv)
if err != nil {
t.Errorf("error from Decrypt: %s", err)
return
}
if ek.CipherFunc != CipherAES128 {
t.Errorf("unexpected EncryptedKey contents: %#v", ek)
return
}
keyHex := fmt.Sprintf("%x", ek.Key)
if keyHex != expectedKeyHex {
t.Errorf("bad key, got %s want %x", keyHex, expectedKeyHex)
}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"encoding/binary"
"io"
)
// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
type LiteralData struct {
IsBinary bool
FileName string
Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
Body io.Reader
}
// ForEyesOnly returns whether the contents of the LiteralData have been marked
// as especially sensitive.
func (l *LiteralData) ForEyesOnly() bool {
return l.FileName == "_CONSOLE"
}
func (l *LiteralData) parse(r io.Reader) (err error) {
var buf [256]byte
_, err = readFull(r, buf[:2])
if err != nil {
return
}
l.IsBinary = buf[0] == 'b'
fileNameLen := int(buf[1])
_, err = readFull(r, buf[:fileNameLen])
if err != nil {
return
}
l.FileName = string(buf[:fileNameLen])
_, err = readFull(r, buf[:4])
if err != nil {
return
}
l.Time = binary.BigEndian.Uint32(buf[:4])
l.Body = r
return
}
// SerializeLiteral serializes a literal data packet to w and returns a
// WriteCloser to which the data itself can be written and which MUST be closed
// on completion. The fileName is truncated to 255 bytes.
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
var buf [4]byte
buf[0] = 't'
if isBinary {
buf[0] = 'b'
}
if len(fileName) > 255 {
fileName = fileName[:255]
}
buf[1] = byte(len(fileName))
inner, err := serializeStreamHeader(w, packetTypeLiteralData)
if err != nil {
return
}
_, err = inner.Write(buf[:2])
if err != nil {
return
}
_, err = inner.Write([]byte(fileName))
if err != nil {
return
}
binary.BigEndian.PutUint32(buf[:], time)
_, err = inner.Write(buf[:])
if err != nil {
return
}
plaintext = inner
return
}
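// Illustrative sketch, not part of the original libgo sources: writing a
// literal data packet with SerializeLiteral. The helper name, file name and
// sink are hypothetical; the returned WriteCloser must be closed so that the
// final chunk of the packet body is emitted.
package packet

import "io"

func exampleWriteLiteral(out io.WriteCloser, body []byte) error {
	plaintext, err := SerializeLiteral(out, true /* binary */, "data.bin", 0)
	if err != nil {
		return err
	}
	if _, err := plaintext.Write(body); err != nil {
		return err
	}
	// Close flushes the remaining data and finishes the packet.
	return plaintext.Close()
}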
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"crypto/openpgp/errors"
"crypto/openpgp/s2k"
"encoding/binary"
"io"
"strconv"
)
// OnePassSignature represents a one-pass signature packet. See RFC 4880,
// section 5.4.
type OnePassSignature struct {
SigType SignatureType
Hash crypto.Hash
PubKeyAlgo PublicKeyAlgorithm
KeyId uint64
IsLast bool
}
const onePassSignatureVersion = 3
func (ops *OnePassSignature) parse(r io.Reader) (err error) {
var buf [13]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
if buf[0] != onePassSignatureVersion {
err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
}
var ok bool
ops.Hash, ok = s2k.HashIdToHash(buf[2])
if !ok {
return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
}
ops.SigType = SignatureType(buf[1])
ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
ops.KeyId = binary.BigEndian.Uint64(buf[4:12])
ops.IsLast = buf[12] != 0
return
}
// Serialize marshals the given OnePassSignature to w.
func (ops *OnePassSignature) Serialize(w io.Writer) error {
var buf [13]byte
buf[0] = onePassSignatureVersion
buf[1] = uint8(ops.SigType)
var ok bool
buf[2], ok = s2k.HashToHashId(ops.Hash)
if !ok {
return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
}
buf[3] = uint8(ops.PubKeyAlgo)
binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
if ops.IsLast {
buf[12] = 1
}
if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil {
return err
}
_, err := w.Write(buf[:])
return err
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto/openpgp/errors"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"testing"
)
func TestReadFull(t *testing.T) {
var out [4]byte
b := bytes.NewBufferString("foo")
n, err := readFull(b, out[:3])
if n != 3 || err != nil {
t.Errorf("full read failed n:%d err:%s", n, err)
}
b = bytes.NewBufferString("foo")
n, err = readFull(b, out[:4])
if n != 3 || err != io.ErrUnexpectedEOF {
t.Errorf("partial read failed n:%d err:%s", n, err)
}
b = bytes.NewBuffer(nil)
n, err = readFull(b, out[:3])
if n != 0 || err != io.ErrUnexpectedEOF {
t.Errorf("empty read failed n:%d err:%s", n, err)
}
}
func readerFromHex(s string) io.Reader {
data, err := hex.DecodeString(s)
if err != nil {
panic("readerFromHex: bad input")
}
return bytes.NewBuffer(data)
}
var readLengthTests = []struct {
hexInput string
length int64
isPartial bool
err error
}{
{"", 0, false, io.ErrUnexpectedEOF},
{"1f", 31, false, nil},
{"c0", 0, false, io.ErrUnexpectedEOF},
{"c101", 256 + 1 + 192, false, nil},
{"e0", 1, true, nil},
{"e1", 2, true, nil},
{"e2", 4, true, nil},
{"ff", 0, false, io.ErrUnexpectedEOF},
{"ff00", 0, false, io.ErrUnexpectedEOF},
{"ff0000", 0, false, io.ErrUnexpectedEOF},
{"ff000000", 0, false, io.ErrUnexpectedEOF},
{"ff00000000", 0, false, nil},
{"ff01020304", 16909060, false, nil},
}
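// Note (illustrative, not part of the original source): the cases above
// exercise the RFC 4880 new-format length encodings. A single octet below 192
// is the length itself ("1f" -> 31); octets 0xc0..0xdf start a two-octet
// length of (b0-192)<<8 + b1 + 192 ("c101" -> 449); 0xff is followed by a
// four-octet big-endian length ("ff01020304" -> 16909060); and 0xe0..0xfe
// announce a partial length of 1<<(b0&0x1f) ("e2" -> 4).
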
func TestReadLength(t *testing.T) {
for i, test := range readLengthTests {
length, isPartial, err := readLength(readerFromHex(test.hexInput))
if test.err != nil {
if err != test.err {
t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err)
}
continue
}
if err != nil {
t.Errorf("%d: unexpected error: %s", i, err)
continue
}
if length != test.length || isPartial != test.isPartial {
t.Errorf("%d: bad result got:(%d,%t) want:(%d,%t)", i, length, isPartial, test.length, test.isPartial)
}
}
}
var partialLengthReaderTests = []struct {
hexInput string
err error
hexOutput string
}{
{"e0", io.ErrUnexpectedEOF, ""},
{"e001", io.ErrUnexpectedEOF, ""},
{"e0010102", nil, "0102"},
{"ff00000000", nil, ""},
{"e10102e1030400", nil, "01020304"},
{"e101", io.ErrUnexpectedEOF, ""},
}
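// Note (illustrative, not part of the original source): in "e10102e1030400"
// above, each 0xe1 octet announces a two-byte partial chunk ("0102", then
// "0304") and the trailing 0x00 is a final zero-length chunk, so the
// reassembled body is "01020304".
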
func TestPartialLengthReader(t *testing.T) {
for i, test := range partialLengthReaderTests {
r := &partialLengthReader{readerFromHex(test.hexInput), 0, true}
out, err := ioutil.ReadAll(r)
if test.err != nil {
if err != test.err {
t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err)
}
continue
}
if err != nil {
t.Errorf("%d: unexpected error: %s", i, err)
continue
}
got := fmt.Sprintf("%x", out)
if got != test.hexOutput {
t.Errorf("%d: got:%s want:%s", i, test.hexOutput, got)
}
}
}
var readHeaderTests = []struct {
hexInput string
structuralError bool
unexpectedEOF bool
tag int
length int64
hexOutput string
}{
{"", false, false, 0, 0, ""},
{"7f", true, false, 0, 0, ""},
// Old format headers
{"80", false, true, 0, 0, ""},
{"8001", false, true, 0, 1, ""},
{"800102", false, false, 0, 1, "02"},
{"81000102", false, false, 0, 1, "02"},
{"820000000102", false, false, 0, 1, "02"},
{"860000000102", false, false, 1, 1, "02"},
{"83010203", false, false, 0, -1, "010203"},
// New format headers
{"c0", false, true, 0, 0, ""},
{"c000", false, false, 0, 0, ""},
{"c00102", false, false, 0, 1, "02"},
{"c0020203", false, false, 0, 2, "0203"},
{"c00202", false, true, 0, 2, ""},
{"c3020203", false, false, 3, 2, "0203"},
}
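// Note (illustrative, not part of the original source): in the old-format
// cases above the first octet is 0x80 | tag<<2 | lengthType, where length
// types 0, 1 and 2 select one-, two- and four-octet lengths and 3 means
// indeterminate (reported here as length -1, as in "83010203"). The
// new-format cases use 0xc0 | tag followed by the length encodings exercised
// by readLengthTests, so "c3020203" is tag 3 with a two-byte body.
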
func TestReadHeader(t *testing.T) {
for i, test := range readHeaderTests {
tag, length, contents, err := readHeader(readerFromHex(test.hexInput))
if test.structuralError {
if _, ok := err.(errors.StructuralError); ok {
continue
}
t.Errorf("%d: expected StructuralError, got:%s", i, err)
continue
}
if err != nil {
if len(test.hexInput) == 0 && err == io.EOF {
continue
}
if !test.unexpectedEOF || err != io.ErrUnexpectedEOF {
t.Errorf("%d: unexpected error from readHeader: %s", i, err)
}
continue
}
if int(tag) != test.tag || length != test.length {
t.Errorf("%d: got:(%d,%d) want:(%d,%d)", i, int(tag), length, test.tag, test.length)
continue
}
body, err := ioutil.ReadAll(contents)
if err != nil {
if !test.unexpectedEOF || err != io.ErrUnexpectedEOF {
t.Errorf("%d: unexpected error from contents: %s", i, err)
}
continue
}
if test.unexpectedEOF {
t.Errorf("%d: expected ErrUnexpectedEOF from contents but got no error", i)
continue
}
got := fmt.Sprintf("%x", body)
if got != test.hexOutput {
t.Errorf("%d: got:%s want:%s", i, got, test.hexOutput)
}
}
}
func TestSerializeHeader(t *testing.T) {
tag := packetTypePublicKey
lengths := []int{0, 1, 2, 64, 192, 193, 8000, 8384, 8385, 10000}
for _, length := range lengths {
buf := bytes.NewBuffer(nil)
serializeHeader(buf, tag, length)
tag2, length2, _, err := readHeader(buf)
if err != nil {
t.Errorf("length %d, err: %s", length, err)
}
if tag2 != tag {
t.Errorf("length %d, tag incorrect (got %d, want %d)", length, tag2, tag)
}
if int(length2) != length {
t.Errorf("length %d, length incorrect (got %d)", length, length2)
}
}
}
func TestPartialLengths(t *testing.T) {
buf := bytes.NewBuffer(nil)
w := new(partialLengthWriter)
w.w = noOpCloser{buf}
const maxChunkSize = 64
var b [maxChunkSize]byte
var n uint8
for l := 1; l <= maxChunkSize; l++ {
for i := 0; i < l; i++ {
b[i] = n
n++
}
m, err := w.Write(b[:l])
if m != l {
t.Errorf("short write got: %d want: %d", m, l)
}
if err != nil {
t.Errorf("error from write: %s", err)
}
}
w.Close()
want := (maxChunkSize * (maxChunkSize + 1)) / 2
copyBuf := bytes.NewBuffer(nil)
r := &partialLengthReader{buf, 0, true}
m, err := io.Copy(copyBuf, r)
if m != int64(want) {
t.Errorf("short copy got: %d want: %d", m, want)
}
if err != nil {
t.Errorf("error from copy: %s", err)
}
copyBytes := copyBuf.Bytes()
for i := 0; i < want; i++ {
if copyBytes[i] != uint8(i) {
t.Errorf("bad pattern in copy at %d", i)
break
}
}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto/cipher"
"crypto/dsa"
"crypto/openpgp/elgamal"
"crypto/openpgp/errors"
"crypto/openpgp/s2k"
"crypto/rsa"
"crypto/sha1"
"io"
"io/ioutil"
"math/big"
"strconv"
"time"
)
// PrivateKey represents a possibly encrypted private key. See RFC 4880,
// section 5.5.3.
type PrivateKey struct {
PublicKey
Encrypted bool // if true then the private key is unavailable until Decrypt has been called.
encryptedData []byte
cipher CipherFunction
s2k func(out, in []byte)
PrivateKey interface{} // An *rsa.PrivateKey or *dsa.PrivateKey.
sha1Checksum bool
iv []byte
}
func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
func (pk *PrivateKey) parse(r io.Reader) (err error) {
err = (&pk.PublicKey).parse(r)
if err != nil {
return
}
var buf [1]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
s2kType := buf[0]
switch s2kType {
case 0:
pk.s2k = nil
pk.Encrypted = false
case 254, 255:
_, err = readFull(r, buf[:])
if err != nil {
return
}
pk.cipher = CipherFunction(buf[0])
pk.Encrypted = true
pk.s2k, err = s2k.Parse(r)
if err != nil {
return
}
if s2kType == 254 {
pk.sha1Checksum = true
}
default:
return errors.UnsupportedError("deprecated s2k function in private key")
}
if pk.Encrypted {
blockSize := pk.cipher.blockSize()
if blockSize == 0 {
return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
}
pk.iv = make([]byte, blockSize)
_, err = readFull(r, pk.iv)
if err != nil {
return
}
}
pk.encryptedData, err = ioutil.ReadAll(r)
if err != nil {
return
}
if !pk.Encrypted {
return pk.parsePrivateKey(pk.encryptedData)
}
return
}
func mod64kHash(d []byte) uint16 {
var h uint16
for _, b := range d {
h += uint16(b)
}
return h
}
func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
// TODO(agl): support encrypted private keys
buf := bytes.NewBuffer(nil)
err = pk.PublicKey.serializeWithoutHeaders(buf)
if err != nil {
return
}
buf.WriteByte(0 /* no encryption */ )
privateKeyBuf := bytes.NewBuffer(nil)
switch priv := pk.PrivateKey.(type) {
case *rsa.PrivateKey:
err = serializeRSAPrivateKey(privateKeyBuf, priv)
case *dsa.PrivateKey:
err = serializeDSAPrivateKey(privateKeyBuf, priv)
default:
err = errors.InvalidArgumentError("unknown private key type")
}
if err != nil {
return
}
ptype := packetTypePrivateKey
contents := buf.Bytes()
privateKeyBytes := privateKeyBuf.Bytes()
if pk.IsSubkey {
ptype = packetTypePrivateSubkey
}
err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2)
if err != nil {
return
}
_, err = w.Write(contents)
if err != nil {
return
}
_, err = w.Write(privateKeyBytes)
if err != nil {
return
}
checksum := mod64kHash(privateKeyBytes)
var checksumBytes [2]byte
checksumBytes[0] = byte(checksum >> 8)
checksumBytes[1] = byte(checksum)
_, err = w.Write(checksumBytes[:])
return
}
func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
err := writeBig(w, priv.D)
if err != nil {
return err
}
err = writeBig(w, priv.Primes[1])
if err != nil {
return err
}
err = writeBig(w, priv.Primes[0])
if err != nil {
return err
}
return writeBig(w, priv.Precomputed.Qinv)
}
func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
return writeBig(w, priv.X)
}
// Decrypt decrypts an encrypted private key using a passphrase.
func (pk *PrivateKey) Decrypt(passphrase []byte) error {
if !pk.Encrypted {
return nil
}
key := make([]byte, pk.cipher.KeySize())
pk.s2k(key, passphrase)
block := pk.cipher.new(key)
cfb := cipher.NewCFBDecrypter(block, pk.iv)
data := pk.encryptedData
cfb.XORKeyStream(data, data)
if pk.sha1Checksum {
if len(data) < sha1.Size {
return errors.StructuralError("truncated private key data")
}
h := sha1.New()
h.Write(data[:len(data)-sha1.Size])
sum := h.Sum(nil)
if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
return errors.StructuralError("private key checksum failure")
}
data = data[:len(data)-sha1.Size]
} else {
if len(data) < 2 {
return errors.StructuralError("truncated private key data")
}
var sum uint16
for i := 0; i < len(data)-2; i++ {
sum += uint16(data[i])
}
if data[len(data)-2] != uint8(sum>>8) ||
data[len(data)-1] != uint8(sum) {
return errors.StructuralError("private key checksum failure")
}
data = data[:len(data)-2]
}
return pk.parsePrivateKey(data)
}
func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
switch pk.PublicKey.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
return pk.parseRSAPrivateKey(data)
case PubKeyAlgoDSA:
return pk.parseDSAPrivateKey(data)
case PubKeyAlgoElGamal:
return pk.parseElGamalPrivateKey(data)
}
panic("impossible")
}
func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
rsaPriv := new(rsa.PrivateKey)
rsaPriv.PublicKey = *rsaPub
buf := bytes.NewBuffer(data)
d, _, err := readMPI(buf)
if err != nil {
return
}
p, _, err := readMPI(buf)
if err != nil {
return
}
q, _, err := readMPI(buf)
if err != nil {
return
}
rsaPriv.D = new(big.Int).SetBytes(d)
rsaPriv.Primes = make([]*big.Int, 2)
rsaPriv.Primes[0] = new(big.Int).SetBytes(p)
rsaPriv.Primes[1] = new(big.Int).SetBytes(q)
rsaPriv.Precompute()
pk.PrivateKey = rsaPriv
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
dsaPriv := new(dsa.PrivateKey)
dsaPriv.PublicKey = *dsaPub
buf := bytes.NewBuffer(data)
x, _, err := readMPI(buf)
if err != nil {
return
}
dsaPriv.X = new(big.Int).SetBytes(x)
pk.PrivateKey = dsaPriv
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
priv := new(elgamal.PrivateKey)
priv.PublicKey = *pub
buf := bytes.NewBuffer(data)
x, _, err := readMPI(buf)
if err != nil {
return
}
priv.X = new(big.Int).SetBytes(x)
pk.PrivateKey = priv
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
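// Illustrative sketch, not part of the original libgo sources: wrapping a
// freshly generated *rsa.PrivateKey in an (unencrypted) OpenPGP private key
// packet and serializing it. The helper name and key size are arbitrary.
package packet

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"time"
)

func examplePrivateKeyPacket() ([]byte, error) {
	rsaPriv, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		return nil, err
	}
	pk := NewRSAPrivateKey(time.Now(), rsaPriv)
	buf := new(bytes.Buffer)
	// Serialize writes the key material unencrypted; see the TODO above
	// about supporting encrypted private keys.
	if err := pk.Serialize(buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}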
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"testing"
"time"
)
var privateKeyTests = []struct {
privateKeyHex string
creationTime time.Time
}{
{
privKeyRSAHex,
time.Unix(0x4cc349a8, 0),
},
{
privKeyElGamalHex,
time.Unix(0x4df9ee1a, 0),
},
}
func TestPrivateKeyRead(t *testing.T) {
for i, test := range privateKeyTests {
packet, err := Read(readerFromHex(test.privateKeyHex))
if err != nil {
t.Errorf("#%d: failed to parse: %s", i, err)
continue
}
privKey := packet.(*PrivateKey)
if !privKey.Encrypted {
t.Errorf("#%d: private key isn't encrypted", i)
continue
}
err = privKey.Decrypt([]byte("testing"))
if err != nil {
t.Errorf("#%d: failed to decrypt: %s", i, err)
continue
}
if !privKey.CreationTime.Equal(test.creationTime) || privKey.Encrypted {
t.Errorf("#%d: bad result, got: %#v", i, privKey)
}
}
}
// Generated with `gpg --export-secret-keys "Test Key 2"`
const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec"
// Generated by `gpg --export-secret-keys` followed by a manual extraction of
// the ElGamal subkey from the packets.
const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc"
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"encoding/hex"
"testing"
"time"
)
var pubKeyTests = []struct {
hexData string
hexFingerprint string
creationTime time.Time
pubKeyAlgo PublicKeyAlgorithm
keyId uint64
keyIdString string
keyIdShort string
}{
{rsaPkDataHex, rsaFingerprintHex, time.Unix(0x4d3c5c10, 0), PubKeyAlgoRSA, 0xa34d7e18c20c31bb, "A34D7E18C20C31BB", "C20C31BB"},
{dsaPkDataHex, dsaFingerprintHex, time.Unix(0x4d432f89, 0), PubKeyAlgoDSA, 0x8e8fbe54062f19ed, "8E8FBE54062F19ED", "062F19ED"},
}
func TestPublicKeyRead(t *testing.T) {
for i, test := range pubKeyTests {
packet, err := Read(readerFromHex(test.hexData))
if err != nil {
t.Errorf("#%d: Read error: %s", i, err)
continue
}
pk, ok := packet.(*PublicKey)
if !ok {
t.Errorf("#%d: failed to parse, got: %#v", i, packet)
continue
}
if pk.PubKeyAlgo != test.pubKeyAlgo {
t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo)
}
if !pk.CreationTime.Equal(test.creationTime) {
t.Errorf("#%d: bad creation time got:%v want:%v", i, pk.CreationTime, test.creationTime)
}
expectedFingerprint, _ := hex.DecodeString(test.hexFingerprint)
if !bytes.Equal(expectedFingerprint, pk.Fingerprint[:]) {
t.Errorf("#%d: bad fingerprint got:%x want:%x", i, pk.Fingerprint[:], expectedFingerprint)
}
if pk.KeyId != test.keyId {
t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId)
}
if g, e := pk.KeyIdString(), test.keyIdString; g != e {
t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e)
}
if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e {
t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e)
}
}
}
func TestPublicKeySerialize(t *testing.T) {
for i, test := range pubKeyTests {
packet, err := Read(readerFromHex(test.hexData))
if err != nil {
t.Errorf("#%d: Read error: %s", i, err)
continue
}
pk, ok := packet.(*PublicKey)
if !ok {
t.Errorf("#%d: failed to parse, got: %#v", i, packet)
continue
}
serializeBuf := bytes.NewBuffer(nil)
err = pk.Serialize(serializeBuf)
if err != nil {
t.Errorf("#%d: failed to serialize: %s", i, err)
continue
}
packet, err = Read(serializeBuf)
if err != nil {
t.Errorf("#%d: Read error (from serialized data): %s", i, err)
continue
}
pk, ok = packet.(*PublicKey)
if !ok {
t.Errorf("#%d: failed to parse serialized data, got: %#v", i, packet)
continue
}
}
}
const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb"
const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001"
const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed"
const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0"
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto/openpgp/errors"
"io"
)
// Reader reads packets from an io.Reader and allows packets to be 'unread' so
// that they result from the next call to Next.
type Reader struct {
q []Packet
readers []io.Reader
}
// Next returns the most recently unread Packet, or reads another packet from
// the top-most io.Reader. Unknown packet types are skipped.
func (r *Reader) Next() (p Packet, err error) {
if len(r.q) > 0 {
p = r.q[len(r.q)-1]
r.q = r.q[:len(r.q)-1]
return
}
for len(r.readers) > 0 {
p, err = Read(r.readers[len(r.readers)-1])
if err == nil {
return
}
if err == io.EOF {
r.readers = r.readers[:len(r.readers)-1]
continue
}
if _, ok := err.(errors.UnknownPacketTypeError); !ok {
return nil, err
}
}
return nil, io.EOF
}
// Push causes the Reader to start reading from a new io.Reader. When an EOF
// error is seen from the new io.Reader, it is popped and the Reader continues
// to read from the next most recent io.Reader.
func (r *Reader) Push(reader io.Reader) {
r.readers = append(r.readers, reader)
}
// Unread causes the given Packet to be returned from the next call to Next.
func (r *Reader) Unread(p Packet) {
r.q = append(r.q, p)
}
func NewReader(r io.Reader) *Reader {
return &Reader{
q: nil,
readers: []io.Reader{r},
}
}
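// Illustrative sketch, not part of the original libgo sources: iterating over
// every packet in a stream with Reader.Next, which returns io.EOF once all
// pushed io.Readers are exhausted (unknown packet types are skipped, as
// documented above). The readAllPackets helper and its callback are
// hypothetical.
package packet

import "io"

func readAllPackets(r io.Reader, handle func(Packet)) error {
	packets := NewReader(r)
	for {
		p, err := packets.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		handle(p)
	}
}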