libgo: Update to October 24 version of master library.
From-SVN: r204466
parent f20f261304
commit f038dae646
596 changed files with 32029 additions and 7466 deletions
@@ -1,4 +1,4 @@
-a7bd9a33067b
+7ebbddd21330
 
 The first line of this file holds the Mercurial revision number of the
 last merge done from the master library sources.
@@ -37,7 +37,8 @@ AM_CPPFLAGS = -I $(srcdir)/runtime $(LIBFFIINCS) $(PTHREAD_CFLAGS)
 
 ACLOCAL_AMFLAGS = -I ./config -I ../config
 
-AM_CFLAGS = -fexceptions -fplan9-extensions $(SPLIT_STACK) $(WARN_CFLAGS) \
+AM_CFLAGS = -fexceptions -fnon-call-exceptions -fplan9-extensions \
+	$(SPLIT_STACK) $(WARN_CFLAGS) \
 	$(STRINGOPS_FLAG) $(OSCFLAGS) \
 	-I $(srcdir)/../libgcc -I $(srcdir)/../libbacktrace \
 	-I $(MULTIBUILDTOP)../../gcc/include
@@ -103,6 +104,7 @@ toolexeclibgo_DATA = \
 	bufio.gox \
 	bytes.gox \
 	crypto.gox \
+	encoding.gox \
 	errors.gox \
 	expvar.gox \
 	flag.gox \
@@ -251,6 +253,11 @@ toolexeclibgoimage_DATA = \
 	image/jpeg.gox \
 	image/png.gox
 
+toolexeclibgoimagecolordir = $(toolexeclibgoimagedir)/color
+
+toolexeclibgoimagecolor_DATA = \
+	image/color/palette.gox
+
 toolexeclibgoindexdir = $(toolexeclibgodir)/index
 
 toolexeclibgoindex_DATA = \
@@ -573,6 +580,9 @@ go_bytes_c_files = \
 go_crypto_files = \
 	go/crypto/crypto.go
 
+go_encoding_files = \
+	go/encoding/encoding.go
+
 go_errors_files = \
 	go/errors/errors.go
 
@@ -669,7 +679,7 @@ go_net_fd_os_file =
 go_net_newpollserver_file =
 else # !LIBGO_IS_LINUX && !LIBGO_IS_RTEMS
 if LIBGO_IS_NETBSD
-go_net_fd_os_file = go/net/fd_bsd.go
+go_net_fd_os_file =
 go_net_newpollserver_file =
 else # !LIBGO_IS_NETBSD && !LIBGO_IS_LINUX && !LIBGO_IS_RTEMS
 # By default use select with pipes. Most systems should have
@@ -726,9 +736,13 @@ else
 if LIBGO_IS_FREEBSD
 go_net_sendfile_file = go/net/sendfile_freebsd.go
 else
+if LIBGO_IS_DRAGONFLY
+go_net_sendfile_file = go/net/sendfile_dragonfly.go
+else
 go_net_sendfile_file = go/net/sendfile_stub.go
 endif
 endif
+endif
 
 if LIBGO_IS_LINUX
 go_net_interface_file = go/net/interface_linux.go
@@ -736,9 +750,13 @@ else
 if LIBGO_IS_NETBSD
 go_net_interface_file = go/net/interface_netbsd.go
 else
+if LIBGO_IS_DRAGONFLY
+go_net_interface_file = go/net/interface_dragonfly.go
+else
 go_net_interface_file = go/net/interface_stub.go
 endif
 endif
+endif
 
 if LIBGO_IS_LINUX
 go_net_cloexec_file = go/net/sock_cloexec.go
@@ -746,13 +764,13 @@ else
 go_net_cloexec_file = go/net/sys_cloexec.go
 endif
 
-if LIBGO_IS_LINUX
-go_net_poll_file = go/net/fd_poll_runtime.go
+if LIBGO_IS_OPENBSD
+go_net_tcpsockopt_file = go/net/tcpsockopt_openbsd.go
 else
 if LIBGO_IS_DARWIN
-go_net_poll_file = go/net/fd_poll_runtime.go
+go_net_tcpsockopt_file = go/net/tcpsockopt_darwin.go
 else
-go_net_poll_file = go/net/fd_poll_unix.go
+go_net_tcpsockopt_file = go/net/tcpsockopt_unix.go
 endif
 endif
 
@@ -766,6 +784,7 @@ go_net_files = \
 	go/net/dnsconfig_unix.go \
 	go/net/dnsmsg.go \
 	$(go_net_newpollserver_file) \
+	go/net/fd_mutex.go \
 	go/net/fd_unix.go \
 	$(go_net_fd_os_file) \
 	go/net/file_unix.go \
@@ -783,18 +802,21 @@ go_net_files = \
 	go/net/net.go \
 	go/net/parse.go \
 	go/net/pipe.go \
-	$(go_net_poll_file) \
+	go/net/fd_poll_runtime.go \
 	go/net/port.go \
+	go/net/port_unix.go \
+	go/net/race0.go \
 	$(go_net_sendfile_file) \
 	go/net/singleflight.go \
 	go/net/sock_posix.go \
-	go/net/sock_unix.go \
 	$(go_net_sock_file) \
 	go/net/sockopt_posix.go \
 	$(go_net_sockopt_file) \
 	$(go_net_sockoptip_file) \
 	go/net/tcpsock.go \
 	go/net/tcpsock_posix.go \
+	go/net/tcpsockopt_posix.go \
+	$(go_net_tcpsockopt_file) \
 	go/net/udpsock.go \
 	go/net/udpsock_posix.go \
 	go/net/unixsock.go \
@@ -818,6 +840,12 @@ go_os_dir_file = go/os/dir_regfile.go
 endif
 endif
 
+if LIBGO_IS_DARWIN
+go_os_getwd_file = go/os/getwd_darwin.go
+else
+go_os_getwd_file =
+endif
+
 if LIBGO_IS_LINUX
 go_os_sys_file = go/os/sys_linux.go
 else
@@ -854,6 +882,9 @@ else
 if LIBGO_IS_NETBSD
 go_os_stat_file = go/os/stat_atimespec.go
 else
+if LIBGO_IS_DRAGONFLY
+go_os_stat_file = go/os/stat_dragonfly.go
+else
 go_os_stat_file = go/os/stat.go
 endif
 endif
@@ -861,6 +892,7 @@ endif
 endif
 endif
 endif
+endif
 
 if LIBGO_IS_LINUX
 go_os_pipe_file = go/os/pipe_linux.go
@@ -874,7 +906,7 @@ go_os_files = \
 	go/os/doc.go \
 	go/os/env.go \
 	go/os/error.go \
-	go/os/error_posix.go \
+	go/os/error_unix.go \
 	go/os/exec.go \
 	go/os/exec_posix.go \
 	go/os/exec_unix.go \
@@ -882,6 +914,7 @@ go_os_files = \
 	go/os/file_posix.go \
 	go/os/file_unix.go \
 	go/os/getwd.go \
+	$(go_os_getwd_file) \
 	go/os/path.go \
 	go/os/path_unix.go \
 	$(go_os_pipe_file) \
@@ -970,7 +1003,10 @@ go_strings_files = \
 	go/strings/reader.go \
 	go/strings/replace.go \
 	go/strings/search.go \
-	go/strings/strings.go
+	go/strings/strings.go \
+	go/strings/strings_decl.go
+go_strings_c_files = \
+	go/strings/indexbyte.c
 
 go_sync_files = \
 	go/sync/cond.go \
@@ -1000,6 +1036,7 @@ go_syslog_c_files = \
 go_testing_files = \
 	go/testing/allocs.go \
 	go/testing/benchmark.go \
+	go/testing/cover.go \
 	go/testing/example.go \
 	go/testing/testing.go
 
@@ -1048,6 +1085,7 @@ go_archive_tar_files = \
 
 go_archive_zip_files = \
 	go/archive/zip/reader.go \
+	go/archive/zip/register.go \
 	go/archive/zip/struct.go \
 	go/archive/zip/writer.go
 
@@ -1098,6 +1136,7 @@ go_crypto_cipher_files = \
 	go/crypto/cipher/cfb.go \
 	go/crypto/cipher/cipher.go \
 	go/crypto/cipher/ctr.go \
+	go/crypto/cipher/gcm.go \
 	go/crypto/cipher/io.go \
 	go/crypto/cipher/ofb.go
 go_crypto_des_files = \
@@ -1110,7 +1149,8 @@ go_crypto_ecdsa_files = \
 	go/crypto/ecdsa/ecdsa.go
 go_crypto_elliptic_files = \
 	go/crypto/elliptic/elliptic.go \
-	go/crypto/elliptic/p224.go
+	go/crypto/elliptic/p224.go \
+	go/crypto/elliptic/p256.go
 go_crypto_hmac_files = \
 	go/crypto/hmac/hmac.go
 go_crypto_md5_files = \
@@ -1125,6 +1165,7 @@ go_crypto_rc4_files = \
 	go/crypto/rc4/rc4_ref.go
 go_crypto_rsa_files = \
 	go/crypto/rsa/pkcs1v15.go \
+	go/crypto/rsa/pss.go \
 	go/crypto/rsa/rsa.go
 go_crypto_sha1_files = \
 	go/crypto/sha1/sha1.go \
@@ -1308,11 +1349,15 @@ go_image_color_files = \
 	go/image/color/color.go \
 	go/image/color/ycbcr.go
 
+go_image_color_palette_files = \
+	go/image/color/palette/palette.go
+
 go_image_draw_files = \
 	go/image/draw/draw.go
 
 go_image_gif_files = \
-	go/image/gif/reader.go
+	go/image/gif/reader.go \
+	go/image/gif/writer.go
 
 go_image_jpeg_files = \
 	go/image/jpeg/fdct.go \
@@ -1766,6 +1811,7 @@ libgo_go_objs = \
 	bytes.lo \
 	bytes/index.lo \
 	crypto.lo \
+	encoding.lo \
 	errors.lo \
 	expvar.lo \
 	flag.lo \
@@ -1787,6 +1833,7 @@ libgo_go_objs = \
 	sort.lo \
 	strconv.lo \
 	strings.lo \
+	strings/index.lo \
 	sync.lo \
 	syscall.lo \
 	syscall/errno.lo \
@@ -1863,6 +1910,7 @@ libgo_go_objs = \
 	net/http/httputil.lo \
 	net/http/pprof.lo \
 	image/color.lo \
+	image/color/palette.lo \
 	image/draw.lo \
 	image/gif.lo \
 	image/jpeg.lo \
@@ -2033,6 +2081,15 @@ crypto/check: $(CHECK_DEPS)
 	@$(CHECK)
 .PHONY: crypto/check
 
+@go_include@ encoding.lo.dep
+encoding.lo.dep: $(go_encoding_files)
+	$(BUILDDEPS)
+encoding.lo: $(go_encoding_files)
+	$(BUILDPACKAGE)
+encoding/check: $(CHECK_DEPS)
+	@$(CHECK)
+.PHONY: encoding/check
+
 @go_include@ errors.lo.dep
 errors.lo.dep: $(go_errors_files)
 	$(BUILDDEPS)
@@ -2214,6 +2271,9 @@ strings.lo.dep: $(go_strings_files)
 	$(BUILDDEPS)
 strings.lo: $(go_strings_files)
 	$(BUILDPACKAGE)
+strings/index.lo: $(go_strings_c_files)
+	@$(MKDIR_P) strings
+	$(LTCOMPILE) -c -o strings/index.lo $(srcdir)/go/strings/indexbyte.c
 strings/check: $(CHECK_DEPS)
 	@$(CHECK)
 .PHONY: strings/check
@@ -2821,6 +2881,15 @@ image/color/check: $(CHECK_DEPS)
 	@$(CHECK)
 .PHONY: image/color/check
 
+@go_include@ image/color/palette.lo.dep
+image/color/palette.lo.dep: $(go_image_color_palette_files)
+	$(BUILDDEPS)
+image/color/palette.lo: $(go_image_color_palette_files)
+	$(BUILDPACKAGE)
+image/color/palette/check: $(CHECK_DEPS)
+	@$(CHECK)
+.PHONY: image/color/palette/check
+
 @go_include@ image/draw.lo.dep
 image/draw.lo.dep: $(go_image_draw_files)
 	$(BUILDDEPS)
@@ -3236,6 +3305,8 @@ bytes.gox: bytes.lo
 	$(BUILDGOX)
 crypto.gox: crypto.lo
 	$(BUILDGOX)
+encoding.gox: encoding.lo
+	$(BUILDGOX)
 errors.gox: errors.lo
 	$(BUILDGOX)
 expvar.gox: expvar.lo
@@ -3433,6 +3504,9 @@ image/jpeg.gox: image/jpeg.lo
 image/png.gox: image/png.lo
 	$(BUILDGOX)
 
+image/color/palette.gox: image/color/palette.lo
+	$(BUILDGOX)
+
 index/suffixarray.gox: index/suffixarray.lo
 	$(BUILDGOX)
 
@ -105,6 +105,7 @@ am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \
|
|||
"$(DESTDIR)$(toolexeclibgohashdir)" \
|
||||
"$(DESTDIR)$(toolexeclibgohtmldir)" \
|
||||
"$(DESTDIR)$(toolexeclibgoimagedir)" \
|
||||
"$(DESTDIR)$(toolexeclibgoimagecolordir)" \
|
||||
"$(DESTDIR)$(toolexeclibgoindexdir)" \
|
||||
"$(DESTDIR)$(toolexeclibgoiodir)" \
|
||||
"$(DESTDIR)$(toolexeclibgologdir)" \
|
||||
|
@ -132,12 +133,12 @@ libgobegin_a_OBJECTS = $(am_libgobegin_a_OBJECTS)
|
|||
LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
|
||||
am__DEPENDENCIES_1 =
|
||||
am__DEPENDENCIES_2 = bufio.lo bytes.lo bytes/index.lo crypto.lo \
|
||||
errors.lo expvar.lo flag.lo fmt.lo hash.lo html.lo image.lo \
|
||||
io.lo log.lo math.lo mime.lo net.lo os.lo path.lo \
|
||||
encoding.lo errors.lo expvar.lo flag.lo fmt.lo hash.lo html.lo \
|
||||
image.lo io.lo log.lo math.lo mime.lo net.lo os.lo path.lo \
|
||||
reflect-go.lo reflect/makefunc.lo regexp.lo runtime-go.lo \
|
||||
sort.lo strconv.lo strings.lo sync.lo syscall.lo \
|
||||
syscall/errno.lo syscall/signame.lo syscall/wait.lo testing.lo \
|
||||
time-go.lo unicode.lo archive/tar.lo archive/zip.lo \
|
||||
sort.lo strconv.lo strings.lo strings/index.lo sync.lo \
|
||||
syscall.lo syscall/errno.lo syscall/signame.lo syscall/wait.lo \
|
||||
testing.lo time-go.lo unicode.lo archive/tar.lo archive/zip.lo \
|
||||
compress/bzip2.lo compress/flate.lo compress/gzip.lo \
|
||||
compress/lzw.lo compress/zlib.lo container/heap.lo \
|
||||
container/list.lo container/ring.lo crypto/aes.lo \
|
||||
|
@ -157,13 +158,13 @@ am__DEPENDENCIES_2 = bufio.lo bytes.lo bytes/index.lo crypto.lo \
|
|||
hash/crc64.lo hash/fnv.lo net/http/cgi.lo \
|
||||
net/http/cookiejar.lo net/http/fcgi.lo net/http/httptest.lo \
|
||||
net/http/httputil.lo net/http/pprof.lo image/color.lo \
|
||||
image/draw.lo image/gif.lo image/jpeg.lo image/png.lo \
|
||||
index/suffixarray.lo io/ioutil.lo log/syslog.lo \
|
||||
log/syslog/syslog_c.lo math/big.lo math/cmplx.lo math/rand.lo \
|
||||
mime/multipart.lo net/http.lo net/mail.lo net/rpc.lo \
|
||||
net/smtp.lo net/textproto.lo net/url.lo old/regexp.lo \
|
||||
old/template.lo os/exec.lo $(am__DEPENDENCIES_1) os/signal.lo \
|
||||
os/user.lo path/filepath.lo regexp/syntax.lo \
|
||||
image/color/palette.lo image/draw.lo image/gif.lo \
|
||||
image/jpeg.lo image/png.lo index/suffixarray.lo io/ioutil.lo \
|
||||
log/syslog.lo log/syslog/syslog_c.lo math/big.lo math/cmplx.lo \
|
||||
math/rand.lo mime/multipart.lo net/http.lo net/mail.lo \
|
||||
net/rpc.lo net/smtp.lo net/textproto.lo net/url.lo \
|
||||
old/regexp.lo old/template.lo os/exec.lo $(am__DEPENDENCIES_1) \
|
||||
os/signal.lo os/user.lo path/filepath.lo regexp/syntax.lo \
|
||||
net/rpc/jsonrpc.lo runtime/debug.lo runtime/pprof.lo \
|
||||
sync/atomic.lo sync/atomic_c.lo text/scanner.lo \
|
||||
text/tabwriter.lo text/template.lo text/template/parse.lo \
|
||||
|
@ -260,16 +261,16 @@ DATA = $(toolexeclibgo_DATA) $(toolexeclibgoarchive_DATA) \
|
|||
$(toolexeclibgodebug_DATA) $(toolexeclibgoencoding_DATA) \
|
||||
$(toolexeclibgoexp_DATA) $(toolexeclibgogo_DATA) \
|
||||
$(toolexeclibgohash_DATA) $(toolexeclibgohtml_DATA) \
|
||||
$(toolexeclibgoimage_DATA) $(toolexeclibgoindex_DATA) \
|
||||
$(toolexeclibgoio_DATA) $(toolexeclibgolog_DATA) \
|
||||
$(toolexeclibgomath_DATA) $(toolexeclibgomime_DATA) \
|
||||
$(toolexeclibgonet_DATA) $(toolexeclibgonethttp_DATA) \
|
||||
$(toolexeclibgonetrpc_DATA) $(toolexeclibgoold_DATA) \
|
||||
$(toolexeclibgoos_DATA) $(toolexeclibgopath_DATA) \
|
||||
$(toolexeclibgoregexp_DATA) $(toolexeclibgoruntime_DATA) \
|
||||
$(toolexeclibgosync_DATA) $(toolexeclibgotesting_DATA) \
|
||||
$(toolexeclibgotext_DATA) $(toolexeclibgotexttemplate_DATA) \
|
||||
$(toolexeclibgounicode_DATA)
|
||||
$(toolexeclibgoimage_DATA) $(toolexeclibgoimagecolor_DATA) \
|
||||
$(toolexeclibgoindex_DATA) $(toolexeclibgoio_DATA) \
|
||||
$(toolexeclibgolog_DATA) $(toolexeclibgomath_DATA) \
|
||||
$(toolexeclibgomime_DATA) $(toolexeclibgonet_DATA) \
|
||||
$(toolexeclibgonethttp_DATA) $(toolexeclibgonetrpc_DATA) \
|
||||
$(toolexeclibgoold_DATA) $(toolexeclibgoos_DATA) \
|
||||
$(toolexeclibgopath_DATA) $(toolexeclibgoregexp_DATA) \
|
||||
$(toolexeclibgoruntime_DATA) $(toolexeclibgosync_DATA) \
|
||||
$(toolexeclibgotesting_DATA) $(toolexeclibgotext_DATA) \
|
||||
$(toolexeclibgotexttemplate_DATA) $(toolexeclibgounicode_DATA)
|
||||
RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
|
||||
distclean-recursive maintainer-clean-recursive
|
||||
AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
|
||||
|
@ -443,7 +444,8 @@ WARN_CFLAGS = $(WARN_FLAGS) $(WERROR)
|
|||
# -I/-D flags to pass when compiling.
|
||||
AM_CPPFLAGS = -I $(srcdir)/runtime $(LIBFFIINCS) $(PTHREAD_CFLAGS)
|
||||
ACLOCAL_AMFLAGS = -I ./config -I ../config
|
||||
AM_CFLAGS = -fexceptions -fplan9-extensions $(SPLIT_STACK) $(WARN_CFLAGS) \
|
||||
AM_CFLAGS = -fexceptions -fnon-call-exceptions -fplan9-extensions \
|
||||
$(SPLIT_STACK) $(WARN_CFLAGS) \
|
||||
$(STRINGOPS_FLAG) $(OSCFLAGS) \
|
||||
-I $(srcdir)/../libgcc -I $(srcdir)/../libbacktrace \
|
||||
-I $(MULTIBUILDTOP)../../gcc/include
|
||||
|
@ -506,6 +508,7 @@ toolexeclibgo_DATA = \
|
|||
bufio.gox \
|
||||
bytes.gox \
|
||||
crypto.gox \
|
||||
encoding.gox \
|
||||
errors.gox \
|
||||
expvar.gox \
|
||||
flag.gox \
|
||||
|
@ -640,6 +643,10 @@ toolexeclibgoimage_DATA = \
|
|||
image/jpeg.gox \
|
||||
image/png.gox
|
||||
|
||||
toolexeclibgoimagecolordir = $(toolexeclibgoimagedir)/color
|
||||
toolexeclibgoimagecolor_DATA = \
|
||||
image/color/palette.gox
|
||||
|
||||
toolexeclibgoindexdir = $(toolexeclibgodir)/index
|
||||
toolexeclibgoindex_DATA = \
|
||||
index/suffixarray.gox
|
||||
|
@ -865,6 +872,9 @@ go_bytes_c_files = \
|
|||
go_crypto_files = \
|
||||
go/crypto/crypto.go
|
||||
|
||||
go_encoding_files = \
|
||||
go/encoding/encoding.go
|
||||
|
||||
go_errors_files = \
|
||||
go/errors/errors.go
|
||||
|
||||
|
@ -955,7 +965,7 @@ go_mime_files = \
|
|||
# By default use select with pipes. Most systems should have
|
||||
# something better.
|
||||
@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_RTEMS_FALSE@go_net_fd_os_file = go/net/fd_select.go
|
||||
@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_TRUE@@LIBGO_IS_RTEMS_FALSE@go_net_fd_os_file = go/net/fd_bsd.go
|
||||
@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_TRUE@@LIBGO_IS_RTEMS_FALSE@go_net_fd_os_file =
|
||||
@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_RTEMS_FALSE@go_net_fd_os_file =
|
||||
@LIBGO_IS_RTEMS_TRUE@go_net_fd_os_file = go/net/fd_select.go
|
||||
@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_RTEMS_FALSE@go_net_newpollserver_file =
|
||||
|
@ -986,17 +996,19 @@ go_mime_files = \
|
|||
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@go_net_sockoptip_file = go/net/sockoptip_bsd.go go/net/sockoptip_posix.go
|
||||
@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_LINUX_FALSE@go_net_sockoptip_file = go/net/sockoptip_linux.go go/net/sockoptip_posix.go
|
||||
@LIBGO_IS_LINUX_TRUE@go_net_sockoptip_file = go/net/sockoptip_linux.go go/net/sockoptip_posix.go
|
||||
@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_LINUX_FALSE@go_net_sendfile_file = go/net/sendfile_stub.go
|
||||
@LIBGO_IS_DRAGONFLY_FALSE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_LINUX_FALSE@go_net_sendfile_file = go/net/sendfile_stub.go
|
||||
@LIBGO_IS_DRAGONFLY_TRUE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_LINUX_FALSE@go_net_sendfile_file = go/net/sendfile_dragonfly.go
|
||||
@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_LINUX_FALSE@go_net_sendfile_file = go/net/sendfile_freebsd.go
|
||||
@LIBGO_IS_LINUX_TRUE@go_net_sendfile_file = go/net/sendfile_linux.go
|
||||
@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@go_net_interface_file = go/net/interface_stub.go
|
||||
@LIBGO_IS_DRAGONFLY_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@go_net_interface_file = go/net/interface_stub.go
|
||||
@LIBGO_IS_DRAGONFLY_TRUE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@go_net_interface_file = go/net/interface_dragonfly.go
|
||||
@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_TRUE@go_net_interface_file = go/net/interface_netbsd.go
|
||||
@LIBGO_IS_LINUX_TRUE@go_net_interface_file = go/net/interface_linux.go
|
||||
@LIBGO_IS_LINUX_FALSE@go_net_cloexec_file = go/net/sys_cloexec.go
|
||||
@LIBGO_IS_LINUX_TRUE@go_net_cloexec_file = go/net/sock_cloexec.go
|
||||
@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_LINUX_FALSE@go_net_poll_file = go/net/fd_poll_unix.go
|
||||
@LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_LINUX_FALSE@go_net_poll_file = go/net/fd_poll_runtime.go
|
||||
@LIBGO_IS_LINUX_TRUE@go_net_poll_file = go/net/fd_poll_runtime.go
|
||||
@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_OPENBSD_FALSE@go_net_tcpsockopt_file = go/net/tcpsockopt_unix.go
|
||||
@LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_OPENBSD_FALSE@go_net_tcpsockopt_file = go/net/tcpsockopt_darwin.go
|
||||
@LIBGO_IS_OPENBSD_TRUE@go_net_tcpsockopt_file = go/net/tcpsockopt_openbsd.go
|
||||
go_net_files = \
|
||||
go/net/cgo_unix.go \
|
||||
$(go_net_cgo_file) \
|
||||
|
@ -1007,6 +1019,7 @@ go_net_files = \
|
|||
go/net/dnsconfig_unix.go \
|
||||
go/net/dnsmsg.go \
|
||||
$(go_net_newpollserver_file) \
|
||||
go/net/fd_mutex.go \
|
||||
go/net/fd_unix.go \
|
||||
$(go_net_fd_os_file) \
|
||||
go/net/file_unix.go \
|
||||
|
@ -1024,18 +1037,21 @@ go_net_files = \
|
|||
go/net/net.go \
|
||||
go/net/parse.go \
|
||||
go/net/pipe.go \
|
||||
$(go_net_poll_file) \
|
||||
go/net/fd_poll_runtime.go \
|
||||
go/net/port.go \
|
||||
go/net/port_unix.go \
|
||||
go/net/race0.go \
|
||||
$(go_net_sendfile_file) \
|
||||
go/net/singleflight.go \
|
||||
go/net/sock_posix.go \
|
||||
go/net/sock_unix.go \
|
||||
$(go_net_sock_file) \
|
||||
go/net/sockopt_posix.go \
|
||||
$(go_net_sockopt_file) \
|
||||
$(go_net_sockoptip_file) \
|
||||
go/net/tcpsock.go \
|
||||
go/net/tcpsock_posix.go \
|
||||
go/net/tcpsockopt_posix.go \
|
||||
$(go_net_tcpsockopt_file) \
|
||||
go/net/udpsock.go \
|
||||
go/net/udpsock_posix.go \
|
||||
go/net/unixsock.go \
|
||||
|
@ -1046,12 +1062,15 @@ go_net_files = \
|
|||
@LIBGO_IS_386_TRUE@@LIBGO_IS_SOLARIS_TRUE@go_os_dir_file = go/os/dir_largefile.go
|
||||
@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_os_dir_file = go/os/dir_regfile.go
|
||||
@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_SOLARIS_FALSE@go_os_dir_file = go/os/dir_largefile.go
|
||||
@LIBGO_IS_DARWIN_FALSE@go_os_getwd_file =
|
||||
@LIBGO_IS_DARWIN_TRUE@go_os_getwd_file = go/os/getwd_darwin.go
|
||||
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_RTEMS_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_os_sys_file = go/os/sys_bsd.go
|
||||
@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_RTEMS_TRUE@@LIBGO_IS_SOLARIS_FALSE@go_os_sys_file = go/os/sys_uname.go
|
||||
@LIBGO_IS_IRIX_TRUE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_os_sys_file = go/os/sys_uname.go
|
||||
@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@go_os_sys_file = go/os/sys_uname.go
|
||||
@LIBGO_IS_LINUX_TRUE@go_os_sys_file = go/os/sys_linux.go
|
||||
@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_OPENBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_os_stat_file = go/os/stat.go
|
||||
@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_DRAGONFLY_FALSE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_OPENBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_os_stat_file = go/os/stat.go
|
||||
@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_DRAGONFLY_TRUE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_OPENBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_os_stat_file = go/os/stat_dragonfly.go
|
||||
@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_TRUE@@LIBGO_IS_OPENBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_os_stat_file = go/os/stat_atimespec.go
|
||||
@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_OPENBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_os_stat_file = go/os/stat_atimespec.go
|
||||
@LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_OPENBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_os_stat_file = go/os/stat_atimespec.go
|
||||
|
@ -1066,7 +1085,7 @@ go_os_files = \
|
|||
go/os/doc.go \
|
||||
go/os/env.go \
|
||||
go/os/error.go \
|
||||
go/os/error_posix.go \
|
||||
go/os/error_unix.go \
|
||||
go/os/exec.go \
|
||||
go/os/exec_posix.go \
|
||||
go/os/exec_unix.go \
|
||||
|
@ -1074,6 +1093,7 @@ go_os_files = \
|
|||
go/os/file_posix.go \
|
||||
go/os/file_unix.go \
|
||||
go/os/getwd.go \
|
||||
$(go_os_getwd_file) \
|
||||
go/os/path.go \
|
||||
go/os/path_unix.go \
|
||||
$(go_os_pipe_file) \
|
||||
|
@ -1149,7 +1169,11 @@ go_strings_files = \
|
|||
go/strings/reader.go \
|
||||
go/strings/replace.go \
|
||||
go/strings/search.go \
|
||||
go/strings/strings.go
|
||||
go/strings/strings.go \
|
||||
go/strings/strings_decl.go
|
||||
|
||||
go_strings_c_files = \
|
||||
go/strings/indexbyte.c
|
||||
|
||||
go_sync_files = \
|
||||
go/sync/cond.go \
|
||||
|
@ -1173,6 +1197,7 @@ go_syslog_c_files = \
|
|||
go_testing_files = \
|
||||
go/testing/allocs.go \
|
||||
go/testing/benchmark.go \
|
||||
go/testing/cover.go \
|
||||
go/testing/example.go \
|
||||
go/testing/testing.go
|
||||
|
||||
|
@ -1208,6 +1233,7 @@ go_archive_tar_files = \
|
|||
|
||||
go_archive_zip_files = \
|
||||
go/archive/zip/reader.go \
|
||||
go/archive/zip/register.go \
|
||||
go/archive/zip/struct.go \
|
||||
go/archive/zip/writer.go
|
||||
|
||||
|
@ -1259,6 +1285,7 @@ go_crypto_cipher_files = \
|
|||
go/crypto/cipher/cfb.go \
|
||||
go/crypto/cipher/cipher.go \
|
||||
go/crypto/cipher/ctr.go \
|
||||
go/crypto/cipher/gcm.go \
|
||||
go/crypto/cipher/io.go \
|
||||
go/crypto/cipher/ofb.go
|
||||
|
||||
|
@ -1275,7 +1302,8 @@ go_crypto_ecdsa_files = \
|
|||
|
||||
go_crypto_elliptic_files = \
|
||||
go/crypto/elliptic/elliptic.go \
|
||||
go/crypto/elliptic/p224.go
|
||||
go/crypto/elliptic/p224.go \
|
||||
go/crypto/elliptic/p256.go
|
||||
|
||||
go_crypto_hmac_files = \
|
||||
go/crypto/hmac/hmac.go
|
||||
|
@ -1295,6 +1323,7 @@ go_crypto_rc4_files = \
|
|||
|
||||
go_crypto_rsa_files = \
|
||||
go/crypto/rsa/pkcs1v15.go \
|
||||
go/crypto/rsa/pss.go \
|
||||
go/crypto/rsa/rsa.go
|
||||
|
||||
go_crypto_sha1_files = \
|
||||
|
@ -1509,11 +1538,15 @@ go_image_color_files = \
|
|||
go/image/color/color.go \
|
||||
go/image/color/ycbcr.go
|
||||
|
||||
go_image_color_palette_files = \
|
||||
go/image/color/palette/palette.go
|
||||
|
||||
go_image_draw_files = \
|
||||
go/image/draw/draw.go
|
||||
|
||||
go_image_gif_files = \
|
||||
go/image/gif/reader.go
|
||||
go/image/gif/reader.go \
|
||||
go/image/gif/writer.go
|
||||
|
||||
go_image_jpeg_files = \
|
||||
go/image/jpeg/fdct.go \
|
||||
|
@ -1849,6 +1882,7 @@ libgo_go_objs = \
|
|||
bytes.lo \
|
||||
bytes/index.lo \
|
||||
crypto.lo \
|
||||
encoding.lo \
|
||||
errors.lo \
|
||||
expvar.lo \
|
||||
flag.lo \
|
||||
|
@ -1870,6 +1904,7 @@ libgo_go_objs = \
|
|||
sort.lo \
|
||||
strconv.lo \
|
||||
strings.lo \
|
||||
strings/index.lo \
|
||||
sync.lo \
|
||||
syscall.lo \
|
||||
syscall/errno.lo \
|
||||
|
@ -1946,6 +1981,7 @@ libgo_go_objs = \
|
|||
net/http/httputil.lo \
|
||||
net/http/pprof.lo \
|
||||
image/color.lo \
|
||||
image/color/palette.lo \
|
||||
image/draw.lo \
|
||||
image/gif.lo \
|
||||
image/jpeg.lo \
|
||||
|
@ -3518,6 +3554,26 @@ uninstall-toolexeclibgoimageDATA:
|
|||
test -n "$$files" || exit 0; \
|
||||
echo " ( cd '$(DESTDIR)$(toolexeclibgoimagedir)' && rm -f" $$files ")"; \
|
||||
cd "$(DESTDIR)$(toolexeclibgoimagedir)" && rm -f $$files
|
||||
install-toolexeclibgoimagecolorDATA: $(toolexeclibgoimagecolor_DATA)
|
||||
@$(NORMAL_INSTALL)
|
||||
test -z "$(toolexeclibgoimagecolordir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgoimagecolordir)"
|
||||
@list='$(toolexeclibgoimagecolor_DATA)'; test -n "$(toolexeclibgoimagecolordir)" || list=; \
|
||||
for p in $$list; do \
|
||||
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
|
||||
echo "$$d$$p"; \
|
||||
done | $(am__base_list) | \
|
||||
while read files; do \
|
||||
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgoimagecolordir)'"; \
|
||||
$(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgoimagecolordir)" || exit $$?; \
|
||||
done
|
||||
|
||||
uninstall-toolexeclibgoimagecolorDATA:
|
||||
@$(NORMAL_UNINSTALL)
|
||||
@list='$(toolexeclibgoimagecolor_DATA)'; test -n "$(toolexeclibgoimagecolordir)" || list=; \
|
||||
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
|
||||
test -n "$$files" || exit 0; \
|
||||
echo " ( cd '$(DESTDIR)$(toolexeclibgoimagecolordir)' && rm -f" $$files ")"; \
|
||||
cd "$(DESTDIR)$(toolexeclibgoimagecolordir)" && rm -f $$files
|
||||
install-toolexeclibgoindexDATA: $(toolexeclibgoindex_DATA)
|
||||
@$(NORMAL_INSTALL)
|
||||
test -z "$(toolexeclibgoindexdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgoindexdir)"
|
||||
|
@ -4019,7 +4075,7 @@ all-am: Makefile $(LIBRARIES) $(LTLIBRARIES) all-multi $(DATA) \
|
|||
config.h
|
||||
installdirs: installdirs-recursive
|
||||
installdirs-am:
|
||||
for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptox509dir)" "$(DESTDIR)$(toolexeclibgodatabasedir)" "$(DESTDIR)$(toolexeclibgodatabasesqldir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohtmldir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgologdir)" "$(DESTDIR)$(toolexeclibgomathdir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgonethttpdir)" "$(DESTDIR)$(toolexeclibgonetrpcdir)" "$(DESTDIR)$(toolexeclibgoolddir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgopathdir)" "$(DESTDIR)$(toolexeclibgoregexpdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" "$(DESTDIR)$(toolexeclibgotestingdir)" "$(DESTDIR)$(toolexeclibgotextdir)" "$(DESTDIR)$(toolexeclibgotexttemplatedir)" "$(DESTDIR)$(toolexeclibgounicodedir)"; do \
|
||||
for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptox509dir)" "$(DESTDIR)$(toolexeclibgodatabasedir)" "$(DESTDIR)$(toolexeclibgodatabasesqldir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohtmldir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoimagecolordir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgologdir)" "$(DESTDIR)$(toolexeclibgomathdir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgonethttpdir)" "$(DESTDIR)$(toolexeclibgonetrpcdir)" "$(DESTDIR)$(toolexeclibgoolddir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgopathdir)" "$(DESTDIR)$(toolexeclibgoregexpdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" "$(DESTDIR)$(toolexeclibgotestingdir)" "$(DESTDIR)$(toolexeclibgotextdir)" "$(DESTDIR)$(toolexeclibgotexttemplatedir)" "$(DESTDIR)$(toolexeclibgounicodedir)"; do \
|
||||
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
|
||||
done
|
||||
install: install-recursive
|
||||
|
@ -4092,6 +4148,7 @@ install-exec-am: install-multi install-toolexeclibLIBRARIES \
|
|||
install-toolexeclibgoencodingDATA install-toolexeclibgoexpDATA \
|
||||
install-toolexeclibgogoDATA install-toolexeclibgohashDATA \
|
||||
install-toolexeclibgohtmlDATA install-toolexeclibgoimageDATA \
|
||||
install-toolexeclibgoimagecolorDATA \
|
||||
install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \
|
||||
install-toolexeclibgologDATA install-toolexeclibgomathDATA \
|
||||
install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \
|
||||
|
@ -4159,6 +4216,7 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \
|
|||
uninstall-toolexeclibgohashDATA \
|
||||
uninstall-toolexeclibgohtmlDATA \
|
||||
uninstall-toolexeclibgoimageDATA \
|
||||
uninstall-toolexeclibgoimagecolorDATA \
|
||||
uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \
|
||||
uninstall-toolexeclibgologDATA uninstall-toolexeclibgomathDATA \
|
||||
uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \
|
||||
|
@ -4203,6 +4261,7 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \
|
|||
install-toolexeclibgoencodingDATA install-toolexeclibgoexpDATA \
|
||||
install-toolexeclibgogoDATA install-toolexeclibgohashDATA \
|
||||
install-toolexeclibgohtmlDATA install-toolexeclibgoimageDATA \
|
||||
install-toolexeclibgoimagecolorDATA \
|
||||
install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \
|
||||
install-toolexeclibgologDATA install-toolexeclibgomathDATA \
|
||||
install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \
|
||||
|
@ -4234,6 +4293,7 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \
|
|||
uninstall-toolexeclibgohashDATA \
|
||||
uninstall-toolexeclibgohtmlDATA \
|
||||
uninstall-toolexeclibgoimageDATA \
|
||||
uninstall-toolexeclibgoimagecolorDATA \
|
||||
uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \
|
||||
uninstall-toolexeclibgologDATA uninstall-toolexeclibgomathDATA \
|
||||
uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \
|
||||
|
@ -4391,6 +4451,15 @@ crypto/check: $(CHECK_DEPS)
|
|||
@$(CHECK)
|
||||
.PHONY: crypto/check
|
||||
|
||||
@go_include@ encoding.lo.dep
|
||||
encoding.lo.dep: $(go_encoding_files)
|
||||
$(BUILDDEPS)
|
||||
encoding.lo: $(go_encoding_files)
|
||||
$(BUILDPACKAGE)
|
||||
encoding/check: $(CHECK_DEPS)
|
||||
@$(CHECK)
|
||||
.PHONY: encoding/check
|
||||
|
||||
@go_include@ errors.lo.dep
|
||||
errors.lo.dep: $(go_errors_files)
|
||||
$(BUILDDEPS)
|
||||
|
@ -4572,6 +4641,9 @@ strings.lo.dep: $(go_strings_files)
|
|||
$(BUILDDEPS)
|
||||
strings.lo: $(go_strings_files)
|
||||
$(BUILDPACKAGE)
|
||||
strings/index.lo: $(go_strings_c_files)
|
||||
@$(MKDIR_P) strings
|
||||
$(LTCOMPILE) -c -o strings/index.lo $(srcdir)/go/strings/indexbyte.c
|
||||
strings/check: $(CHECK_DEPS)
|
||||
@$(CHECK)
|
||||
.PHONY: strings/check
|
||||
|
@ -5179,6 +5251,15 @@ image/color/check: $(CHECK_DEPS)
|
|||
@$(CHECK)
|
||||
.PHONY: image/color/check
|
||||
|
||||
@go_include@ image/color/palette.lo.dep
|
||||
image/color/palette.lo.dep: $(go_image_color_palette_files)
|
||||
$(BUILDDEPS)
|
||||
image/color/palette.lo: $(go_image_color_palette_files)
|
||||
$(BUILDPACKAGE)
|
||||
image/color/palette/check: $(CHECK_DEPS)
|
||||
@$(CHECK)
|
||||
.PHONY: image/color/palette/check
|
||||
|
||||
@go_include@ image/draw.lo.dep
|
||||
image/draw.lo.dep: $(go_image_draw_files)
|
||||
$(BUILDDEPS)
|
||||
|
@ -5586,6 +5667,8 @@ bytes.gox: bytes.lo
|
|||
$(BUILDGOX)
|
||||
crypto.gox: crypto.lo
|
||||
$(BUILDGOX)
|
||||
encoding.gox: encoding.lo
|
||||
$(BUILDGOX)
|
||||
errors.gox: errors.lo
|
||||
$(BUILDGOX)
|
||||
expvar.gox: expvar.lo
|
||||
|
@ -5783,6 +5866,9 @@ image/jpeg.gox: image/jpeg.lo
|
|||
image/png.gox: image/png.lo
|
||||
$(BUILDGOX)
|
||||
|
||||
image/color/palette.gox: image/color/palette.lo
|
||||
$(BUILDGOX)
|
||||
|
||||
index/suffixarray.gox: index/suffixarray.lo
|
||||
$(BUILDGOX)
|
||||
|
||||
|
|
|
@@ -147,6 +147,9 @@
 /* Define to 1 if you have the `mknodat' function. */
 #undef HAVE_MKNODAT
 
+/* Define to 1 if you have the <netinet/icmp6.h> header file. */
+#undef HAVE_NETINET_ICMP6_H
+
 /* Define to 1 if you have the <netinet/if_ether.h> header file. */
 #undef HAVE_NETINET_IF_ETHER_H
 
|
libgo/configure
|
@@ -659,6 +659,8 @@ LIBGO_IS_SOLARIS_FALSE
 LIBGO_IS_SOLARIS_TRUE
 LIBGO_IS_RTEMS_FALSE
 LIBGO_IS_RTEMS_TRUE
+LIBGO_IS_DRAGONFLY_FALSE
+LIBGO_IS_DRAGONFLY_TRUE
 LIBGO_IS_OPENBSD_FALSE
 LIBGO_IS_OPENBSD_TRUE
 LIBGO_IS_NETBSD_FALSE
@ -11111,7 +11113,7 @@ else
|
|||
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
|
||||
lt_status=$lt_dlunknown
|
||||
cat > conftest.$ac_ext <<_LT_EOF
|
||||
#line 11114 "configure"
|
||||
#line 11116 "configure"
|
||||
#include "confdefs.h"
|
||||
|
||||
#if HAVE_DLFCN_H
|
||||
|
@ -11217,7 +11219,7 @@ else
|
|||
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
|
||||
lt_status=$lt_dlunknown
|
||||
cat > conftest.$ac_ext <<_LT_EOF
|
||||
#line 11220 "configure"
|
||||
#line 11222 "configure"
|
||||
#include "confdefs.h"
|
||||
|
||||
#if HAVE_DLFCN_H
|
||||
|
@ -13490,6 +13492,7 @@ is_irix=no
|
|||
is_linux=no
|
||||
is_netbsd=no
|
||||
is_openbsd=no
|
||||
is_dragonfly=no
|
||||
is_rtems=no
|
||||
is_solaris=no
|
||||
GOOS=unknown
|
||||
|
@ -13500,6 +13503,7 @@ case ${host} in
|
|||
*-*-linux*) is_linux=yes; GOOS=linux ;;
|
||||
*-*-netbsd*) is_netbsd=yes; GOOS=netbsd ;;
|
||||
*-*-openbsd*) is_openbsd=yes; GOOS=openbsd ;;
|
||||
*-*-dragonfly*) is_dragonfly=yes; GOOS=dragonfly ;;
|
||||
*-*-rtems*) is_rtems=yes; GOOS=rtems ;;
|
||||
*-*-solaris2*) is_solaris=yes; GOOS=solaris ;;
|
||||
esac
|
||||
|
@ -13551,6 +13555,14 @@ else
|
|||
LIBGO_IS_OPENBSD_FALSE=
|
||||
fi
|
||||
|
||||
if test $is_dragonly = yes; then
|
||||
LIBGO_IS_DRAGONFLY_TRUE=
|
||||
LIBGO_IS_DRAGONFLY_FALSE='#'
|
||||
else
|
||||
LIBGO_IS_DRAGONFLY_TRUE='#'
|
||||
LIBGO_IS_DRAGONFLY_FALSE=
|
||||
fi
|
||||
|
||||
if test $is_rtems = yes; then
|
||||
LIBGO_IS_RTEMS_TRUE=
|
||||
LIBGO_IS_RTEMS_FALSE='#'
|
||||
|
@ -14600,7 +14612,7 @@ no)
|
|||
;;
|
||||
esac
|
||||
|
||||
for ac_header in sys/file.h sys/mman.h syscall.h sys/epoll.h sys/inotify.h sys/ptrace.h sys/syscall.h sys/user.h sys/utsname.h sys/select.h sys/socket.h net/if.h net/if_arp.h net/route.h netpacket/packet.h sys/prctl.h sys/mount.h sys/vfs.h sys/statfs.h sys/timex.h sys/sysinfo.h utime.h linux/ether.h linux/fs.h linux/reboot.h netinet/in_syst.h netinet/ip.h netinet/ip_mroute.h netinet/if_ether.h
|
||||
for ac_header in sys/file.h sys/mman.h syscall.h sys/epoll.h sys/inotify.h sys/ptrace.h sys/syscall.h sys/user.h sys/utsname.h sys/select.h sys/socket.h net/if.h net/if_arp.h net/route.h netpacket/packet.h sys/prctl.h sys/mount.h sys/vfs.h sys/statfs.h sys/timex.h sys/sysinfo.h utime.h linux/ether.h linux/fs.h linux/reboot.h netinet/icmp6.h netinet/in_syst.h netinet/ip.h netinet/ip_mroute.h netinet/if_ether.h
|
||||
do :
|
||||
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
|
||||
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
|
||||
|
@ -15502,6 +15514,10 @@ if test -z "${LIBGO_IS_OPENBSD_TRUE}" && test -z "${LIBGO_IS_OPENBSD_FALSE}"; th
|
|||
as_fn_error "conditional \"LIBGO_IS_OPENBSD\" was never defined.
|
||||
Usually this means the macro was only invoked conditionally." "$LINENO" 5
|
||||
fi
|
||||
if test -z "${LIBGO_IS_DRAGONFLY_TRUE}" && test -z "${LIBGO_IS_DRAGONFLY_FALSE}"; then
|
||||
as_fn_error "conditional \"LIBGO_IS_DRAGONFLY\" was never defined.
|
||||
Usually this means the macro was only invoked conditionally." "$LINENO" 5
|
||||
fi
|
||||
if test -z "${LIBGO_IS_RTEMS_TRUE}" && test -z "${LIBGO_IS_RTEMS_FALSE}"; then
|
||||
as_fn_error "conditional \"LIBGO_IS_RTEMS\" was never defined.
|
||||
Usually this means the macro was only invoked conditionally." "$LINENO" 5
|
||||
|
|
|
@ -133,6 +133,7 @@ is_irix=no
|
|||
is_linux=no
|
||||
is_netbsd=no
|
||||
is_openbsd=no
|
||||
is_dragonfly=no
|
||||
is_rtems=no
|
||||
is_solaris=no
|
||||
GOOS=unknown
|
||||
|
@ -143,6 +144,7 @@ case ${host} in
|
|||
*-*-linux*) is_linux=yes; GOOS=linux ;;
|
||||
*-*-netbsd*) is_netbsd=yes; GOOS=netbsd ;;
|
||||
*-*-openbsd*) is_openbsd=yes; GOOS=openbsd ;;
|
||||
*-*-dragonfly*) is_dragonfly=yes; GOOS=dragonfly ;;
|
||||
*-*-rtems*) is_rtems=yes; GOOS=rtems ;;
|
||||
*-*-solaris2*) is_solaris=yes; GOOS=solaris ;;
|
||||
esac
|
||||
|
@ -152,6 +154,7 @@ AM_CONDITIONAL(LIBGO_IS_IRIX, test $is_irix = yes)
|
|||
AM_CONDITIONAL(LIBGO_IS_LINUX, test $is_linux = yes)
|
||||
AM_CONDITIONAL(LIBGO_IS_NETBSD, test $is_netbsd = yes)
|
||||
AM_CONDITIONAL(LIBGO_IS_OPENBSD, test $is_openbsd = yes)
|
||||
AM_CONDITIONAL(LIBGO_IS_DRAGONFLY, test $is_dragonly = yes)
|
||||
AM_CONDITIONAL(LIBGO_IS_RTEMS, test $is_rtems = yes)
|
||||
AM_CONDITIONAL(LIBGO_IS_SOLARIS, test $is_solaris = yes)
|
||||
AC_SUBST(GOOS)
|
||||
|
@ -471,7 +474,7 @@ no)
|
|||
;;
|
||||
esac
|
||||
|
||||
AC_CHECK_HEADERS(sys/file.h sys/mman.h syscall.h sys/epoll.h sys/inotify.h sys/ptrace.h sys/syscall.h sys/user.h sys/utsname.h sys/select.h sys/socket.h net/if.h net/if_arp.h net/route.h netpacket/packet.h sys/prctl.h sys/mount.h sys/vfs.h sys/statfs.h sys/timex.h sys/sysinfo.h utime.h linux/ether.h linux/fs.h linux/reboot.h netinet/in_syst.h netinet/ip.h netinet/ip_mroute.h netinet/if_ether.h)
|
||||
AC_CHECK_HEADERS(sys/file.h sys/mman.h syscall.h sys/epoll.h sys/inotify.h sys/ptrace.h sys/syscall.h sys/user.h sys/utsname.h sys/select.h sys/socket.h net/if.h net/if_arp.h net/route.h netpacket/packet.h sys/prctl.h sys/mount.h sys/vfs.h sys/statfs.h sys/timex.h sys/sysinfo.h utime.h linux/ether.h linux/fs.h linux/reboot.h netinet/icmp6.h netinet/in_syst.h netinet/ip.h netinet/ip_mroute.h netinet/if_ether.h)
|
||||
|
||||
AC_CHECK_HEADERS([linux/filter.h linux/if_addr.h linux/if_ether.h linux/if_tun.h linux/netlink.h linux/rtnetlink.h], [], [],
|
||||
[#ifdef HAVE_SYS_SOCKET_H
|
||||
|
|
|
@@ -13,6 +13,7 @@
 package tar
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"os"
|
@@ -82,9 +83,9 @@ func (fi headerFileInfo) Sys() interface{} { return fi.h }
 // Name returns the base name of the file.
 func (fi headerFileInfo) Name() string {
 	if fi.IsDir() {
-		return path.Clean(fi.h.Name)
+		return path.Base(path.Clean(fi.h.Name))
 	}
-	return fi.h.Name
+	return path.Base(fi.h.Name)
 }
 
 // Mode returns the permission and mode bits for the headerFileInfo.
|
@@ -174,9 +175,29 @@ const (
 	c_ISSOCK = 0140000 // Socket
 )
 
+// Keywords for the PAX Extended Header
+const (
+	paxAtime    = "atime"
+	paxCharset  = "charset"
+	paxComment  = "comment"
+	paxCtime    = "ctime" // please note that ctime is not a valid pax header.
+	paxGid      = "gid"
+	paxGname    = "gname"
+	paxLinkpath = "linkpath"
+	paxMtime    = "mtime"
+	paxPath     = "path"
+	paxSize     = "size"
+	paxUid      = "uid"
+	paxUname    = "uname"
+	paxNone     = ""
+)
+
 // FileInfoHeader creates a partially-populated Header from fi.
 // If fi describes a symlink, FileInfoHeader records link as the link target.
 // If fi describes a directory, a slash is appended to the name.
+// Because os.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
 func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
 	if fi == nil {
 		return nil, errors.New("tar: FileInfo is nil")
|
@ -257,3 +278,25 @@ func (sp *slicer) next(n int) (b []byte) {
|
|||
b, *sp = s[0:n], s[n:]
|
||||
return
|
||||
}
|
||||
|
||||
func isASCII(s string) bool {
|
||||
for _, c := range s {
|
||||
if c >= 0x80 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toASCII(s string) string {
|
||||
if isASCII(s) {
|
||||
return s
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
for _, c := range s {
|
||||
if c < 0x80 {
|
||||
buf.WriteByte(byte(c))
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
|
|
@ -95,45 +95,45 @@ func (tr *Reader) Next() (*Header, error) {
|
|||
func mergePAX(hdr *Header, headers map[string]string) error {
|
||||
for k, v := range headers {
|
||||
switch k {
|
||||
case "path":
|
||||
case paxPath:
|
||||
hdr.Name = v
|
||||
case "linkpath":
|
||||
case paxLinkpath:
|
||||
hdr.Linkname = v
|
||||
case "gname":
|
||||
case paxGname:
|
||||
hdr.Gname = v
|
||||
case "uname":
|
||||
case paxUname:
|
||||
hdr.Uname = v
|
||||
case "uid":
|
||||
case paxUid:
|
||||
uid, err := strconv.ParseInt(v, 10, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Uid = int(uid)
|
||||
case "gid":
|
||||
case paxGid:
|
||||
gid, err := strconv.ParseInt(v, 10, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Gid = int(gid)
|
||||
case "atime":
|
||||
case paxAtime:
|
||||
t, err := parsePAXTime(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.AccessTime = t
|
||||
case "mtime":
|
||||
case paxMtime:
|
||||
t, err := parsePAXTime(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.ModTime = t
|
||||
case "ctime":
|
||||
case paxCtime:
|
||||
t, err := parsePAXTime(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.ChangeTime = t
|
||||
case "size":
|
||||
case paxSize:
|
||||
size, err := strconv.ParseInt(v, 10, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -243,13 +243,15 @@ func (tr *Reader) octal(b []byte) int64 {
|
|||
return x
|
||||
}
|
||||
|
||||
// Removing leading spaces.
|
||||
for len(b) > 0 && b[0] == ' ' {
|
||||
b = b[1:]
|
||||
}
|
||||
// Removing trailing NULs and spaces.
|
||||
for len(b) > 0 && (b[len(b)-1] == ' ' || b[len(b)-1] == '\x00') {
|
||||
b = b[0 : len(b)-1]
|
||||
// Because unused fields are filled with NULs, we need
|
||||
// to skip leading NULs. Fields may also be padded with
|
||||
// spaces or NULs.
|
||||
// So we remove leading and trailing NULs and spaces to
|
||||
// be sure.
|
||||
b = bytes.Trim(b, " \x00")
|
||||
|
||||
if len(b) == 0 {
|
||||
return 0
|
||||
}
|
||||
x, err := strconv.ParseUint(cString(b), 8, 64)
|
||||
if err != nil {
|
||||
|
|
|
@ -142,6 +142,25 @@ var untarTests = []*untarTest{
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
file: "testdata/nil-uid.tar", // golang.org/issue/5290
|
||||
headers: []*Header{
|
||||
{
|
||||
Name: "P1050238.JPG.log",
|
||||
Mode: 0664,
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Size: 14,
|
||||
ModTime: time.Unix(1365454838, 0),
|
||||
Typeflag: TypeReg,
|
||||
Linkname: "",
|
||||
Uname: "eyefi",
|
||||
Gname: "eyefi",
|
||||
Devmajor: 0,
|
||||
Devminor: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestReader(t *testing.T) {
|
||||
|
@ -152,6 +171,7 @@ testLoop:
|
|||
t.Errorf("test %d: Unexpected error: %v", i, err)
|
||||
continue
|
||||
}
|
||||
defer f.Close()
|
||||
tr := NewReader(f)
|
||||
for j, header := range test.headers {
|
||||
hdr, err := tr.Next()
|
||||
|
@ -172,7 +192,6 @@ testLoop:
|
|||
if hdr != nil || err != nil {
|
||||
t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -8,7 +8,9 @@ import (
|
|||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
@ -249,7 +251,14 @@ func TestHeaderRoundTrip(t *testing.T) {
|
|||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
if got, want := h2.Name, g.h.Name; got != want {
|
||||
if strings.Contains(fi.Name(), "/") {
|
||||
t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
|
||||
}
|
||||
name := path.Base(g.h.Name)
|
||||
if fi.IsDir() {
|
||||
name += "/"
|
||||
}
|
||||
if got, want := h2.Name, name; got != want {
|
||||
t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.Size, g.h.Size; got != want {
|
||||
|
|
libgo/go/archive/tar/testdata/nil-uid.tar (new binary file, not shown)
|
@ -24,6 +24,7 @@ var (
|
|||
ErrFieldTooLong = errors.New("archive/tar: header field too long")
|
||||
ErrWriteAfterClose = errors.New("archive/tar: write after close")
|
||||
errNameTooLong = errors.New("archive/tar: name too long")
|
||||
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
|
||||
)
|
||||
|
||||
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
|
||||
|
@ -37,6 +38,7 @@ type Writer struct {
|
|||
pad int64 // amount of padding to write after current file entry
|
||||
closed bool
|
||||
usedBinary bool // whether the binary numeric field extension was used
|
||||
preferPax bool // use pax header instead of binary numeric header
|
||||
}
|
||||
|
||||
// NewWriter creates a new Writer writing to w.
|
||||
|
@ -65,16 +67,23 @@ func (tw *Writer) Flush() error {
|
|||
}
|
||||
|
||||
// Write s into b, terminating it with a NUL if there is room.
|
||||
func (tw *Writer) cString(b []byte, s string) {
|
||||
// If the value is too long for the field and allowPax is true add a paxheader record instead
|
||||
func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
|
||||
needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
|
||||
if needsPaxHeader {
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
if len(s) > len(b) {
|
||||
if tw.err == nil {
|
||||
tw.err = ErrFieldTooLong
|
||||
}
|
||||
return
|
||||
}
|
||||
copy(b, s)
|
||||
if len(s) < len(b) {
|
||||
b[len(s)] = 0
|
||||
ascii := toASCII(s)
|
||||
copy(b, ascii)
|
||||
if len(ascii) < len(b) {
|
||||
b[len(ascii)] = 0
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -85,17 +94,27 @@ func (tw *Writer) octal(b []byte, x int64) {
|
|||
for len(s)+1 < len(b) {
|
||||
s = "0" + s
|
||||
}
|
||||
tw.cString(b, s)
|
||||
tw.cString(b, s, false, paxNone, nil)
|
||||
}
|
||||
|
||||
// Write x into b, either as octal or as binary (GNUtar/star extension).
|
||||
func (tw *Writer) numeric(b []byte, x int64) {
|
||||
// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead
|
||||
func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
|
||||
// Try octal first.
|
||||
s := strconv.FormatInt(x, 8)
|
||||
if len(s) < len(b) {
|
||||
tw.octal(b, x)
|
||||
return
|
||||
}
|
||||
|
||||
// If it is too long for octal, and pax is preferred, use a pax header
|
||||
if allowPax && tw.preferPax {
|
||||
tw.octal(b, 0)
|
||||
s := strconv.FormatInt(x, 10)
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
|
||||
// Too big: use binary (big-endian).
|
||||
tw.usedBinary = true
|
||||
for i := len(b) - 1; x > 0 && i >= 0; i-- {
|
||||
|
@ -115,6 +134,15 @@ var (
|
|||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
func (tw *Writer) WriteHeader(hdr *Header) error {
|
||||
return tw.writeHeader(hdr, true)
|
||||
}
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
// As this method is called internally by writePax header to allow it to
|
||||
// suppress writing the pax header.
|
||||
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
|
||||
if tw.closed {
|
||||
return ErrWriteAfterClose
|
||||
}
|
||||
|
@ -124,31 +152,21 @@ func (tw *Writer) WriteHeader(hdr *Header) error {
|
|||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
// Decide whether or not to use PAX extensions
|
||||
|
||||
// a map to hold pax header records, if any are needed
|
||||
paxHeaders := make(map[string]string)
|
||||
|
||||
// TODO(shanemhansen): we might want to use PAX headers for
|
||||
// subsecond time resolution, but for now let's just capture
|
||||
// the long name/long symlink use case.
|
||||
suffix := hdr.Name
|
||||
prefix := ""
|
||||
if len(hdr.Name) > fileNameSize || len(hdr.Linkname) > fileNameSize {
|
||||
var err error
|
||||
prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
|
||||
// Either we were unable to pack the long name into ustar format
|
||||
// or the link name is too long; use PAX headers.
|
||||
if err == errNameTooLong || len(hdr.Linkname) > fileNameSize {
|
||||
if err := tw.writePAXHeader(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
tw.nb = int64(hdr.Size)
|
||||
tw.pad = -tw.nb & (blockSize - 1) // blockSize is a power of two
|
||||
// too long fields or non ascii characters
|
||||
|
||||
header := make([]byte, blockSize)
|
||||
s := slicer(header)
|
||||
tw.cString(s.next(fileNameSize), suffix)
|
||||
|
||||
// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
||||
pathHeaderBytes := s.next(fileNameSize)
|
||||
|
||||
tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
|
||||
|
||||
// Handle out of range ModTime carefully.
|
||||
var modTime int64
|
||||
|
@ -156,27 +174,55 @@ func (tw *Writer) WriteHeader(hdr *Header) error {
|
|||
modTime = hdr.ModTime.Unix()
|
||||
}
|
||||
|
||||
tw.octal(s.next(8), hdr.Mode) // 100:108
|
||||
tw.numeric(s.next(8), int64(hdr.Uid)) // 108:116
|
||||
tw.numeric(s.next(8), int64(hdr.Gid)) // 116:124
|
||||
tw.numeric(s.next(12), hdr.Size) // 124:136
|
||||
tw.numeric(s.next(12), modTime) // 136:148
|
||||
s.next(8) // chksum (148:156)
|
||||
s.next(1)[0] = hdr.Typeflag // 156:157
|
||||
tw.cString(s.next(100), hdr.Linkname) // linkname (157:257)
|
||||
copy(s.next(8), []byte("ustar\x0000")) // 257:265
|
||||
tw.cString(s.next(32), hdr.Uname) // 265:297
|
||||
tw.cString(s.next(32), hdr.Gname) // 297:329
|
||||
tw.numeric(s.next(8), hdr.Devmajor) // 329:337
|
||||
tw.numeric(s.next(8), hdr.Devminor) // 337:345
|
||||
tw.cString(s.next(155), prefix) // 345:500
|
||||
tw.octal(s.next(8), hdr.Mode) // 100:108
|
||||
tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
|
||||
tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
|
||||
tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136
|
||||
tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity
|
||||
s.next(8) // chksum (148:156)
|
||||
s.next(1)[0] = hdr.Typeflag // 156:157
|
||||
|
||||
tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
|
||||
|
||||
copy(s.next(8), []byte("ustar\x0000")) // 257:265
|
||||
tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
|
||||
tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
|
||||
tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337
|
||||
tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345
|
||||
|
||||
// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
||||
prefixHeaderBytes := s.next(155)
|
||||
tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix
|
||||
|
||||
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
|
||||
if tw.usedBinary {
|
||||
copy(header[257:265], []byte("ustar \x00"))
|
||||
}
|
||||
// Use the ustar magic if we used ustar long names.
|
||||
if len(prefix) > 0 {
|
||||
copy(header[257:265], []byte("ustar\000"))
|
||||
|
||||
_, paxPathUsed := paxHeaders[paxPath]
|
||||
// try to use a ustar header when only the name is too long
|
||||
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
|
||||
suffix := hdr.Name
|
||||
prefix := ""
|
||||
if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
|
||||
var err error
|
||||
prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
|
||||
if err == nil {
|
||||
// ok we can use a ustar long name instead of pax, now correct the fields
|
||||
|
||||
// remove the path field from the pax header. this will suppress the pax header
|
||||
delete(paxHeaders, paxPath)
|
||||
|
||||
// update the path fields
|
||||
tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
|
||||
tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
|
||||
|
||||
// Use the ustar magic if we used ustar long names.
|
||||
if len(prefix) > 0 {
|
||||
copy(header[257:265], []byte("ustar\000"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The chksum field is terminated by a NUL and a space.
|
||||
|
@ -190,8 +236,18 @@ func (tw *Writer) WriteHeader(hdr *Header) error {
|
|||
return tw.err
|
||||
}
|
||||
|
||||
_, tw.err = tw.w.Write(header)
|
||||
if len(paxHeaders) > 0 {
|
||||
if !allowPax {
|
||||
return errInvalidHeader
|
||||
}
|
||||
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
tw.nb = int64(hdr.Size)
|
||||
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
|
||||
|
||||
_, tw.err = tw.w.Write(header)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
|
@ -207,8 +263,11 @@ func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err er
|
|||
length--
|
||||
}
|
||||
i := strings.LastIndex(name[:length], "/")
|
||||
nlen := length - i - 1
|
||||
if i <= 0 || nlen > fileNameSize || nlen == 0 {
|
||||
// nlen contains the resulting length in the name field.
|
||||
// plen contains the resulting length in the prefix field.
|
||||
nlen := len(name) - i - 1
|
||||
plen := i
|
||||
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
|
||||
err = errNameTooLong
|
||||
return
|
||||
}
|
||||
|
@ -218,7 +277,7 @@ func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err er
|
|||
|
||||
// writePaxHeader writes an extended pax header to the
|
||||
// archive.
|
||||
func (tw *Writer) writePAXHeader(hdr *Header) error {
|
||||
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
|
||||
// Prepare extended header
|
||||
ext := new(Header)
|
||||
ext.Typeflag = TypeXHeader
|
||||
|
@ -229,18 +288,23 @@ func (tw *Writer) writePAXHeader(hdr *Header) error {
|
|||
// with the current pid.
|
||||
pid := os.Getpid()
|
||||
dir, file := path.Split(hdr.Name)
|
||||
ext.Name = path.Join(dir,
|
||||
fmt.Sprintf("PaxHeaders.%d", pid), file)[0:100]
|
||||
fullName := path.Join(dir,
|
||||
fmt.Sprintf("PaxHeaders.%d", pid), file)
|
||||
|
||||
ascii := toASCII(fullName)
|
||||
if len(ascii) > 100 {
|
||||
ascii = ascii[:100]
|
||||
}
|
||||
ext.Name = ascii
|
||||
// Construct the body
|
||||
var buf bytes.Buffer
|
||||
if len(hdr.Name) > fileNameSize {
|
||||
fmt.Fprint(&buf, paxHeader("path="+hdr.Name))
|
||||
}
|
||||
if len(hdr.Linkname) > fileNameSize {
|
||||
fmt.Fprint(&buf, paxHeader("linkpath="+hdr.Linkname))
|
||||
|
||||
for k, v := range paxHeaders {
|
||||
fmt.Fprint(&buf, paxHeader(k+"="+v))
|
||||
}
|
||||
|
||||
ext.Size = int64(len(buf.Bytes()))
|
||||
if err := tw.WriteHeader(ext); err != nil {
|
||||
if err := tw.writeHeader(ext, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := tw.Write(buf.Bytes()); err != nil {
|
||||
@ -243,15 +243,110 @@ func TestPax(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestPaxSymlink(t *testing.T) {
|
||||
// Create an archive with a large linkname
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeSymlink
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
// Force a PAX long linkname to be written
|
||||
longLinkname := strings.Repeat("1234567890/1234567890", 10)
|
||||
hdr.Linkname = longLinkname
|
||||
|
||||
hdr.Size = 0
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Linkname != longLinkname {
|
||||
t.Fatal("Couldn't recover long link name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxNonAscii(t *testing.T) {
|
||||
// Create an archive with non-ASCII names. These should trigger a pax header
|
||||
// because pax headers have a defined utf-8 encoding.
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
|
||||
// some sample data
|
||||
chineseFilename := "文件名"
|
||||
chineseGroupname := "組"
|
||||
chineseUsername := "用戶名"
|
||||
|
||||
hdr.Name = chineseFilename
|
||||
hdr.Gname = chineseGroupname
|
||||
hdr.Uname = chineseUsername
|
||||
|
||||
contents := strings.Repeat(" ", int(hdr.Size))
|
||||
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != chineseFilename {
|
||||
t.Fatal("Couldn't recover unicode name")
|
||||
}
|
||||
if hdr.Gname != chineseGroupname {
|
||||
t.Fatal("Couldn't recover unicode group")
|
||||
}
|
||||
if hdr.Uname != chineseUsername {
|
||||
t.Fatal("Couldn't recover unicode user")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPAXHeader(t *testing.T) {
|
||||
medName := strings.Repeat("CD", 50)
|
||||
longName := strings.Repeat("AB", 100)
|
||||
paxTests := [][2]string{
|
||||
{"name=/etc/hosts", "19 name=/etc/hosts\n"},
|
||||
{paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"},
|
||||
{"a=b", "6 a=b\n"}, // Single digit length
|
||||
{"a=names", "11 a=names\n"}, // Test case involving carries
|
||||
{"name=" + longName, fmt.Sprintf("210 name=%s\n", longName)},
|
||||
{"name=" + medName, fmt.Sprintf("110 name=%s\n", medName)}}
|
||||
{paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)},
|
||||
{paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}}
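A sketch of the record format these cases exercise: a pax extended-header record is "%d %s\n", where the leading decimal length counts the whole record, including its own digits, so it may need one correction pass (the "carries" case above). Illustrative only; the package's unexported paxHeader is the real helper.

package main

import (
	"fmt"
	"strconv"
)

func paxRecord(msg string) string {
	const padding = 2 // the separating space and the trailing newline
	size := len(msg) + padding
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s\n", size, msg)
	if len(record) != size {
		// Adding the length digits changed the total length; recompute.
		size = len(record)
		record = fmt.Sprintf("%d %s\n", size, msg)
	}
	return record
}

func main() {
	fmt.Printf("%q\n", paxRecord("a=b"))     // "6 a=b\n"
	fmt.Printf("%q\n", paxRecord("a=names")) // "11 a=names\n"
}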
|
||||
|
||||
for _, test := range paxTests {
|
||||
key, expected := test[0], test[1]
|
||||
|
@ -260,3 +355,39 @@ func TestPAXHeader(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUSTARLongName(t *testing.T) {
|
||||
// Create an archive with a path that failed to split with USTAR extension in previous versions.
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeDir
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
// Force a PAX long name to be written. The name was taken from a practical example
|
||||
// that failed, with every character replaced by a digit to anonymize the sample.
|
||||
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
|
||||
hdr.Name = longName
|
||||
|
||||
hdr.Size = 0
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != longName {
|
||||
t.Fatal("Couldn't recover long name")
|
||||
}
|
||||
}
|
||||
@ -6,13 +6,11 @@ package zip
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/flate"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
|
@ -116,6 +114,19 @@ func (rc *ReadCloser) Close() error {
|
|||
return rc.f.Close()
|
||||
}
|
||||
|
||||
// DataOffset returns the offset of the file's possibly-compressed
|
||||
// data, relative to the beginning of the zip file.
|
||||
//
|
||||
// Most callers should instead use Open, which transparently
|
||||
// decompresses data and verifies checksums.
|
||||
func (f *File) DataOffset() (offset int64, err error) {
|
||||
bodyOffset, err := f.findBodyOffset()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return f.headerOffset + bodyOffset, nil
|
||||
}
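A hedged usage sketch for the new DataOffset method: locate a member's raw, possibly compressed bytes inside the archive without decompressing them. The file name "example.zip" is an assumption for illustration.

package main

import (
	"archive/zip"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	rc, err := zip.OpenReader("example.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	f := rc.File[0]
	off, err := f.DataOffset()
	if err != nil {
		log.Fatal(err)
	}

	// Read the raw stored bytes directly from the underlying file.
	zf, err := os.Open("example.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer zf.Close()
	raw := io.NewSectionReader(zf, off, int64(f.CompressedSize64))
	n, err := io.Copy(ioutil.Discard, raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %d raw bytes at offset %d\n", f.Name, n, off)
}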
|
||||
|
||||
// Open returns a ReadCloser that provides access to the File's contents.
|
||||
// Multiple files may be read concurrently.
|
||||
func (f *File) Open() (rc io.ReadCloser, err error) {
|
||||
|
@ -125,15 +136,12 @@ func (f *File) Open() (rc io.ReadCloser, err error) {
|
|||
}
|
||||
size := int64(f.CompressedSize64)
|
||||
r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
|
||||
switch f.Method {
|
||||
case Store: // (no compression)
|
||||
rc = ioutil.NopCloser(r)
|
||||
case Deflate:
|
||||
rc = flate.NewReader(r)
|
||||
default:
|
||||
dcomp := decompressor(f.Method)
|
||||
if dcomp == nil {
|
||||
err = ErrAlgorithm
|
||||
return
|
||||
}
|
||||
rc = dcomp(r)
|
||||
var desr io.Reader
|
||||
if f.hasDataDescriptor() {
|
||||
desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
|
||||
|
@ -184,9 +192,8 @@ func (r *checksumReader) Close() error { return r.rc.Close() }
|
|||
// findBodyOffset does the minimum work to verify the file has a header
|
||||
// and returns the file body offset.
|
||||
func (f *File) findBodyOffset() (int64, error) {
|
||||
r := io.NewSectionReader(f.zipr, f.headerOffset, f.zipsize-f.headerOffset)
|
||||
var buf [fileHeaderLen]byte
|
||||
if _, err := io.ReadFull(r, buf[:]); err != nil {
|
||||
if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
b := readBuf(buf[:])
|
||||
|
|
|
@ -276,6 +276,7 @@ func readTestZip(t *testing.T, zt ZipTest) {
|
|||
var rc *ReadCloser
|
||||
rc, err = OpenReader(filepath.Join("testdata", zt.Name))
|
||||
if err == nil {
|
||||
defer rc.Close()
|
||||
z = &rc.Reader
|
||||
}
|
||||
}
|
||||
|
|
libgo/go/archive/zip/register.go (new file, 71 lines)
|
@ -0,0 +1,71 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package zip
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A Compressor returns a compressing writer, writing to the
|
||||
// provided writer. On Close, any pending data should be flushed.
|
||||
type Compressor func(io.Writer) (io.WriteCloser, error)
|
||||
|
||||
// Decompressor is a function that wraps a Reader with a decompressing Reader.
|
||||
// The decompressed ReadCloser is returned to callers who open files from
|
||||
// within the archive. These callers are responsible for closing this reader
|
||||
// when they're finished reading.
|
||||
type Decompressor func(io.Reader) io.ReadCloser
|
||||
|
||||
var (
|
||||
mu sync.RWMutex // guards compressor and decompressor maps
|
||||
|
||||
compressors = map[uint16]Compressor{
|
||||
Store: func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil },
|
||||
Deflate: func(w io.Writer) (io.WriteCloser, error) { return flate.NewWriter(w, 5) },
|
||||
}
|
||||
|
||||
decompressors = map[uint16]Decompressor{
|
||||
Store: ioutil.NopCloser,
|
||||
Deflate: flate.NewReader,
|
||||
}
|
||||
)
|
||||
|
||||
// RegisterDecompressor allows custom decompressors for a specified method ID.
|
||||
func RegisterDecompressor(method uint16, d Decompressor) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
if _, ok := decompressors[method]; ok {
|
||||
panic("decompressor already registered")
|
||||
}
|
||||
decompressors[method] = d
|
||||
}
|
||||
|
||||
// RegisterCompressor registers custom compressors for a specified method ID.
|
||||
// The common methods Store and Deflate are built in.
|
||||
func RegisterCompressor(method uint16, comp Compressor) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
if _, ok := compressors[method]; ok {
|
||||
panic("compressor already registered")
|
||||
}
|
||||
compressors[method] = comp
|
||||
}
|
||||
|
||||
func compressor(method uint16) Compressor {
|
||||
mu.RLock()
|
||||
defer mu.RUnlock()
|
||||
return compressors[method]
|
||||
}
|
||||
|
||||
func decompressor(method uint16) Decompressor {
|
||||
mu.RLock()
|
||||
defer mu.RUnlock()
|
||||
return decompressors[method]
|
||||
}
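A minimal usage sketch for the registration hooks above: add read support for an extra compression method. Method ID 12 is the conventional bzip2 value from the zip specification; treat the ID, the bzip2 choice, and the archive name as assumptions for illustration.

package main

import (
	"archive/zip"
	"compress/bzip2"
	"fmt"
	"io"
	"io/ioutil"
	"log"
)

func main() {
	zip.RegisterDecompressor(12, func(r io.Reader) io.ReadCloser {
		// compress/bzip2 exposes no Closer, so wrap the reader.
		return ioutil.NopCloser(bzip2.NewReader(r))
	})

	rc, err := zip.OpenReader("bzip2-entries.zip") // assumed file name
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	for _, f := range rc.File {
		r, err := f.Open() // now understands method-12 entries
		if err != nil {
			log.Fatal(err)
		}
		n, _ := io.Copy(ioutil.Discard, r)
		r.Close()
		fmt.Println(f.Name, n, "bytes")
	}
}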
@ -21,6 +21,7 @@ package zip
|
|||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -99,7 +100,7 @@ type headerFileInfo struct {
|
|||
fh *FileHeader
|
||||
}
|
||||
|
||||
func (fi headerFileInfo) Name() string { return fi.fh.Name }
|
||||
func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) }
|
||||
func (fi headerFileInfo) Size() int64 {
|
||||
if fi.fh.UncompressedSize64 > 0 {
|
||||
return int64(fi.fh.UncompressedSize64)
|
||||
|
@ -113,6 +114,9 @@ func (fi headerFileInfo) Sys() interface{} { return fi.fh }
|
|||
|
||||
// FileInfoHeader creates a partially-populated FileHeader from an
|
||||
// os.FileInfo.
|
||||
// Because os.FileInfo's Name method returns only the base name of
|
||||
// the file it describes, it may be necessary to modify the Name field
|
||||
// of the returned header to provide the full path name of the file.
|
||||
func FileInfoHeader(fi os.FileInfo) (*FileHeader, error) {
|
||||
size := fi.Size()
|
||||
fh := &FileHeader{
|
||||
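A short usage sketch for the caveat documented above: os.FileInfo carries only the base name, so callers that want directory structure set the full slash-separated path themselves. The path "dir/notes.txt" is hypothetical.

package main

import (
	"archive/zip"
	"bytes"
	"io"
	"log"
	"os"
)

func addFile(w *zip.Writer, path string) error {
	fi, err := os.Stat(path)
	if err != nil {
		return err
	}
	fh, err := zip.FileInfoHeader(fi)
	if err != nil {
		return err
	}
	fh.Name = path // restore the full path; FileInfoHeader only has the base name
	dst, err := w.CreateHeader(fh)
	if err != nil {
		return err
	}
	src, err := os.Open(path)
	if err != nil {
		return err
	}
	defer src.Close()
	_, err = io.Copy(dst, src)
	return err
}

func main() {
	var buf bytes.Buffer
	w := zip.NewWriter(&buf)
	if err := addFile(w, "dir/notes.txt"); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}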
|
|
|
@ -6,7 +6,6 @@ package zip
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/flate"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
|
@ -198,18 +197,15 @@ func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
|
|||
compCount: &countWriter{w: w.cw},
|
||||
crc32: crc32.NewIEEE(),
|
||||
}
|
||||
switch fh.Method {
|
||||
case Store:
|
||||
fw.comp = nopCloser{fw.compCount}
|
||||
case Deflate:
|
||||
var err error
|
||||
fw.comp, err = flate.NewWriter(fw.compCount, 5)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
comp := compressor(fh.Method)
|
||||
if comp == nil {
|
||||
return nil, ErrAlgorithm
|
||||
}
|
||||
var err error
|
||||
fw.comp, err = comp(fw.compCount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fw.rawCount = &countWriter{w: fw.comp}
|
||||
|
||||
h := &header{
|
||||
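The write side has the matching hook: CreateHeader above now looks up compressors by method ID, so archives can be written with a non-built-in method. A hedged sketch; the method ID 99 is a made-up private value, and note that in this version registering an already-registered method such as Deflate panics.

package main

import (
	"archive/zip"
	"bytes"
	"compress/flate"
	"fmt"
	"io"
	"log"
)

const myMethod = 99 // assumed private method ID, for illustration only

func main() {
	zip.RegisterCompressor(myMethod, func(w io.Writer) (io.WriteCloser, error) {
		return flate.NewWriter(w, flate.BestCompression)
	})

	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	w, err := zw.CreateHeader(&zip.FileHeader{Name: "data.txt", Method: myMethod})
	if err != nil {
		log.Fatal(err)
	}
	io.WriteString(w, "compressed with a registered method")
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.Len(), "bytes written")
	// Reading this archive back requires a matching RegisterDecompressor(myMethod, ...).
}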
|
@ -9,22 +9,24 @@ package zip
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestOver65kFiles(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("slow test; skipping")
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
w := NewWriter(buf)
|
||||
const nFiles = (1 << 16) + 42
|
||||
for i := 0; i < nFiles; i++ {
|
||||
_, err := w.Create(fmt.Sprintf("%d.dat", i))
|
||||
_, err := w.CreateHeader(&FileHeader{
|
||||
Name: fmt.Sprintf("%d.dat", i),
|
||||
Method: Store, // avoid Issue 6136 and Issue 6138
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("creating file %d: %v", i, err)
|
||||
}
|
||||
|
@ -105,29 +107,156 @@ func TestFileHeaderRoundTrip64(t *testing.T) {
|
|||
testHeaderRoundTrip(fh, uint32max, fh.UncompressedSize64, t)
|
||||
}
|
||||
|
||||
type repeatedByte struct {
|
||||
off int64
|
||||
b byte
|
||||
n int64
|
||||
}
|
||||
|
||||
// rleBuffer is a run-length-encoded byte buffer.
|
||||
// It's an io.Writer (like a bytes.Buffer) and also an io.ReaderAt,
|
||||
// allowing random-access reads.
|
||||
type rleBuffer struct {
|
||||
buf []repeatedByte
|
||||
}
|
||||
|
||||
func (r *rleBuffer) Size() int64 {
|
||||
if len(r.buf) == 0 {
|
||||
return 0
|
||||
}
|
||||
last := &r.buf[len(r.buf)-1]
|
||||
return last.off + last.n
|
||||
}
|
||||
|
||||
func (r *rleBuffer) Write(p []byte) (n int, err error) {
|
||||
var rp *repeatedByte
|
||||
if len(r.buf) > 0 {
|
||||
rp = &r.buf[len(r.buf)-1]
|
||||
// Fast path, if p is entirely the same byte repeated.
|
||||
if lastByte := rp.b; len(p) > 0 && p[0] == lastByte {
|
||||
all := true
|
||||
for _, b := range p {
|
||||
if b != lastByte {
|
||||
all = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if all {
|
||||
rp.n += int64(len(p))
|
||||
return len(p), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, b := range p {
|
||||
if rp == nil || rp.b != b {
|
||||
r.buf = append(r.buf, repeatedByte{r.Size(), b, 1})
|
||||
rp = &r.buf[len(r.buf)-1]
|
||||
} else {
|
||||
rp.n++
|
||||
}
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
return
|
||||
}
|
||||
skipParts := sort.Search(len(r.buf), func(i int) bool {
|
||||
part := &r.buf[i]
|
||||
return part.off+part.n > off
|
||||
})
|
||||
parts := r.buf[skipParts:]
|
||||
if len(parts) > 0 {
|
||||
skipBytes := off - parts[0].off
|
||||
for len(parts) > 0 {
|
||||
part := parts[0]
|
||||
for i := skipBytes; i < part.n; i++ {
|
||||
if n == len(p) {
|
||||
return
|
||||
}
|
||||
p[n] = part.b
|
||||
n++
|
||||
}
|
||||
parts = parts[1:]
|
||||
skipBytes = 0
|
||||
}
|
||||
}
|
||||
if n != len(p) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Just testing the rleBuffer used in the Zip64 test above. Not used by the zip code.
|
||||
func TestRLEBuffer(t *testing.T) {
|
||||
b := new(rleBuffer)
|
||||
var all []byte
|
||||
writes := []string{"abcdeee", "eeeeeee", "eeeefghaaiii"}
|
||||
for _, w := range writes {
|
||||
b.Write([]byte(w))
|
||||
all = append(all, w...)
|
||||
}
|
||||
if len(b.buf) != 10 {
|
||||
t.Fatalf("len(b.buf) = %d; want 10", len(b.buf))
|
||||
}
|
||||
|
||||
for i := 0; i < len(all); i++ {
|
||||
for j := 0; j < len(all)-i; j++ {
|
||||
buf := make([]byte, j)
|
||||
n, err := b.ReadAt(buf, int64(i))
|
||||
if err != nil || n != len(buf) {
|
||||
t.Errorf("ReadAt(%d, %d) = %d, %v; want %d, nil", i, j, n, err, len(buf))
|
||||
}
|
||||
if !bytes.Equal(buf, all[i:i+j]) {
|
||||
t.Errorf("ReadAt(%d, %d) = %q; want %q", i, j, buf, all[i:i+j])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fakeHash32 is a dummy Hash32 that always returns 0.
|
||||
type fakeHash32 struct {
|
||||
hash.Hash32
|
||||
}
|
||||
|
||||
func (fakeHash32) Write(p []byte) (int, error) { return len(p), nil }
|
||||
func (fakeHash32) Sum32() uint32 { return 0 }
|
||||
|
||||
func TestZip64(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("slow test; skipping")
|
||||
}
|
||||
const size = 1 << 32 // before the "END\n" part
|
||||
testZip64(t, size)
|
||||
}
|
||||
|
||||
func testZip64(t testing.TB, size int64) {
|
||||
const chunkSize = 1024
|
||||
chunks := int(size / chunkSize)
|
||||
// write 2^32 bytes plus "END\n" to a zip file
|
||||
buf := new(bytes.Buffer)
|
||||
buf := new(rleBuffer)
|
||||
w := NewWriter(buf)
|
||||
f, err := w.Create("huge.txt")
|
||||
f, err := w.CreateHeader(&FileHeader{
|
||||
Name: "huge.txt",
|
||||
Method: Store,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
chunk := make([]byte, 1024)
|
||||
f.(*fileWriter).crc32 = fakeHash32{}
|
||||
chunk := make([]byte, chunkSize)
|
||||
for i := range chunk {
|
||||
chunk[i] = '.'
|
||||
}
|
||||
chunk[len(chunk)-1] = '\n'
|
||||
end := []byte("END\n")
|
||||
for i := 0; i < (1<<32)/1024; i++ {
|
||||
for i := 0; i < chunks; i++ {
|
||||
_, err := f.Write(chunk)
|
||||
if err != nil {
|
||||
t.Fatal("write chunk:", err)
|
||||
}
|
||||
}
|
||||
end := []byte("END\n")
|
||||
_, err = f.Write(end)
|
||||
if err != nil {
|
||||
t.Fatal("write end:", err)
|
||||
|
@ -137,7 +266,7 @@ func TestZip64(t *testing.T) {
|
|||
}
|
||||
|
||||
// read back zip file and check that we get to the end of it
|
||||
r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
||||
r, err := NewReader(buf, int64(buf.Size()))
|
||||
if err != nil {
|
||||
t.Fatal("reader:", err)
|
||||
}
|
||||
|
@ -146,7 +275,8 @@ func TestZip64(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal("opening:", err)
|
||||
}
|
||||
for i := 0; i < (1<<32)/1024; i++ {
|
||||
rc.(*checksumReader).hash = fakeHash32{}
|
||||
for i := 0; i < chunks; i++ {
|
||||
_, err := io.ReadFull(rc, chunk)
|
||||
if err != nil {
|
||||
t.Fatal("read:", err)
|
||||
|
@ -163,11 +293,13 @@ func TestZip64(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal("closing:", err)
|
||||
}
|
||||
if got, want := f0.UncompressedSize, uint32(uint32max); got != want {
|
||||
t.Errorf("UncompressedSize %d, want %d", got, want)
|
||||
if size == 1<<32 {
|
||||
if got, want := f0.UncompressedSize, uint32(uint32max); got != want {
|
||||
t.Errorf("UncompressedSize %d, want %d", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
if got, want := f0.UncompressedSize64, (1<<32)+uint64(len(end)); got != want {
|
||||
if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want {
|
||||
t.Errorf("UncompressedSize64 %d, want %d", got, want)
|
||||
}
|
||||
}
|
||||
|
@ -253,3 +385,11 @@ func TestZeroLengthHeader(t *testing.T) {
|
|||
}
|
||||
testValidHeader(&h, t)
|
||||
}
|
||||
|
||||
// Just benchmarking how fast the Zip64 test above is. Not related to
|
||||
// our zip performance, since the test above disabled CRC32 and flate.
|
||||
func BenchmarkZip64Test(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testZip64(b, 1<<26)
|
||||
}
|
||||
}
|
||||
@ -51,12 +51,9 @@ func NewReaderSize(rd io.Reader, size int) *Reader {
|
|||
if size < minReadBufferSize {
|
||||
size = minReadBufferSize
|
||||
}
|
||||
return &Reader{
|
||||
buf: make([]byte, size),
|
||||
rd: rd,
|
||||
lastByte: -1,
|
||||
lastRuneSize: -1,
|
||||
}
|
||||
r := new(Reader)
|
||||
r.reset(make([]byte, size), rd)
|
||||
return r
|
||||
}
|
||||
|
||||
// NewReader returns a new Reader whose buffer has the default size.
|
||||
|
@ -64,6 +61,21 @@ func NewReader(rd io.Reader) *Reader {
|
|||
return NewReaderSize(rd, defaultBufSize)
|
||||
}
|
||||
|
||||
// Reset discards any buffered data, resets all state, and switches
|
||||
// the buffered reader to read from r.
|
||||
func (b *Reader) Reset(r io.Reader) {
|
||||
b.reset(b.buf, r)
|
||||
}
|
||||
|
||||
func (b *Reader) reset(buf []byte, r io.Reader) {
|
||||
*b = Reader{
|
||||
buf: buf,
|
||||
rd: r,
|
||||
lastByte: -1,
|
||||
lastRuneSize: -1,
|
||||
}
|
||||
}
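A small sketch of the pattern Reset enables: one bufio.Reader, and therefore one buffer allocation, reused across several inputs.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	inputs := []string{"first\n", "second\n"}
	br := bufio.NewReader(strings.NewReader("")) // buffer allocated once
	for _, in := range inputs {
		br.Reset(strings.NewReader(in)) // reuse it for the next input
		line, _ := br.ReadString('\n')
		fmt.Print(line)
	}
}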
|
||||
|
||||
var errNegativeRead = errors.New("bufio: reader returned negative count from Read")
|
||||
|
||||
// fill reads a new chunk into the buffer.
|
||||
|
@ -234,7 +246,7 @@ func (b *Reader) Buffered() int { return b.w - b.r }
|
|||
|
||||
// ReadSlice reads until the first occurrence of delim in the input,
|
||||
// returning a slice pointing at the bytes in the buffer.
|
||||
// The bytes stop being valid at the next read call.
|
||||
// The bytes stop being valid at the next read.
|
||||
// If ReadSlice encounters an error before finding a delimiter,
|
||||
// it returns all the data in the buffer and the error itself (often io.EOF).
|
||||
// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
|
||||
|
@ -381,7 +393,8 @@ func (b *Reader) ReadBytes(delim byte) (line []byte, err error) {
|
|||
// For simple uses, a Scanner may be more convenient.
|
||||
func (b *Reader) ReadString(delim byte) (line string, err error) {
|
||||
bytes, err := b.ReadBytes(delim)
|
||||
return string(bytes), err
|
||||
line = string(bytes)
|
||||
return line, err
|
||||
}
|
||||
|
||||
// WriteTo implements io.WriterTo.
|
||||
|
@ -424,6 +437,9 @@ func (b *Reader) writeBuf(w io.Writer) (int64, error) {
|
|||
// Writer implements buffering for an io.Writer object.
|
||||
// If an error occurs writing to a Writer, no more data will be
|
||||
// accepted and all subsequent writes will return the error.
|
||||
// After all data has been written, the client should call the
|
||||
// Flush method to guarantee all data has been forwarded to
|
||||
// the underlying io.Writer.
|
||||
type Writer struct {
|
||||
err error
|
||||
buf []byte
|
||||
|
@ -434,28 +450,41 @@ type Writer struct {
|
|||
// NewWriterSize returns a new Writer whose buffer has at least the specified
|
||||
// size. If the argument io.Writer is already a Writer with large enough
|
||||
// size, it returns the underlying Writer.
|
||||
func NewWriterSize(wr io.Writer, size int) *Writer {
|
||||
func NewWriterSize(w io.Writer, size int) *Writer {
|
||||
// Is it already a Writer?
|
||||
b, ok := wr.(*Writer)
|
||||
b, ok := w.(*Writer)
|
||||
if ok && len(b.buf) >= size {
|
||||
return b
|
||||
}
|
||||
if size <= 0 {
|
||||
size = defaultBufSize
|
||||
}
|
||||
b = new(Writer)
|
||||
b.buf = make([]byte, size)
|
||||
b.wr = wr
|
||||
return b
|
||||
return &Writer{
|
||||
buf: make([]byte, size),
|
||||
wr: w,
|
||||
}
|
||||
}
|
||||
|
||||
// NewWriter returns a new Writer whose buffer has the default size.
|
||||
func NewWriter(wr io.Writer) *Writer {
|
||||
return NewWriterSize(wr, defaultBufSize)
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return NewWriterSize(w, defaultBufSize)
|
||||
}
|
||||
|
||||
// Reset discards any unflushed buffered data, clears any error, and
|
||||
// resets b to write its output to w.
|
||||
func (b *Writer) Reset(w io.Writer) {
|
||||
b.err = nil
|
||||
b.n = 0
|
||||
b.wr = w
|
||||
}
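The companion pattern for Writer.Reset: one buffered writer reused for several destinations, flushing before each switch.

package main

import (
	"bufio"
	"bytes"
	"fmt"
)

func main() {
	var a, b bytes.Buffer
	w := bufio.NewWriter(&a)
	w.WriteString("to a")
	w.Flush()   // push pending bytes before switching
	w.Reset(&b) // reuse the same buffer for a new destination
	w.WriteString("to b")
	w.Flush()
	fmt.Println(a.String(), b.String())
}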
|
||||
|
||||
// Flush writes any buffered data to the underlying io.Writer.
|
||||
func (b *Writer) Flush() error {
|
||||
err := b.flush()
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *Writer) flush() error {
|
||||
if b.err != nil {
|
||||
return b.err
|
||||
}
|
||||
|
@ -498,7 +527,7 @@ func (b *Writer) Write(p []byte) (nn int, err error) {
|
|||
} else {
|
||||
n = copy(b.buf[b.n:], p)
|
||||
b.n += n
|
||||
b.Flush()
|
||||
b.flush()
|
||||
}
|
||||
nn += n
|
||||
p = p[n:]
|
||||
|
@ -517,7 +546,7 @@ func (b *Writer) WriteByte(c byte) error {
|
|||
if b.err != nil {
|
||||
return b.err
|
||||
}
|
||||
if b.Available() <= 0 && b.Flush() != nil {
|
||||
if b.Available() <= 0 && b.flush() != nil {
|
||||
return b.err
|
||||
}
|
||||
b.buf[b.n] = c
|
||||
|
@ -540,7 +569,7 @@ func (b *Writer) WriteRune(r rune) (size int, err error) {
|
|||
}
|
||||
n := b.Available()
|
||||
if n < utf8.UTFMax {
|
||||
if b.Flush(); b.err != nil {
|
||||
if b.flush(); b.err != nil {
|
||||
return 0, b.err
|
||||
}
|
||||
n = b.Available()
|
||||
|
@ -565,7 +594,7 @@ func (b *Writer) WriteString(s string) (int, error) {
|
|||
b.n += n
|
||||
nn += n
|
||||
s = s[n:]
|
||||
b.Flush()
|
||||
b.flush()
|
||||
}
|
||||
if b.err != nil {
|
||||
return nn, b.err
|
||||
|
@ -585,23 +614,28 @@ func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
|
|||
}
|
||||
var m int
|
||||
for {
|
||||
if b.Available() == 0 {
|
||||
if err1 := b.flush(); err1 != nil {
|
||||
return n, err1
|
||||
}
|
||||
}
|
||||
m, err = r.Read(b.buf[b.n:])
|
||||
if m == 0 {
|
||||
break
|
||||
}
|
||||
b.n += m
|
||||
n += int64(m)
|
||||
if b.Available() == 0 {
|
||||
if err1 := b.Flush(); err1 != nil {
|
||||
return n, err1
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
// If we filled the buffer exactly, flush pre-emptively.
|
||||
if b.Available() == 0 {
|
||||
err = b.flush()
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
|
|
@ -847,6 +847,10 @@ func TestWriterReadFrom(t *testing.T) {
|
|||
t.Errorf("ws[%d],rs[%d]: w.ReadFrom(r) = %d, %v, want %d, nil", wi, ri, n, err, len(input))
|
||||
continue
|
||||
}
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Errorf("Flush returned %v", err)
|
||||
continue
|
||||
}
|
||||
if got, want := b.String(), string(input); got != want {
|
||||
t.Errorf("ws[%d], rs[%d]:\ngot %q\nwant %q\n", wi, ri, got, want)
|
||||
}
|
||||
|
@ -1003,6 +1007,56 @@ func TestReaderClearError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// Test for golang.org/issue/5947
|
||||
func TestWriterReadFromWhileFull(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
w := NewWriterSize(buf, 10)
|
||||
|
||||
// Fill buffer exactly.
|
||||
n, err := w.Write([]byte("0123456789"))
|
||||
if n != 10 || err != nil {
|
||||
t.Fatalf("Write returned (%v, %v), want (10, nil)", n, err)
|
||||
}
|
||||
|
||||
// Use ReadFrom to read in some data.
|
||||
n2, err := w.ReadFrom(strings.NewReader("abcdef"))
|
||||
if n2 != 6 || err != nil {
|
||||
t.Fatalf("ReadFrom returned (%v, %v), want (6, nil)", n2, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReaderReset(t *testing.T) {
|
||||
r := NewReader(strings.NewReader("foo foo"))
|
||||
buf := make([]byte, 3)
|
||||
r.Read(buf)
|
||||
if string(buf) != "foo" {
|
||||
t.Errorf("buf = %q; want foo", buf)
|
||||
}
|
||||
r.Reset(strings.NewReader("bar bar"))
|
||||
all, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(all) != "bar bar" {
|
||||
t.Errorf("ReadAll = %q; want bar bar", all)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterReset(t *testing.T) {
|
||||
var buf1, buf2 bytes.Buffer
|
||||
w := NewWriter(&buf1)
|
||||
w.WriteString("foo")
|
||||
w.Reset(&buf2) // and not flushed
|
||||
w.WriteString("bar")
|
||||
w.Flush()
|
||||
if buf1.String() != "" {
|
||||
t.Errorf("buf1 = %q; want empty", buf1.String())
|
||||
}
|
||||
if buf2.String() != "bar" {
|
||||
t.Errorf("buf2 = %q; want bar", buf2.String())
|
||||
}
|
||||
}
|
||||
|
||||
// An onlyReader only implements io.Reader, no matter what other methods the underlying implementation may have.
|
||||
type onlyReader struct {
|
||||
r io.Reader
|
||||
|
@ -1083,3 +1137,46 @@ func BenchmarkWriterCopyNoReadFrom(b *testing.B) {
|
|||
io.Copy(dst, src)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReaderEmpty(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
str := strings.Repeat("x", 16<<10)
|
||||
for i := 0; i < b.N; i++ {
|
||||
br := NewReader(strings.NewReader(str))
|
||||
n, err := io.Copy(ioutil.Discard, br)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if n != int64(len(str)) {
|
||||
b.Fatal("wrong length")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriterEmpty(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
str := strings.Repeat("x", 1<<10)
|
||||
bs := []byte(str)
|
||||
for i := 0; i < b.N; i++ {
|
||||
bw := NewWriter(ioutil.Discard)
|
||||
bw.Flush()
|
||||
bw.WriteByte('a')
|
||||
bw.Flush()
|
||||
bw.WriteRune('B')
|
||||
bw.Flush()
|
||||
bw.Write(bs)
|
||||
bw.Flush()
|
||||
bw.WriteString(str)
|
||||
bw.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriterFlush(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
bw := NewWriter(ioutil.Discard)
|
||||
str := strings.Repeat("x", 50)
|
||||
for i := 0; i < b.N; i++ {
|
||||
bw.WriteString(str)
|
||||
bw.Flush()
|
||||
}
|
||||
}
|
||||
@ -12,6 +12,14 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
func ExampleWriter() {
|
||||
w := bufio.NewWriter(os.Stdout)
|
||||
fmt.Fprint(w, "Hello, ")
|
||||
fmt.Fprint(w, "world!")
|
||||
w.Flush() // Don't forget to flush!
|
||||
// Output: Hello, world!
|
||||
}
|
||||
|
||||
// The simplest use of a Scanner, to read standard input as a set of lines.
|
||||
func ExampleScanner_lines() {
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
@ -44,8 +44,8 @@ type Scanner struct {
|
|||
// to give. The return values are the number of bytes to advance the input
|
||||
// and the next token to return to the user, plus an error, if any. If the
|
||||
// data does not yet hold a complete token, for instance if it has no newline
|
||||
// while scanning lines, SplitFunc can return (0, nil) to signal the Scanner
|
||||
// to read more data into the slice and try again with a longer slice
|
||||
// while scanning lines, SplitFunc can return (0, nil, nil) to signal the
|
||||
// Scanner to read more data into the slice and try again with a longer slice
|
||||
// starting at the same point in the input.
|
||||
//
|
||||
// If the returned error is non-nil, scanning stops and the error
|
||||
|
@ -287,7 +287,7 @@ func ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
|||
return 0, nil, nil
|
||||
}
|
||||
|
||||
// isSpace returns whether the character is a Unicode white space character.
|
||||
// isSpace reports whether the character is a Unicode white space character.
|
||||
// We avoid dependency on the unicode package, but check validity of the implementation
|
||||
// in the tests.
|
||||
func isSpace(r rune) bool {
|
||||
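A hedged illustration of the SplitFunc contract clarified above: a comma-separated tokenizer that returns (0, nil, nil) to ask the Scanner for more data and hands back the final fragment at EOF. The split function itself is invented for the example.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

func scanCommas(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := bytes.IndexByte(data, ','); i >= 0 {
		return i + 1, data[:i], nil
	}
	if atEOF {
		return len(data), data, nil // last token, no trailing comma
	}
	return 0, nil, nil // request more data
}

func main() {
	s := bufio.NewScanner(strings.NewReader("a,b,c"))
	s.Split(scanCommas)
	for s.Scan() {
		fmt.Println(s.Text())
	}
}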
@ -236,6 +236,19 @@ func panic(v interface{})
|
|||
// panicking.
|
||||
func recover() interface{}
|
||||
|
||||
// The print built-in function formats its arguments in an implementation-
|
||||
// specific way and writes the result to standard error.
|
||||
// Print is useful for bootstrapping and debugging; it is not guaranteed
|
||||
// to stay in the language.
|
||||
func print(args ...Type)
|
||||
|
||||
// The println built-in function formats its arguments in an implementation-
|
||||
// specific way and writes the result to standard error.
|
||||
// Spaces are always added between arguments and a newline is appended.
|
||||
// Println is useful for bootstrapping and debugging; it is not guaranteed
|
||||
// to stay in the language.
|
||||
func println(args ...Type)
|
||||
|
||||
// The error built-in interface type is the conventional interface for
|
||||
// representing an error condition, with the nil value representing no error.
|
||||
type error interface {
|
||||
@ -11,32 +11,6 @@ import (
|
|||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Compare returns an integer comparing two byte slices lexicographically.
|
||||
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
|
||||
// A nil argument is equivalent to an empty slice.
|
||||
func Compare(a, b []byte) int {
|
||||
m := len(a)
|
||||
if m > len(b) {
|
||||
m = len(b)
|
||||
}
|
||||
for i, ac := range a[0:m] {
|
||||
bc := b[i]
|
||||
switch {
|
||||
case ac > bc:
|
||||
return 1
|
||||
case ac < bc:
|
||||
return -1
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case len(a) < len(b):
|
||||
return -1
|
||||
case len(a) > len(b):
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func equalPortable(a, b []byte) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
|
@ -103,7 +77,7 @@ func Count(s, sep []byte) int {
|
|||
return count
|
||||
}
|
||||
|
||||
// Contains returns whether subslice is within b.
|
||||
// Contains reports whether subslice is within b.
|
||||
func Contains(b, subslice []byte) bool {
|
||||
return Index(b, subslice) != -1
|
||||
}
|
||||
|
@ -401,10 +375,7 @@ func Repeat(b []byte, count int) []byte {
|
|||
nb := make([]byte, len(b)*count)
|
||||
bp := 0
|
||||
for i := 0; i < count; i++ {
|
||||
for j := 0; j < len(b); j++ {
|
||||
nb[bp] = b[j]
|
||||
bp++
|
||||
}
|
||||
bp += copy(nb[bp:], b)
|
||||
}
|
||||
return nb
|
||||
}
|
||||
@ -7,10 +7,18 @@ package bytes
|
|||
//go:noescape
|
||||
|
||||
// IndexByte returns the index of the first instance of c in s, or -1 if c is not present in s.
|
||||
func IndexByte(s []byte, c byte) int // asm_$GOARCH.s
|
||||
func IndexByte(s []byte, c byte) int // ../runtime/asm_$GOARCH.s
|
||||
|
||||
//go:noescape
|
||||
|
||||
// Equal returns a boolean reporting whether a == b.
|
||||
// Equal returns a boolean reporting whether a and b
|
||||
// are the same length and contain the same bytes.
|
||||
// A nil argument is equivalent to an empty slice.
|
||||
func Equal(a, b []byte) bool // asm_arm.s or ../runtime/asm_{386,amd64}.s
|
||||
func Equal(a, b []byte) bool // ../runtime/asm_$GOARCH.s
|
||||
|
||||
//go:noescape
|
||||
|
||||
// Compare returns an integer comparing two byte slices lexicographically.
|
||||
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
|
||||
// A nil argument is equivalent to an empty slice.
|
||||
func Compare(a, b []byte) int // ../runtime/noasm_arm.goc or ../runtime/asm_{386,amd64}.s
|
||||
@ -47,7 +47,7 @@ type BinOpTest struct {
|
|||
i int
|
||||
}
|
||||
|
||||
var compareTests = []struct {
|
||||
var equalTests = []struct {
|
||||
a, b []byte
|
||||
i int
|
||||
}{
|
||||
|
@ -73,12 +73,8 @@ var compareTests = []struct {
|
|||
{nil, []byte("a"), -1},
|
||||
}
|
||||
|
||||
func TestCompare(t *testing.T) {
|
||||
func TestEqual(t *testing.T) {
|
||||
for _, tt := range compareTests {
|
||||
cmp := Compare(tt.a, tt.b)
|
||||
if cmp != tt.i {
|
||||
t.Errorf(`Compare(%q, %q) = %v`, tt.a, tt.b, cmp)
|
||||
}
|
||||
eql := Equal(tt.a, tt.b)
|
||||
if eql != (tt.i == 0) {
|
||||
t.Errorf(`Equal(%q, %q) = %v`, tt.a, tt.b, eql)
|
||||
|
@ -90,7 +86,7 @@ func TestCompare(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestEqual(t *testing.T) {
|
||||
func TestEqualExhaustive(t *testing.T) {
|
||||
var size = 128
|
||||
if testing.Short() {
|
||||
size = 32
|
||||
|
@ -147,6 +143,7 @@ var indexTests = []BinOpTest{
|
|||
{"", "a", -1},
|
||||
{"", "foo", -1},
|
||||
{"fo", "foo", -1},
|
||||
{"foo", "baz", -1},
|
||||
{"foo", "foo", 0},
|
||||
{"oofofoofooo", "f", 2},
|
||||
{"oofofoofooo", "foo", 4},
|
||||
|
@ -1086,6 +1083,24 @@ func TestTitle(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
var ToTitleTests = []TitleTest{
|
||||
{"", ""},
|
||||
{"a", "A"},
|
||||
{" aaa aaa aaa ", " AAA AAA AAA "},
|
||||
{" Aaa Aaa Aaa ", " AAA AAA AAA "},
|
||||
{"123a456", "123A456"},
|
||||
{"double-blind", "DOUBLE-BLIND"},
|
||||
{"ÿøû", "ŸØÛ"},
|
||||
}
|
||||
|
||||
func TestToTitle(t *testing.T) {
|
||||
for _, tt := range ToTitleTests {
|
||||
if s := string(ToTitle([]byte(tt.in))); s != tt.out {
|
||||
t.Errorf("ToTitle(%q) = %q, want %q", tt.in, s, tt.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var EqualFoldTests = []struct {
|
||||
s, t string
|
||||
out bool
|
||||
|
@ -1114,6 +1129,37 @@ func TestEqualFold(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestBufferGrowNegative(t *testing.T) {
|
||||
defer func() {
|
||||
if err := recover(); err == nil {
|
||||
t.Fatal("Grow(-1) should have panicked")
|
||||
}
|
||||
}()
|
||||
var b Buffer
|
||||
b.Grow(-1)
|
||||
}
|
||||
|
||||
func TestBufferTruncateNegative(t *testing.T) {
|
||||
defer func() {
|
||||
if err := recover(); err == nil {
|
||||
t.Fatal("Truncate(-1) should have panicked")
|
||||
}
|
||||
}()
|
||||
var b Buffer
|
||||
b.Truncate(-1)
|
||||
}
|
||||
|
||||
func TestBufferTruncateOutOfRange(t *testing.T) {
|
||||
defer func() {
|
||||
if err := recover(); err == nil {
|
||||
t.Fatal("Truncate(20) should have panicked")
|
||||
}
|
||||
}()
|
||||
var b Buffer
|
||||
b.Write(make([]byte, 10))
|
||||
b.Truncate(20)
|
||||
}
|
||||
|
||||
var makeFieldsInput = func() []byte {
|
||||
x := make([]byte, 1<<20)
|
||||
// Input is ~10% space, ~10% 2-byte UTF-8, rest ASCII non-space.
|
||||
|
|
libgo/go/bytes/compare_test.go (new file, 204 lines)
|
@ -0,0 +1,204 @@
|
|||
package bytes_test
|
||||
|
||||
import (
|
||||
. "bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var compareTests = []struct {
|
||||
a, b []byte
|
||||
i int
|
||||
}{
|
||||
{[]byte(""), []byte(""), 0},
|
||||
{[]byte("a"), []byte(""), 1},
|
||||
{[]byte(""), []byte("a"), -1},
|
||||
{[]byte("abc"), []byte("abc"), 0},
|
||||
{[]byte("ab"), []byte("abc"), -1},
|
||||
{[]byte("abc"), []byte("ab"), 1},
|
||||
{[]byte("x"), []byte("ab"), 1},
|
||||
{[]byte("ab"), []byte("x"), -1},
|
||||
{[]byte("x"), []byte("a"), 1},
|
||||
{[]byte("b"), []byte("x"), -1},
|
||||
// test runtime·memeq's chunked implementation
|
||||
{[]byte("abcdefgh"), []byte("abcdefgh"), 0},
|
||||
{[]byte("abcdefghi"), []byte("abcdefghi"), 0},
|
||||
{[]byte("abcdefghi"), []byte("abcdefghj"), -1},
|
||||
// nil tests
|
||||
{nil, nil, 0},
|
||||
{[]byte(""), nil, 0},
|
||||
{nil, []byte(""), 0},
|
||||
{[]byte("a"), nil, 1},
|
||||
{nil, []byte("a"), -1},
|
||||
}
|
||||
|
||||
func TestCompare(t *testing.T) {
|
||||
for _, tt := range compareTests {
|
||||
cmp := Compare(tt.a, tt.b)
|
||||
if cmp != tt.i {
|
||||
t.Errorf(`Compare(%q, %q) = %v`, tt.a, tt.b, cmp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareIdenticalSlice(t *testing.T) {
|
||||
var b = []byte("Hello Gophers!")
|
||||
if Compare(b, b) != 0 {
|
||||
t.Error("b != b")
|
||||
}
|
||||
if Compare(b, b[:1]) != 1 {
|
||||
t.Error("b > b[:1] failed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareBytes(t *testing.T) {
|
||||
n := 128
|
||||
a := make([]byte, n+1)
|
||||
b := make([]byte, n+1)
|
||||
for len := 0; len < 128; len++ {
|
||||
// randomish but deterministic data. No 0 or 255.
|
||||
for i := 0; i < len; i++ {
|
||||
a[i] = byte(1 + 31*i%254)
|
||||
b[i] = byte(1 + 31*i%254)
|
||||
}
|
||||
// data past the end is different
|
||||
for i := len; i <= n; i++ {
|
||||
a[i] = 8
|
||||
b[i] = 9
|
||||
}
|
||||
cmp := Compare(a[:len], b[:len])
|
||||
if cmp != 0 {
|
||||
t.Errorf(`CompareIdentical(%d) = %d`, len, cmp)
|
||||
}
|
||||
if len > 0 {
|
||||
cmp = Compare(a[:len-1], b[:len])
|
||||
if cmp != -1 {
|
||||
t.Errorf(`CompareAshorter(%d) = %d`, len, cmp)
|
||||
}
|
||||
cmp = Compare(a[:len], b[:len-1])
|
||||
if cmp != 1 {
|
||||
t.Errorf(`CompareBshorter(%d) = %d`, len, cmp)
|
||||
}
|
||||
}
|
||||
for k := 0; k < len; k++ {
|
||||
b[k] = a[k] - 1
|
||||
cmp = Compare(a[:len], b[:len])
|
||||
if cmp != 1 {
|
||||
t.Errorf(`CompareAbigger(%d,%d) = %d`, len, k, cmp)
|
||||
}
|
||||
b[k] = a[k] + 1
|
||||
cmp = Compare(a[:len], b[:len])
|
||||
if cmp != -1 {
|
||||
t.Errorf(`CompareBbigger(%d,%d) = %d`, len, k, cmp)
|
||||
}
|
||||
b[k] = a[k]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCompareBytesEqual(b *testing.B) {
|
||||
b1 := []byte("Hello Gophers!")
|
||||
b2 := []byte("Hello Gophers!")
|
||||
for i := 0; i < b.N; i++ {
|
||||
if Compare(b1, b2) != 0 {
|
||||
b.Fatal("b1 != b2")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCompareBytesToNil(b *testing.B) {
|
||||
b1 := []byte("Hello Gophers!")
|
||||
var b2 []byte
|
||||
for i := 0; i < b.N; i++ {
|
||||
if Compare(b1, b2) != 1 {
|
||||
b.Fatal("b1 > b2 failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCompareBytesEmpty(b *testing.B) {
|
||||
b1 := []byte("")
|
||||
b2 := b1
|
||||
for i := 0; i < b.N; i++ {
|
||||
if Compare(b1, b2) != 0 {
|
||||
b.Fatal("b1 != b2")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCompareBytesIdentical(b *testing.B) {
|
||||
b1 := []byte("Hello Gophers!")
|
||||
b2 := b1
|
||||
for i := 0; i < b.N; i++ {
|
||||
if Compare(b1, b2) != 0 {
|
||||
b.Fatal("b1 != b2")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCompareBytesSameLength(b *testing.B) {
|
||||
b1 := []byte("Hello Gophers!")
|
||||
b2 := []byte("Hello, Gophers")
|
||||
for i := 0; i < b.N; i++ {
|
||||
if Compare(b1, b2) != -1 {
|
||||
b.Fatal("b1 < b2 failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCompareBytesDifferentLength(b *testing.B) {
|
||||
b1 := []byte("Hello Gophers!")
|
||||
b2 := []byte("Hello, Gophers!")
|
||||
for i := 0; i < b.N; i++ {
|
||||
if Compare(b1, b2) != -1 {
|
||||
b.Fatal("b1 < b2 failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCompareBytesBigUnaligned(b *testing.B) {
|
||||
b.StopTimer()
|
||||
b1 := make([]byte, 0, 1<<20)
|
||||
for len(b1) < 1<<20 {
|
||||
b1 = append(b1, "Hello Gophers!"...)
|
||||
}
|
||||
b2 := append([]byte("hello"), b1...)
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if Compare(b1, b2[len("hello"):]) != 0 {
|
||||
b.Fatal("b1 != b2")
|
||||
}
|
||||
}
|
||||
b.SetBytes(int64(len(b1)))
|
||||
}
|
||||
|
||||
func BenchmarkCompareBytesBig(b *testing.B) {
|
||||
b.StopTimer()
|
||||
b1 := make([]byte, 0, 1<<20)
|
||||
for len(b1) < 1<<20 {
|
||||
b1 = append(b1, "Hello Gophers!"...)
|
||||
}
|
||||
b2 := append([]byte{}, b1...)
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if Compare(b1, b2) != 0 {
|
||||
b.Fatal("b1 != b2")
|
||||
}
|
||||
}
|
||||
b.SetBytes(int64(len(b1)))
|
||||
}
|
||||
|
||||
func BenchmarkCompareBytesBigIdentical(b *testing.B) {
|
||||
b.StopTimer()
|
||||
b1 := make([]byte, 0, 1<<20)
|
||||
for len(b1) < 1<<20 {
|
||||
b1 = append(b1, "Hello Gophers!"...)
|
||||
}
|
||||
b2 := b1
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if Compare(b1, b2) != 0 {
|
||||
b.Fatal("b1 != b2")
|
||||
}
|
||||
}
|
||||
b.SetBytes(int64(len(b1)))
|
||||
}
|
|
@ -41,3 +41,33 @@ Equal (struct __go_open_array a, struct __go_open_array b)
|
|||
return 0;
|
||||
return __builtin_memcmp (a.__values, b.__values, a.__count) == 0;
|
||||
}
|
||||
|
||||
intgo Compare (struct __go_open_array a, struct __go_open_array b)
|
||||
__asm__ (GOSYM_PREFIX "bytes.Compare")
|
||||
__attribute__ ((no_split_stack));
|
||||
|
||||
intgo
|
||||
Compare (struct __go_open_array a, struct __go_open_array b)
|
||||
{
|
||||
intgo len;
|
||||
|
||||
len = a.__count;
|
||||
if (len > b.__count)
|
||||
len = b.__count;
|
||||
if (len > 0)
|
||||
{
|
||||
intgo ret;
|
||||
|
||||
ret = __builtin_memcmp (a.__values, b.__values, len);
|
||||
if (ret < 0)
|
||||
return -1;
|
||||
else if (ret > 0)
|
||||
return 1;
|
||||
}
|
||||
if (a.__count < b.__count)
|
||||
return -1;
|
||||
else if (a.__count > b.__count)
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
}
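Seen from the Go side, the C implementation above gives bytes.Compare its documented semantics: lexicographic order, results of -1, 0, or +1, and nil treated as the empty slice. A quick demonstration:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	fmt.Println(bytes.Compare([]byte("abc"), []byte("abd"))) // -1
	fmt.Println(bytes.Compare([]byte("abc"), []byte("abc"))) // 0
	fmt.Println(bytes.Compare([]byte("abc"), []byte("ab")))  // 1
	fmt.Println(bytes.Compare(nil, []byte{}))                // 0: nil equals empty
	fmt.Println(bytes.Equal(nil, []byte{}))                  // true
}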
|
||||
@ -113,6 +113,41 @@ func TestReaderWriteTo(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestReaderLen(t *testing.T) {
|
||||
const data = "hello world"
|
||||
r := NewReader([]byte(data))
|
||||
if got, want := r.Len(), 11; got != want {
|
||||
t.Errorf("r.Len(): got %d, want %d", got, want)
|
||||
}
|
||||
if n, err := r.Read(make([]byte, 10)); err != nil || n != 10 {
|
||||
t.Errorf("Read failed: read %d %v", n, err)
|
||||
}
|
||||
if got, want := r.Len(), 1; got != want {
|
||||
t.Errorf("r.Len(): got %d, want %d", got, want)
|
||||
}
|
||||
if n, err := r.Read(make([]byte, 1)); err != nil || n != 1 {
|
||||
t.Errorf("Read failed: read %d %v", n, err)
|
||||
}
|
||||
if got, want := r.Len(), 0; got != want {
|
||||
t.Errorf("r.Len(): got %d, want %d", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReaderDoubleUnreadRune(t *testing.T) {
|
||||
buf := NewBuffer([]byte("groucho"))
|
||||
if _, _, err := buf.ReadRune(); err != nil {
|
||||
// should not happen
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := buf.UnreadByte(); err != nil {
|
||||
// should not happen
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := buf.UnreadByte(); err == nil {
|
||||
t.Fatal("UnreadByte: expected error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
// verify that copying from an empty reader always has the same results,
|
||||
// regardless of the presence of a WriteTo method.
|
||||
func TestReaderCopyNothing(t *testing.T) {
|
||||
@ -77,6 +77,14 @@ func (br *bitReader) ReadBit() bool {
|
|||
return n != 0
|
||||
}
|
||||
|
||||
func (br *bitReader) TryReadBit() (bit byte, ok bool) {
|
||||
if br.bits > 0 {
|
||||
br.bits--
|
||||
return byte(br.n>>br.bits) & 1, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (br *bitReader) Err() error {
|
||||
return br.err
|
||||
}
|
||||
|
|
|
@ -22,14 +22,17 @@ func (s StructuralError) Error() string {
|
|||
|
||||
// A reader decompresses bzip2 compressed data.
|
||||
type reader struct {
|
||||
br bitReader
|
||||
setupDone bool // true if we have parsed the bzip2 header.
|
||||
blockSize int // blockSize in bytes, i.e. 900 * 1024.
|
||||
eof bool
|
||||
buf []byte // stores Burrows-Wheeler transformed data.
|
||||
c [256]uint // the `C' array for the inverse BWT.
|
||||
tt []uint32 // mirrors the `tt' array in the bzip2 source and contains the P array in the upper 24 bits.
|
||||
tPos uint32 // Index of the next output byte in tt.
|
||||
br bitReader
|
||||
fileCRC uint32
|
||||
blockCRC uint32
|
||||
wantBlockCRC uint32
|
||||
setupDone bool // true if we have parsed the bzip2 header.
|
||||
blockSize int // blockSize in bytes, i.e. 900 * 1024.
|
||||
eof bool
|
||||
buf []byte // stores Burrows-Wheeler transformed data.
|
||||
c [256]uint // the `C' array for the inverse BWT.
|
||||
tt []uint32 // mirrors the `tt' array in the bzip2 source and contains the P array in the upper 24 bits.
|
||||
tPos uint32 // Index of the next output byte in tt.
|
||||
|
||||
preRLE []uint32 // contains the RLE data still to be processed.
|
||||
preRLEUsed int // number of entries of preRLE used.
|
||||
|
@ -50,12 +53,14 @@ const bzip2BlockMagic = 0x314159265359
|
|||
const bzip2FinalMagic = 0x177245385090
|
||||
|
||||
// setup parses the bzip2 header.
|
||||
func (bz2 *reader) setup() error {
|
||||
func (bz2 *reader) setup(needMagic bool) error {
|
||||
br := &bz2.br
|
||||
|
||||
magic := br.ReadBits(16)
|
||||
if magic != bzip2FileMagic {
|
||||
return StructuralError("bad magic value")
|
||||
if needMagic {
|
||||
magic := br.ReadBits(16)
|
||||
if magic != bzip2FileMagic {
|
||||
return StructuralError("bad magic value")
|
||||
}
|
||||
}
|
||||
|
||||
t := br.ReadBits(8)
|
||||
|
@ -68,8 +73,11 @@ func (bz2 *reader) setup() error {
|
|||
return StructuralError("invalid compression level")
|
||||
}
|
||||
|
||||
bz2.fileCRC = 0
|
||||
bz2.blockSize = 100 * 1024 * (int(level) - '0')
|
||||
bz2.tt = make([]uint32, bz2.blockSize)
|
||||
if bz2.blockSize > len(bz2.tt) {
|
||||
bz2.tt = make([]uint32, bz2.blockSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -79,7 +87,7 @@ func (bz2 *reader) Read(buf []byte) (n int, err error) {
|
|||
}
|
||||
|
||||
if !bz2.setupDone {
|
||||
err = bz2.setup()
|
||||
err = bz2.setup(true)
|
||||
brErr := bz2.br.Err()
|
||||
if brErr != nil {
|
||||
err = brErr
|
||||
|
@ -98,14 +106,14 @@ func (bz2 *reader) Read(buf []byte) (n int, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
func (bz2 *reader) read(buf []byte) (n int, err error) {
|
||||
func (bz2 *reader) readFromBlock(buf []byte) int {
|
||||
// bzip2 is a block based compressor, except that it has a run-length
|
||||
// preprocessing step. The block based nature means that we can
|
||||
// preallocate fixed-size buffers and reuse them. However, the RLE
|
||||
// preprocessing would require allocating huge buffers to store the
|
||||
// maximum expansion. Thus we process blocks all at once, except for
|
||||
// the RLE which we decompress as required.
|
||||
|
||||
n := 0
|
||||
for (bz2.repeats > 0 || bz2.preRLEUsed < len(bz2.preRLE)) && n < len(buf) {
|
||||
// We have RLE data pending.
|
||||
|
||||
|
@ -148,34 +156,87 @@ func (bz2 *reader) read(buf []byte) (n int, err error) {
|
|||
n++
|
||||
}
|
||||
|
||||
if n > 0 {
|
||||
return
|
||||
return n
|
||||
}
|
||||
|
||||
func (bz2 *reader) read(buf []byte) (int, error) {
|
||||
for {
|
||||
n := bz2.readFromBlock(buf)
|
||||
if n > 0 {
|
||||
bz2.blockCRC = updateCRC(bz2.blockCRC, buf[:n])
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// End of block. Check CRC.
|
||||
if bz2.blockCRC != bz2.wantBlockCRC {
|
||||
bz2.br.err = StructuralError("block checksum mismatch")
|
||||
return 0, bz2.br.err
|
||||
}
|
||||
|
||||
// Find next block.
|
||||
br := &bz2.br
|
||||
switch br.ReadBits64(48) {
|
||||
default:
|
||||
return 0, StructuralError("bad magic value found")
|
||||
|
||||
case bzip2BlockMagic:
|
||||
// Start of block.
|
||||
err := bz2.readBlock()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
case bzip2FinalMagic:
|
||||
// Check end-of-file CRC.
|
||||
wantFileCRC := uint32(br.ReadBits64(32))
|
||||
if br.err != nil {
|
||||
return 0, br.err
|
||||
}
|
||||
if bz2.fileCRC != wantFileCRC {
|
||||
br.err = StructuralError("file checksum mismatch")
|
||||
return 0, br.err
|
||||
}
|
||||
|
||||
// Skip ahead to byte boundary.
|
||||
// Is there a file concatenated to this one?
|
||||
// It would start with BZ.
|
||||
if br.bits%8 != 0 {
|
||||
br.ReadBits(br.bits % 8)
|
||||
}
|
||||
b, err := br.r.ReadByte()
|
||||
if err == io.EOF {
|
||||
br.err = io.EOF
|
||||
bz2.eof = true
|
||||
return 0, io.EOF
|
||||
}
|
||||
if err != nil {
|
||||
br.err = err
|
||||
return 0, err
|
||||
}
|
||||
z, err := br.r.ReadByte()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
br.err = err
|
||||
return 0, err
|
||||
}
|
||||
if b != 'B' || z != 'Z' {
|
||||
return 0, StructuralError("bad magic value in continuation file")
|
||||
}
|
||||
if err := bz2.setup(false); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// No RLE data is pending so we need to read a block.
|
||||
|
||||
br := &bz2.br
|
||||
magic := br.ReadBits64(48)
|
||||
if magic == bzip2FinalMagic {
|
||||
br.ReadBits64(32) // ignored CRC
|
||||
bz2.eof = true
|
||||
return 0, io.EOF
|
||||
} else if magic != bzip2BlockMagic {
|
||||
return 0, StructuralError("bad magic value found")
|
||||
}
|
||||
|
||||
err = bz2.readBlock()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return bz2.read(buf)
|
||||
}
|
||||
|
||||
// readBlock reads a bzip2 block. The magic number should already have been consumed.
|
||||
func (bz2 *reader) readBlock() (err error) {
|
||||
br := &bz2.br
|
||||
br.ReadBits64(32) // skip checksum. TODO: check it if we can figure out what it is.
|
||||
bz2.wantBlockCRC = uint32(br.ReadBits64(32)) // the block CRC is verified as the block is read
|
||||
bz2.blockCRC = 0
|
||||
bz2.fileCRC = (bz2.fileCRC<<1 | bz2.fileCRC>>31) ^ bz2.wantBlockCRC
|
||||
randomized := br.ReadBits(1)
|
||||
if randomized != 0 {
|
||||
return StructuralError("deprecated randomized files")
|
||||
|
@ -316,6 +377,9 @@ func (bz2 *reader) readBlock() (err error) {
|
|||
if repeat > 0 {
|
||||
// We have decoded a complete run-length so we need to
|
||||
// replicate the last output symbol.
|
||||
if repeat > bz2.blockSize-bufIndex {
|
||||
return StructuralError("repeats past end of block")
|
||||
}
|
||||
for i := 0; i < repeat; i++ {
|
||||
b := byte(mtf.First())
|
||||
bz2.tt[bufIndex] = uint32(b)
|
||||
|
@ -339,6 +403,9 @@ func (bz2 *reader) readBlock() (err error) {
|
|||
// doesn't need to be encoded and we have |v-1| in the next
|
||||
// line.
|
||||
b := byte(mtf.Decode(int(v - 1)))
|
||||
if bufIndex >= bz2.blockSize {
|
||||
return StructuralError("data exceeds block size")
|
||||
}
|
||||
bz2.tt[bufIndex] = uint32(b)
|
||||
bz2.c[b]++
|
||||
bufIndex++
|
||||
|
@ -385,3 +452,33 @@ func inverseBWT(tt []uint32, origPtr uint, c []uint) uint32 {
|
|||
|
||||
return tt[origPtr] >> 8
|
||||
}
|
||||
|
||||
// This is a standard CRC32 like in hash/crc32 except that all the shifts are reversed,
|
||||
// causing the bits in the input to be processed in the reverse of the usual order.
|
||||
|
||||
var crctab [256]uint32
|
||||
|
||||
func init() {
|
||||
const poly = 0x04C11DB7
|
||||
for i := range crctab {
|
||||
crc := uint32(i) << 24
|
||||
for j := 0; j < 8; j++ {
|
||||
if crc&0x80000000 != 0 {
|
||||
crc = (crc << 1) ^ poly
|
||||
} else {
|
||||
crc <<= 1
|
||||
}
|
||||
}
|
||||
crctab[i] = crc
|
||||
}
|
||||
}
|
||||
|
||||
// updateCRC updates the crc value to incorporate the data in b.
|
||||
// The initial value is 0.
|
||||
func updateCRC(val uint32, b []byte) uint32 {
|
||||
crc := ^val
|
||||
for _, v := range b {
|
||||
crc = crctab[byte(crc>>24)^v] ^ (crc << 8)
|
||||
}
|
||||
return ^crc
|
||||
}
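A cross-check sketch of the table above: it is the table-driven form of the MSB-first CRC-32 with polynomial 0x04C11DB7 (the bit-reversed twin of hash/crc32's IEEE constant), so a bit-at-a-time version produces the same values. Not part of the package; for illustration only.

package main

import "fmt"

func bitwiseBzip2CRC(val uint32, b []byte) uint32 {
	const poly = 0x04C11DB7
	crc := ^val
	for _, v := range b {
		crc ^= uint32(v) << 24
		for i := 0; i < 8; i++ {
			if crc&0x80000000 != 0 {
				crc = crc<<1 ^ poly
			} else {
				crc <<= 1
			}
		}
	}
	return ^crc
}

func main() {
	fmt.Printf("%08x\n", bitwiseBzip2CRC(0, []byte("hello world")))
}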
|
||||
@ -6,6 +6,7 @@ package bzip2
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
@ -62,6 +63,19 @@ func TestHelloWorldBZ2(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConcat(t *testing.T) {
|
||||
out, err := decompressHex(helloWorldBZ2Hex + helloWorldBZ2Hex)
|
||||
if err != nil {
|
||||
t.Errorf("error from Read: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
hello2 := bytes.Repeat(helloWorld, 2)
|
||||
if !bytes.Equal(hello2, out) {
|
||||
t.Errorf("got %x, want %x", out, hello2)
|
||||
}
|
||||
}
|
||||
|
||||
func testZeros(t *testing.T, inHex string, n int) {
|
||||
out, err := decompressHex(inHex)
|
||||
if err != nil {
|
||||
|
@ -155,3 +169,195 @@ const rand2Hex = "92d5652616ac444a4a04af1a8a3964aca0450d43d6cf233bd03233f4ba92f8
|
|||
|
||||
const rand3BZ2Hex = "425a68393141592653593be669d00000327ffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffc002b3b2b1b6e2bae400004c00132300004c0d268c004c08c0130026001a008683234c0684c34008c230261a04c0260064d07a8d00034000d27a1268c9931a8d327a3427a41faa69ea0da264c1a34219326869b51b49a6469a3268c689fa53269a62794687a9a68f5189994c9e487a8f534fd49a3d34043629e8c93d04da4f4648d30d4f44d3234c4d3023d0840680984d309934c234d3131a000640984f536a6132601300130130c8d00d04d1841ea7a8d31a02609b40023460010c01a34d4c1a0d04d3069306810034d0d0d4c0046130d034d0131a9a64d321804c68003400098344c13000991808c0001a00000000098004d3d4da4604c47a13012140aadf8d673c922c607ef6212a8c0403adea4b28aee578900e653b9cdeb8d11e6b838815f3ebaad5a01c5408d84a332170aff8734d4e06612d3c2889f31925fb89e33561f5100ae89b1f7047102e729373d3667e58d73aaa80fa7be368a1cc2dadd81d81ec8e1b504bd772ca31d03649269b01ceddaca07bf3d4eba24de141be3f86f93601e03714c0f64654671684f9f9528626fd4e1b76753dc0c54b842486b8d59d8ab314e86ca818e7a1f079463cbbd70d9b79b283c7edc419406311022e4be98c2c1374df9cdde2d008ce1d00e5f06ad1024baf555631f70831fc1023034e62be7c4bcb648caf276963ffa20e96bb50377fe1c113da0db4625b50741c35a058edb009c6ee5dbf93b8a6b060eec568180e8db791b82aab96cbf4326ca98361461379425ba8dcc347be670bdba7641883e5526ae3d833f6e9cb9bac9557747c79e206151072f7f0071dff3880411846f66bf4075c7462f302b53cb3400a74cf35652ad5641ed33572fd54e7ed7f85f58a0acba89327e7c6be5c58cb71528b99df2431f1d0358f8d28d81d95292da631fb06701decabb205fac59ff0fb1df536afc681eece6ea658c4d9eaa45f1342aa1ff70bdaff2ddaf25ec88c22f12829a0553db1ec2505554cb17d7b282e213a5a2aa30431ded2bce665bb199d023840832fedb2c0c350a27291407ff77440792872137df281592e82076a05c64c345ffb058c64f7f7c207ef78420b7010520610f17e302cc4dfcfaef72a0ed091aab4b541eb0531bbe941ca2f792bf7b31ca6162882b68054a8470115bc2c19f2df2023f7800432b39b04d3a304e8085ba3f1f0ca5b1ba4d38d339e6084de979cdea6d0e244c6c9fa0366bd890621e3d30846f5e8497e21597b8f29bbf52c961a485dfbea647600da0fc1f25ce4d203a8352ece310c39073525044e7ac46acf2ed9120bae1b4f6f02364abfe343f80b290983160c103557af1c68416480d024cc31b6c06cfec011456f1e95c420a12b48b1c3fe220c2879a982fb099948ac440db844b9a112a5188c7783fd3b19593290785f908d95c9db4b280bafe89c1313aeec24772046d9bc089645f0d182a21184e143823c5f52de50e5d7e98d3d7ab56f5413bbccd1415c9bcff707def475b643fb7f29842582104d4cc1dbaaca8f10a2f44273c339e0984f2b1e06ab2f0771db01fafa8142298345f3196f23e5847bda024034b6f59b11c29e981c881456e40d211929fd4f766200258aad8212016322bd5c605790dcfdf1bd2a93d99c9b8f498722d311d7eae7ff420496a31804c55f4759a7b13aaaf5f7ce006c3a8a998897d5e0a504398c2b627852545baf440798bcc5cc049357cf3f17d9771e4528a1af3d77dc794a11346e1bdf5efe37a405b127b4c43b616d61fbc5dc914e14240ef99a7400"
|
||||
const rand3Hex = "1744b384d68c042371244e13500d4bfb98c6244e3d71a5b700224420b59c593553f33bd786e3d0ce31626f511bc985f59d1a88aa38ba8ad6218d306abee60dd9172540232b95be1af146c69e72e5fde667a090dc3f93bdc5c5af0ab80acdbaa7a505f628c59dc0247b31a439cacf5010a94376d71521df08c178b02fb96fdb1809144ea38c68536187c53201fea8631fb0a880b4451ccdca7cc61f6aafca21cc7449d920599db61789ac3b1e164b3390124f95022aeea39ccca3ec1053f4fa10de2978e2861ea58e477085c2220021a0927aa94c5d0006b5055abba340e4f9eba22e969978dfd18e278a8b89d877328ae34268bc0174cfe211954c0036f078025217d1269fac1932a03b05a0b616012271bbe1fb554171c7a59b196d8a4479f45a77931b5d97aaf6c0c673cbe597b79b96e2a0c1eae2e66e46ccc8c85798e23ffe972ebdaa3f6caea243c004e60321eb47cd79137d78fd0613be606feacc5b3637bdc96a89c13746db8cad886f3ccf912b2178c823bcac395f06d28080269bdca2debf3419c66c690fd1adcfbd53e32e79443d7a42511a84cb22ca94fffad9149275a075b2f8ae0b021dcde9bf62b102db920733b897560518b06e1ad7f4b03458493ddaa7f4fa2c1609f7a1735aeeb1b3e2cea3ab45fc376323cc91873b7e9c90d07c192e38d3f5dfc9bfab1fd821c854da9e607ea596c391c7ec4161c6c4493929a8176badaa5a5af7211c623f29643a937677d3df0da9266181b7c4da5dd40376db677fe8f4a1dc456adf6f33c1e37cec471dd318c2647644fe52f93707a77da7d1702380a80e14cc0fdce7bf2eed48a529090bae0388ee277ce6c7018c5fb00b88362554362205c641f0d0fab94fd5b8357b5ff08b207fee023709bc126ec90cfb17c006754638f8186aaeb1265e80be0c1189ec07d01d5f6f96cb9ce82744147d18490de7dc72862f42f024a16968891a356f5e7e0e695d8c933ba5b5e43ad4c4ade5399bc2cae9bb6189b7870d7f22956194d277f28b10e01c10c6ffe3e065f7e2d6d056aa790db5649ca84dc64c35566c0af1b68c32b5b7874aaa66467afa44f40e9a0846a07ae75360a641dd2acc69d93219b2891f190621511e62a27f5e4fbe641ece1fa234fc7e9a74f48d2a760d82160d9540f649256b169d1fed6fbefdc491126530f3cbad7913e19fbd7aa53b1e243fbf28d5f38c10ebd77c8b986775975cc1d619efb27cdcd733fa1ca36cffe9c0a33cc9f02463c91a886601fd349efee85ef1462065ef9bd2c8f533220ad93138b8382d5938103ab25b2d9af8ae106e1211eb9b18793fba033900c809c02cd6d17e2f3e6fc84dae873411f8e87c3f0a8f1765b7825d185ce3730f299c3028d4a62da9ee95c2b870fb70c79370d485f9d5d9acb78926d20444033d960524d2776dc31988ec7c0dbf23b9905d"
|
||||
|
||||
const (
|
||||
digits = iota
|
||||
twain
|
||||
)
|
||||
|
||||
var testfiles = []string{
|
||||
// Digits is the digits of the irrational number e. Its decimal representation
|
||||
// does not repeat, but there are only 10 possible digits, so it should be
|
||||
// reasonably compressible.
|
||||
digits: "testdata/e.txt.bz2",
|
||||
// Twain is Project Gutenberg's edition of Mark Twain's classic English novel.
|
||||
twain: "testdata/Mark.Twain-Tom.Sawyer.txt.bz2",
|
||||
}
|
||||
|
||||
func benchmarkDecode(b *testing.B, testfile int) {
|
||||
compressed, err := ioutil.ReadFile(testfiles[testfile])
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.SetBytes(int64(len(compressed)))
|
||||
for i := 0; i < b.N; i++ {
|
||||
r := bytes.NewBuffer(compressed)
|
||||
io.Copy(ioutil.Discard, NewReader(r))
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkDecodeDigits(b *testing.B) { benchmarkDecode(b, digits) }
|
||||
func BenchmarkDecodeTwain(b *testing.B) { benchmarkDecode(b, twain) }
|
||||
|
||||
func TestBufferOverrun(t *testing.T) {
|
||||
// Tests https://code.google.com/p/go/issues/detail?id=5747.
|
||||
buffer := bytes.NewBuffer([]byte(bufferOverrunBase64))
|
||||
decoder := base64.NewDecoder(base64.StdEncoding, buffer)
|
||||
decompressor := NewReader(decoder)
|
||||
// This shouldn't panic.
|
||||
ioutil.ReadAll(decompressor)
|
||||
}
|
||||
|
||||
var bufferOverrunBase64 string = `
|
||||
QlpoNTFBWSZTWTzyiGcACMP/////////////////////////////////3/7f3///
|
||||
////4N/fCZODak2Xo44GIHZgkGzDRbFAuwAAKoFV7T6AO6qwA6APb6s2rOoAkAAD
|
||||
oACUoDtndh0iQAPkAAAAaPWihQoCgr5t97Obju21ChQB0NBm3RbA7apXrRoBooAA
|
||||
AhA+IAHWl2Us3O7t9yieb3udvd76+4+fd33nd3HO1bVvfcGRne6+3vfPvfc++995
|
||||
w7k973eJhasLVec970tzDNXdX28LoPXZ3H3K9z0s5ufWAfes49d5594c3dUYtI+2
|
||||
+h1dvtpRa+uvrVEAG9bl893RVEN7cWvroSqWjPMGgAQi7Gq8TJSgKKdjKFBIB9Ae
|
||||
LqWxleu715eXe7ml9e5098Z6G1vr7t1QZ6ot76YzPd3j7333t2ql2Chm7XrA9ICQ
|
||||
VF77z3rVBWqkSXtlfb099hyezAr6USbGpICTSCFAaqHrKo+tUnm32rpE4Ue+t2mj
|
||||
bKUeipEqwc93EdhhTwmQpOhhesC9iqDSPNTWYNSnUtBdm1nsA0nqqNd7OWwDXtFL
|
||||
ONmmA6Ubke26I9UblvWIPR5VOWOnctai443URunnDy77uVC59OfRvezlDu33Z7Ly
|
||||
3NNuuHW63088xu3t3NHZhkZbG7tXRlj00qOtbaXTJUUdspTbABR9R6EUwQAEAAAA
|
||||
EMEwRpoAAAABMmhoAAjBNNAaCMhponpoGpgJpk9TEyp6niGKZkAaAEfqMQ09U80p
|
||||
+pMGSCKngIAAAAgAAg0AAJhGgABGCEaaTyTKeNI1PE0wkj01GajMSNPSZGnqbU9T
|
||||
anlPUNAHqGQ0DQAMg9TamgAAYRU/IAAICAmjQJgjQBMEwp5DTSaaYmhTeqfplPID
|
||||
U1T9TynoU82pT1NPU/VP0j1NHqRpk9TTR7SnqaNNGmmQAaAD1Aeo0PSAAAAaaBiK
|
||||
eBAQBGgIABGQA0AmBNNBoaAgaJmpglPEyYap6npiTT0agGjJjUaaDTQAAAAAAM1A
|
||||
9QAaAAAADU8iEAQAEyAJk0NNNJgIZTJ5E00YSemiaZNGm1MpGNJ+lPU9qm9U2RDM
|
||||
oY0EzJB6h6nqDID1NMBDDRpo1AGNAjCMmhkMgaYSJIgAAAQyAAEyBoATECCNhTT0
|
||||
U/IZAmCM1DSTxkzUE8p6NDaGiZGJqntTFHvUyU9qPQp7Kn5GgKNPU9QAGg9QAAA3
|
||||
wz0Pk/g/m/m9P9H4vxv2+dH3gCS8nhbbbbbYxtgNsBsG0m2MbG0NNtsbYNsaY0wb
|
||||
bBibGmm22mxptNpsaGNDTY02JsG0MY0xg2MaYNNDbGwG0L5vsK/F9DO+EAA447Kq
|
||||
p7Wdf6Y+5c20T7DfHyMXIzRKrZexw72uiQI+y55vOe52xpqbCLC2uR20JdER7Zvr
|
||||
7ufuKb6zhiBxLuj0eA27v8RpMLucw9Ohwcizi2wrpt+yU1FdpM7ZYPcwS3XTef+A
|
||||
Wzjxwhdrgw3aH1LeC1eZW900x8V9Nv4hTPXp4l067P/4ANVZFF/imOe/d5bdueam
|
||||
/DFFokQWnFaU+ZqLBCM+d0PialJQWnLqRQZk/KhfbbYc2pCUTgffcSYbrCM1N+8l
|
||||
HU6gSz+h2GJXs+tbrNviL83M97X0vcTn/F82P8wen8/3/h3sHY+sf9CSej9ThYTV
|
||||
3lQ+FUHpfpGD4kv7dYMV995dpDX/y3xR8FoXx1bjUxBTNxuutwQ/h/Eedn9wpn6w
|
||||
E3+ND8YhN1HSriIxRE/6uFyMv6/oC6Elarw3aHMMqHJkGiiz6tejmvnYLQa+Qm6G
|
||||
deZ7jXTZV6NlpocgDnRdimS06bTYSkvPAL/xoWNLkX6N6VljU0dfKSBmm2uZE/xu
|
||||
sutQ1EdP7GdjhglIq4xlOFUFEQpmX+xx7R8y6c0GSAaqusOjNZwxZRudOvmXm1tZ
|
||||
T+YnbeB2ir9eiHNrtJNSLD/J/WDyuQpwBUtLKo0krccY/wIILP7f86teb9Z/9oyz
|
||||
OX05qEWbObfhpRw+9+rCvp/35ML8KX3aHaI0n+tudbFRsV5FLW+Oa8ruLN4peyVL
|
||||
DWjTHrXNthq/s7zAJYMeFJZkZt5mT9rfpH+5g3nc+piOSZ+J5nHtOnKI7Ff8Xl+j
|
||||
0t76XTNucCHQ6whav1OHdF53TY5wuv5OzvrdnxoId8fTyUvERr0ERINu/8XxZZ5f
|
||||
B5/kTZ8bBO0wv54Jp+ED/GQI8lZHzIQCP3vfQhwnCTj9TvITic7P4mYLDbH3fyzR
|
||||
i+6EajCcpXLWSGf+ZXkOrWspDWDhXtEKas0v3UqWksqgY1rTj45krX4KihN+daXs
|
||||
pZl5WPlta5p06CX6Xm2SfzqkMw12/3ix1bpnnZ+kFeBNX7A+E9zzG6OZaN78GOpl
|
||||
9Ht/eZn9PqWdav852zr0zqkDK2H5IjdvNah+b1YVGdQGzwR4Nw+f13yEKnV+y66W
|
||||
djfq7zWp7m5w+hzfv+Ly8O7oet5Vvd8/wQvO7qzOZ2vjf9X8Tj8PnMb/nc/nKqRR
|
||||
+ml4UEhOOwfCeJEEI109CMYSh91iAJqPjMyH6KjrPD7W25llZVcREYNCTg6htbQt
|
||||
M38wYoquCWP6tdKYlVIv14xTNUeUf4El/FunCf6csZkmv+9tfWx7t59wuKIa3saU
|
||||
tZs9M+3HFOZtz3OLg/Unoaj9BYazYqA78xBU9tZzrtmF/rQL9CGJt90o/oYnSfcS
|
||||
SL3haaw351LXWQ1XOsv1SmH3v6ymuxEpPPnEDmBELaTYsvvMIWJsmPZFFww++Kd7
|
||||
s/Jo0JFeUU7uNtI+gVosAIpVVuWfI/9tOIycz7I5Z7zjV+NR2OuZbYtW5F08KX4o
|
||||
2k/xuJIchcNFPtxPfw9dkDgscRbMckyFMrzuZ3IvrcGzk0J6iI5ytrv37bGpAXMz
|
||||
WK9mMMPebepNevmLjjo/QWoM968Sjv7ldlPS5AinHcXwsFv6dmmh8lJt7UOJWoKu
|
||||
lMD1cB2ksIGpMdv8iuqR42Rn/kn+17BhhUZcwDBaUXVdX6bKW7fxlUYbq+mlqIcf
|
||||
a9v8HF87M9ANbi9bq9onf9TD7nQ6Xf6vZci8TBPX+/GI0He6j31fTVQYW+NsQxvO
|
||||
J8xrx+e58CCLQNjxeIyPt+F+qk/QMiXw+LyxGVkV/XcGQT9X03jSDP6beJ5QG1JW
|
||||
9Q3qLv/YixWI7gPV9Mrhf2oRYTc/9KLFRhkE3SjKOTKuSSBKQ24fI+hEznamH71D
|
||||
66Hwez8/0et7AtTv9zvamv2OD5He6fMV4k+ePl6+qPfO5CdHtK+eCDZL5+4f5yrl
|
||||
gTcRFiq8fXbc5IaI5fbbc1KMM/2T0Mr7+Hwaco6FtXm0fmhCgTZRqY4pKiEIfmaz
|
||||
QwHNOOCrtMJ2VwsyMumt7xsOolGnizRev6lILH43qPcczQM7Gc5zRin80YvFt1Qm
|
||||
h/57Z0auR2h0fuX50MBO4XQ+26y5l6v4j902R66c0j3z2KHstKQ04J/h6LbuNQE4
|
||||
D6cu/lyfK69DxxX8wb8XaQkMUcJdo1LzqUGDAb3Kfn/A3P/JYc99MO9qv67+SxWb
|
||||
wYTyqKdWTd+1KbR/Rcn0Io5zI/QquX7FA1bxfMytjQ/X+l0fh0Pf+Hx97meH4fQL
|
||||
7/T8/sdTm9Tn8nELvedyhydLlPPTScINdXyLIq9wgIJr4fWPbp9ZhFh/56fdSgOG
|
||||
HDXg+gkXsN2Rddr4HQ5P3u+RhLzmSjhzoqY5EsPC4QvRlX9JXjB84rPV5USR66qa
|
||||
/kjw4156GJnzoXtydKJE53t6PHfZWO+3ujsfI6iAdshc7OFzGXiZB9PtItKodhYq
|
||||
nABkTKdcpu4+TOpf9h5piX5slsaBjkeTnj/Ba02ilboQfcDVigxrYn/iTH5ySWUW
|
||||
/lHtg78s5UZM8sErwhNe3N3w+6ZOMnU+5i86/xFNtqZfDdXTGy1H3PzGbdtZXYT+
|
||||
Ixx2vpwBYzbPVYHxKosM5rPiVmcTllI9nuoSfeh9ib4foFWauOpvdmhBDqpTpKTX
|
||||
u8EO2l2Z195G2RIV7TlKSxGWjR5sl/nALu1uzBeLd9zpSujzMTd1uTX9Qk/Q1S+r
|
||||
vaW6bm8qqPO4jb6Wx6XIkm321nrIF6Ae25d1+Dpv/P5G4NoLd2j6/EtENC3FeR5z
|
||||
oo7bA+tI8yEQRhiF0z1FlJXLD5ZbhNNWQm/j/IbzRfh8JtOFZU7ruShLvHXysW9S
|
||||
9V909tr9jn8/E/Hb5N/1NVNHnZu2HIUvJvHJiHd2ucmeI9PWUMnppmE65GQ5E9xV
|
||||
ZRlGEH0X85EvmHyEupkMrCC0oMv9RCq+/H8gcfpe00Hs/S+regT5p58cyYomh93v
|
||||
qvuw/A06BE/wzJESuYbN9pqYpoXqXFemW1NksHEJ2w+PYMJ27WJyD5FpaXB85VaW
|
||||
qMOhDfO8E3QdH8ybyKt/UgI8/tDGpFbyOlaVdIv1FXJhoLp8soAA4Djg6/KZ066N
|
||||
ZFYuS8WdjpSZGP4/Lw+1yaXlzNznc/k2uHe2uXP3uFuPcHx+Dm44utxldoO1uBPy
|
||||
+jzOs14+MIgOjOHMVNqAbMd8fUedLlhJMCfMtm4uz01enLNKcMrtLlPIR37Yukh1
|
||||
YEMXYpm7eU4XU+j+Jj3pDyaXtXs+p1fWfTN/cy9/Oxs4umUXQ4uHh1kObtayDJ56
|
||||
/QMxiHobjHNKuKfMxsrYEwN+QVIyVjAwMDYuMjQ1AAA9IwJniiBLRkZDAAAXt0Ja
|
||||
aDQxQVkmU1lZtwytAACLf///////////////////+//////v//////////bv78//
|
||||
/+AXO133uwO2xB2UxIvbKXrCqCoURUBL2ytFI82AFdcOwMhVTHtk5rD3szEVNYD4
|
||||
aIQINCaMRoTaSn7SbSMJiYmEwieTEp+psqbMCp+VNPaFNpqbBNR7UmanlPUeKfqm
|
||||
j1PU0/VPU08o9Q9EeKHlPJtKbYqeTCYhN6U9T1NH6mp+lPyoGNTI/Knkyg1MggAg
|
||||
CaMEyQnqZoaaRtRtJpppppoDaTR6hpphGh6mmgHpMQBpkGTTEAAaAAAA00AZDag0
|
||||
ADIBkGgABqemiRNTI0k8aU0PRGRoAZlP0UAAAGgAAAyAADQaAAAaAAAAAAAAAAAA
|
||||
AaAAAAM0kgRBJ5MlPFP1Gj0jTTTUaekxNAbUGjTQMgaZANNAAAAaAADTQAAAAAAA
|
||||
ANAA0AAANADQ0QAAAAAAAAAaGgAAAAAAABoA0AAA0AAAAAAAAAAAAANAAAAAkSEI
|
||||
aTRpomp5DUxNNDTJPTKaep6T09Kemmo2JG0aTQ9ENogaaGhkABo0NHqaBoDTI0DC
|
||||
Gj0gNAMhoDQ9QMQNAGQAaDDwyMPIMlbG1vhRBTFo6JksSupgpAjPbY0ec02IGXjb
|
||||
eS+FBsh01+O4ZOaD+srUZCFaT4DRjVDLx7uKIsFtESIDUg1ZkhyCSYov05C00MtR
|
||||
BdNNa/AYPGOQZWcs+VegXOPrkushFbZ3mBoRD6WamClkpBaHZrUhUl02bIfRXX4w
|
||||
b3/9cW9nHDVxh2qFBxqgRKfmq7/Jc/tdJk05nVrGbckGVy2PnIy30CDhpWmqrSot
|
||||
K2bOnX0NbP1iy2cd0Na0ZmbRstm4MzMzbbMySTd35F7f+zPP8DC+NJLYcakkkkRd
|
||||
NZlupJt3OMFoDAD2g+N3FAMCydhIpoRHRQAdFI5nNg4ugEXHCYxkMyGCwtaJmial
|
||||
y0IMlpSYYM/weXNJAhFqS0GNmvaPEtYGjbvaucMdklOTmBX1vfVAkTYB1uXCSK64
|
||||
UNIixOqRKLuRCFtqIQtgwqaFrCkIYbbewErWABa+VGADWsJXJjfx5SJViLuwiGXq
|
||||
Ru6vCuwmU5CJiJz3UiBpmLv0r2wskxUhY4tzPVGQ9RMXJl65eLSNwZVwaSyGZ9Cm
|
||||
A3jztQUUpFeUryBTskW95iVwRMFrhBCwZBAFJBZvhMEMNoDJJlUoIhQkAkjbExp2
|
||||
YZio+ZYeAZUwmH1qUbdQixmxf0+61+aVgJ1hwxsO1yG3hFx4pfjc09ITVht0pG8u
|
||||
FtVFhPa1KE0gTRUSVXywkITucqk0Waz5Fs6qJpVHYdNrbYRFxnFsQGY1qmsTLjK6
|
||||
4QX5Rddo6krM/Bx9CqIAKq4CzVQYHrmIAd2EBhYmwVYwLvhzKIUrc2EirnGIvyuD
|
||||
O4YZDSwsVTA0BpVvUOjDErkCraBoSutcKwUSSLGhVvNYHLz3klgZD++wWsa/swLw
|
||||
gvNDY2De+sncOv8X2lq4HD95ZdwPuTIMXCwSbg4RrIqv+L0y6F17pqDecyQYPEj3
|
||||
iN/0BBeWZlJAyBMi5U3Q1zAlsK8IlDhaXGmvZrgISq5CfNjmUgxDeMggOKqxu4sI
|
||||
OrilS49Lkl1J3u3GjXTuH+rX+4ccyFAQnizCpPClcY77F59j63S6fr5vr+y99tuO
|
||||
7Ox7Wg/ljwhdyaK4xMmXczeJbx7x07htJNtC4xcQfAtvzeznLrN6MN/ILIBOI65I
|
||||
qIA2D5fHHj1XN4aN6TvOjWDaSbSWqxCSCvXUpzkNJAkWXAuTwF8k5uSJvQj/rVo0
|
||||
hAhEMEIYkCRGx9AX+byIuXWlLMbbVeliHNUL5AQYmNwLFu4SkmGD+UWtBMyVHQOQ
|
||||
ss0ggoVKSKOBUgnVS6ljt7WE1qXqJJ4QA1pEwYNLEaguEE1LtPNoVr5WzjbSbWPk
|
||||
V9OW3y9IneUDLoIV5pAkEFTEFGFVjeTFxtpzBBfGgycBxVCdz8eESBIzsamRchAa
|
||||
TQunQH8DHnpfod9QuAuRvc7JBlKUCYmCjMvynLcxIFohxCaYrDvGw4QbXZB7oWQ7
|
||||
hpoGlz23ayDfB8NrRRzdilsEQyQniu9ASLQg7RrGZnoTr1ai12IbCEUCGdFq03P5
|
||||
nBnRFAGmisQGcyykV9gKtcVMWLhCuVmXg86dndn7slUpRNSSEAU20oaWIm1maFTu
|
||||
E0DT4gTbg0nuhjtz3kNOz+i7sBm0bkXjxQWuLqlZEmp60ZTyRZJDUqKSEKg6hqcy
|
||||
ERxdU22CSNOO10RYUUiDVpKhPNdKTOIE1thp02sBNoNTFSht8WJtaBQ09qN3jd5r
|
||||
dOLX4IA5fevRyCCzDgRXfV4wzik4KROjmxmTMglBySlIMEzcXehnDXCRiZSlvwA2
|
||||
0YsIOROcm4UrIRFxJHctJH7OdN5u1aHVHb5UaLHpv48NgmFRE56KTSoaWunqm2st
|
||||
S0mrAdOiqcR12PWVbdVRJKcQ0DQuhwlAPcRtpxN3D4kbXJjToSYJIFw406G2CSaK
|
||||
jQMIJPZGlQmgyFhoCSzeGS1VSq5SKKQQxs5RqKUcVUNY57YUETb4mXzV84SPngKi
|
||||
nsce0mXByZq5BKUA9puHZWLNwQIYuDaJUNgG+E01E3pDYVNLKYQ0hsVesgV5gZY0
|
||||
htDsRdGtm0+iGnkN6+Ea9YJtUZNAkx2GgSoix12nTW0avTUfxR3oYcpvZ7IdtABE
|
||||
UhBcjG4qZtDZsS1JQHys243vhLaDTSvvTeBiJA2tmokqECTBcSOCAGkAxMKlVAva
|
||||
4IsLRaBBqhxDbcGtgdw03mFcLUaFuhtKuuEIEkUleJQwby/zwu9uvvZK4xTV+ECM
|
||||
a8lmzxKmqkBggYK1+xPdbmJclm6tSZhE/OSJtCEjs+unJIQkT9hCWgBJqGMS07Eh
|
||||
AJNmBiuVEVdTyjkIJkavuZmx2sJF13htgEZUCC23lZFOE6gWbM9WyYNJTM8yCQrb
|
||||
0Sx3OQvBML5cRATAQkSQkAJOAhoxpQkNi4ZiEVDbdtJAME0RXNDXGHA3M3Q0mm1o
|
||||
IEwbWpaM1DQCSMbGRCAu3iRIQiT6RlBpT1n3tfwvUXz3gIVlx3mEximY/kZW1kNG
|
||||
sgEJIrBisaEoGYPJ+1CQUYFBw+eGEHJQBpNHjErXUJY2iWHQ30hXwFBuMSxQ2lB5
|
||||
bg+/LX3euG6HsHUB1lFvBvaiaBrITVwkCTa1d0s9CHZCiDZjbWReKyrpPE2oSa7o
|
||||
LPrR4BJvys9ttjUpzETSSMxh8vsr9dXTwKBtK+1xCTGDQmNIaE29HmHdS5GSxpya
|
||||
MismcAUSEgSxHBrKtgsZzduG7vHZn16l3kFkVITtENIzS2JsiBwFTDlhgexsjBHv
|
||||
5HXOYxHBzoSDCcPZ0ctvkY9aS5XpoQuFYkGJgCsqjJZeUMNUEpDSbKcnUc1PifIA
|
||||
CbR2UoXawBlspkEBr9HBfvUi/MUakZVOf1WKYrqSaIXce62JOyhJLq3qJBloTA0F
|
||||
VbILEtM+heFmNRCFt70GJrExVJri0ArYbCRbADSGDBpBXxxb/6fo+s3C7uaL7RjM
|
||||
LV2IQBNrAJrKFeJwTsPnxbAsemirUx2lk1kaxschzdK4TQNJN5wQnolIFg401OZ4
|
||||
2na11LnT3lR+1k1TMJhiAjXMk0F1ooHnYlt9LKfJ3ZIOmeY+2l9bUQHWFNGyEyfj
|
||||
EAcu3kpGLq0Ez7XOS+EpAASRQTAYMATfVQibHLTT30zG732+pNe9za1JNt8sNJYn
|
||||
RjWuJ6jL5ILV0rcd9vT7X9fObvcXitpvJ2XBJE+PhX2HaTkyWeF9pwnlQNrTe9hV
|
||||
tzhA+ihZrDrHNmLcQjZbnv/IMubqq8egxY80t5n6vZ6U5TR6U9uZJvai1xtqAyCR
|
||||
NWkW52m00rDTEuO6BA4q2RHDWwbETF55rRsWLIgNW9qJCyMHPbTM/dMBmWMQSMxz
|
||||
4M2pRzt47SICxA327UqSCEERqMFybmYi3nUxePtLgHYplqRiw4ynMbXd/kiQ0LE0
|
||||
PKJSSCXA42ymziCpAxNWflzpzQdJZusahRFr6t6m+4p273/Taj7k+hZyNgBAgXAY
|
||||
8F7pTts6orLb8IA6o4TOwkwQYmKvKu9VwMrE7+GUhVIAgY9a8DyQMiDBkEAwh7S1
|
||||
KgCBfao8DK1CwSS8Z3WjL5MEgt93z2koUQCD/YxMBppiCMp7SDVSmkkIHptfGpeh
|
||||
t+M13Ccv1tavIASFiaQl6rBz3K4N3DSGwNkCibrvEAC0fQirOWnc4NVbcLKpFG1l
|
||||
NQXF/eqdT79wq1Mvlap3QSCLhcD2D3fCkKVWid4aSjtp9FOX1Uaf7P9eT93zd9Sv
|
||||
mj2yNLRUGzyI/0oONNSzmmkvJ5Cq2X2CdldIWMGZO57RJ8oyATAWTQmRmNkfh0Sx
|
||||
uuR/J9oUsomVy1AEntc0dlPivkqBkBqrxU3j5PnWkaI3ZRGc0gg9spCQEISh4xEU
|
||||
pMhVrnmDQLfLP8Ouqpx917MAw7hkjQk6BJFTAbXDsz3LSHIxo/gB8qrA1vbvdZZh
|
||||
LtR0frJdfdppX8nAQX/TAxOQ8+H6yw8a9i7/zJEfSYIhop59N/fhcWW2F14cj2Xc
|
||||
fyHaZ04lTO4uPnly91jwuFPaREuZVp8AxImIhlkxkAN61tWdWG7tEbaCgszh6VIz
|
||||
ThFnHo2Vi8SQXPrXCN7J9Tc9ZYiAYqoThV/u6SYsea5aZL8deOvKBQCgZZuIxX1z
|
||||
4EnfcqG176vY4VqMBIC4pMJz0WcHJYqN+j7BiwGoMBwExrIdTB7q4XIFLotcIpS0
|
||||
1MqyVsesvoQq7WObmGQXdMliMirSLcDuSx8Qy+4pIBgGDIyMp1qbonnGdcHYvU8S
|
||||
O0A8s/iua5oFdNZTWvbVI4FUH9sKcLiB3/fIAF+sB4n8q6L+UCfmbPcAo/crQ6b3
|
||||
HqhDBMY9J0q/jdz9GNYZ/1fbXdkUqAQKFePhtzJDRBZba27+LPQNMCcrHMq06F1T
|
||||
4QmLmkHt7LxB2pAczUO+T2O9bHEw/HWw+dYf2MoRDUw=
|
||||
`
|
||||
|
|
|
@ -33,14 +33,17 @@ const invalidNodeValue = 0xffff
|
|||
|
||||
// Decode reads bits from the given bitReader and navigates the tree until a
|
||||
// symbol is found.
|
||||
func (t huffmanTree) Decode(br *bitReader) (v uint16) {
|
||||
func (t *huffmanTree) Decode(br *bitReader) (v uint16) {
|
||||
nodeIndex := uint16(0) // node 0 is the root of the tree.
|
||||
|
||||
for {
|
||||
node := &t.nodes[nodeIndex]
|
||||
bit := br.ReadBit()
|
||||
bit, ok := br.TryReadBit()
|
||||
if !ok && br.ReadBit() {
|
||||
bit = 1
|
||||
}
|
||||
// bzip2 encodes left as a true bit.
|
||||
if bit {
|
||||
if bit != 0 {
|
||||
// left
|
||||
if node.left == invalidNodeValue {
|
||||
return node.leftValue
|
||||
|
|
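The Decode change above swaps a plain ReadBit call for a TryReadBit fast path, falling back to ReadBit only when no buffered bits remain. Below is a minimal sketch of that pattern with a hypothetical bitReader stand-in; the package's real reader buffers from an io.Reader and differs in detail.

```go
package main

import "fmt"

// bitReader is a stand-in for the package's internal reader: it keeps a
// small bit buffer and refills it a byte at a time from a byte slice.
type bitReader struct {
	buf  []byte
	bits uint64
	n    uint
}

// TryReadBit returns a bit only if one is already buffered; it never
// touches the underlying source, so it stays cheap on the hot path.
func (br *bitReader) TryReadBit() (bit byte, ok bool) {
	if br.n > 0 {
		br.n--
		return byte(br.bits>>br.n) & 1, true
	}
	return 0, false
}

// ReadBit is the slow path: refill the buffer if possible, then read.
func (br *bitReader) ReadBit() bool {
	if br.n == 0 && len(br.buf) > 0 {
		br.bits = uint64(br.buf[0])
		br.n = 8
		br.buf = br.buf[1:]
	}
	bit, _ := br.TryReadBit()
	return bit == 1
}

func main() {
	br := &bitReader{buf: []byte{0xA0}} // bits 1010 0000
	for i := 0; i < 4; i++ {
		// Same fast-path/slow-path shape as the new Decode loop.
		bit, ok := br.TryReadBit()
		if !ok && br.ReadBit() {
			bit = 1
		}
		fmt.Print(bit)
	}
	fmt.Println() // prints 1010
}
```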
|
@ -15,10 +15,11 @@ type moveToFrontDecoder struct {
|
|||
// Rather than actually keep the list in memory, the symbols are stored
|
||||
// as a circular, double linked list with the symbol indexed by head
|
||||
// at the front of the list.
|
||||
symbols []byte
|
||||
next []uint8
|
||||
prev []uint8
|
||||
symbols [256]byte
|
||||
next [256]uint8
|
||||
prev [256]uint8
|
||||
head uint8
|
||||
len int
|
||||
}
|
||||
|
||||
// newMTFDecoder creates a move-to-front decoder with an explicit initial list
|
||||
|
@ -28,12 +29,9 @@ func newMTFDecoder(symbols []byte) *moveToFrontDecoder {
|
|||
panic("too many symbols")
|
||||
}
|
||||
|
||||
m := &moveToFrontDecoder{
|
||||
symbols: symbols,
|
||||
next: make([]uint8, len(symbols)),
|
||||
prev: make([]uint8, len(symbols)),
|
||||
}
|
||||
|
||||
m := new(moveToFrontDecoder)
|
||||
copy(m.symbols[:], symbols)
|
||||
m.len = len(symbols)
|
||||
m.threadLinkedList()
|
||||
return m
|
||||
}
|
||||
|
@ -45,34 +43,29 @@ func newMTFDecoderWithRange(n int) *moveToFrontDecoder {
|
|||
panic("newMTFDecoderWithRange: cannot have > 256 symbols")
|
||||
}
|
||||
|
||||
m := &moveToFrontDecoder{
|
||||
symbols: make([]uint8, n),
|
||||
next: make([]uint8, n),
|
||||
prev: make([]uint8, n),
|
||||
}
|
||||
|
||||
m := new(moveToFrontDecoder)
|
||||
for i := 0; i < n; i++ {
|
||||
m.symbols[i] = byte(i)
|
||||
m.symbols[byte(i)] = byte(i)
|
||||
}
|
||||
|
||||
m.len = n
|
||||
m.threadLinkedList()
|
||||
return m
|
||||
}
|
||||
|
||||
// threadLinkedList creates the initial linked-list pointers.
|
||||
func (m *moveToFrontDecoder) threadLinkedList() {
|
||||
if len(m.symbols) == 0 {
|
||||
if m.len == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
m.prev[0] = uint8(len(m.symbols) - 1)
|
||||
m.prev[0] = uint8(m.len - 1)
|
||||
|
||||
for i := 0; i < len(m.symbols)-1; i++ {
|
||||
for i := byte(0); int(i) < m.len-1; i++ {
|
||||
m.next[i] = uint8(i + 1)
|
||||
m.prev[i+1] = uint8(i)
|
||||
}
|
||||
|
||||
m.next[len(m.symbols)-1] = 0
|
||||
m.next[m.len-1] = 0
|
||||
}
|
||||
|
||||
func (m *moveToFrontDecoder) Decode(n int) (b byte) {
|
||||
|
|
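The struct change above fixes the symbol, next and prev lists at 256 entries so the decoder allocates nothing per block. For context, the move-to-front transform itself is simple; the sketch below decodes it with a plain slice and ignores the circular linked-list representation the package actually uses.

```go
package main

import "fmt"

// mtfDecode is a simplified, slice-based move-to-front decoder: each index
// selects the symbol at that position and moves it to the front of the list.
func mtfDecode(symbols []byte, indices []int) []byte {
	list := append([]byte(nil), symbols...)
	out := make([]byte, 0, len(indices))
	for _, idx := range indices {
		b := list[idx]
		out = append(out, b)
		// Shift the prefix right by one and put the symbol at the front.
		copy(list[1:idx+1], list[:idx])
		list[0] = b
	}
	return out
}

func main() {
	fmt.Printf("%s\n", mtfDecode([]byte("abc"), []int{1, 1, 2, 0}))
}
```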
|
@ -6,12 +6,27 @@ package flate
|
|||
|
||||
// forwardCopy is like the built-in copy function except that it always goes
|
||||
// forward from the start, even if the dst and src overlap.
|
||||
func forwardCopy(dst, src []byte) int {
|
||||
if len(src) > len(dst) {
|
||||
src = src[:len(dst)]
|
||||
// It is equivalent to:
|
||||
// for i := 0; i < n; i++ {
|
||||
// mem[dst+i] = mem[src+i]
|
||||
// }
|
||||
func forwardCopy(mem []byte, dst, src, n int) {
|
||||
if dst <= src {
|
||||
copy(mem[dst:dst+n], mem[src:src+n])
|
||||
return
|
||||
}
|
||||
for i, x := range src {
|
||||
dst[i] = x
|
||||
for {
|
||||
if dst >= src+n {
|
||||
copy(mem[dst:dst+n], mem[src:src+n])
|
||||
return
|
||||
}
|
||||
// There is some forward overlap. The destination
|
||||
// will be filled with a repeated pattern of mem[src:src+k].
|
||||
// We copy one instance of the pattern here, then repeat.
|
||||
// Each time around this loop k will double.
|
||||
k := dst - src
|
||||
copy(mem[dst:dst+k], mem[src:src+k])
|
||||
n -= k
|
||||
dst += k
|
||||
}
|
||||
return len(src)
|
||||
}
|
||||
|
|
|
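The rewritten forwardCopy works on a single buffer with integer indices, and when the destination overlaps the source it deliberately replicates the pattern, doubling the copied chunk on each pass. A standalone copy of the new routine plus a small driver showing the LZ77-style repeated run it produces:

```go
package main

import "fmt"

// forwardCopySketch mirrors the new index-based helper: it copies n bytes
// from mem[src:] to mem[dst:], always moving forward, so an overlapping
// region repeats the pattern mem[src:src+(dst-src)].
func forwardCopySketch(mem []byte, dst, src, n int) {
	if dst <= src {
		copy(mem[dst:dst+n], mem[src:src+n])
		return
	}
	for {
		if dst >= src+n {
			copy(mem[dst:dst+n], mem[src:src+n])
			return
		}
		// Forward overlap: copy one instance of the pattern, then repeat.
		// k doubles each time around the loop.
		k := dst - src
		copy(mem[dst:dst+k], mem[src:src+k])
		n -= k
		dst += k
	}
}

func main() {
	mem := []byte("abc-----------")
	forwardCopySketch(mem, 3, 0, 11) // overlapping copy repeats "abc"
	fmt.Println(string(mem))         // abcabcabcabcab
}
```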
@ -30,10 +30,12 @@ func TestForwardCopy(t *testing.T) {
|
|||
}
|
||||
for _, tc := range testCases {
|
||||
b := []byte("0123456789")
|
||||
dst := b[tc.dst0:tc.dst1]
|
||||
src := b[tc.src0:tc.src1]
|
||||
n := forwardCopy(dst, src)
|
||||
got := string(dst[:n])
|
||||
n := tc.dst1 - tc.dst0
|
||||
if tc.src1-tc.src0 < n {
|
||||
n = tc.src1 - tc.src0
|
||||
}
|
||||
forwardCopy(b, tc.dst0, tc.src0, n)
|
||||
got := string(b[tc.dst0 : tc.dst0+n])
|
||||
if got != tc.want {
|
||||
t.Errorf("dst=b[%d:%d], src=b[%d:%d]: got %q, want %q",
|
||||
tc.dst0, tc.dst1, tc.src0, tc.src1, got, tc.want)
|
||||
|
|
|
@ -416,6 +416,50 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
var zeroes [32]int
|
||||
var bzeroes [256]byte
|
||||
|
||||
func (d *compressor) reset(w io.Writer) {
|
||||
d.w.reset(w)
|
||||
d.sync = false
|
||||
d.err = nil
|
||||
switch d.compressionLevel.chain {
|
||||
case 0:
|
||||
// level was NoCompression.
|
||||
for i := range d.window {
|
||||
d.window[i] = 0
|
||||
}
|
||||
d.windowEnd = 0
|
||||
default:
|
||||
d.chainHead = -1
|
||||
for s := d.hashHead; len(s) > 0; {
|
||||
n := copy(s, zeroes[:])
|
||||
s = s[n:]
|
||||
}
|
||||
for s := d.hashPrev; len(s) > 0; s = s[len(zeroes):] {
|
||||
copy(s, zeroes[:])
|
||||
}
|
||||
d.hashOffset = 1
|
||||
|
||||
d.index, d.windowEnd = 0, 0
|
||||
for s := d.window; len(s) > 0; {
|
||||
n := copy(s, bzeroes[:])
|
||||
s = s[n:]
|
||||
}
|
||||
d.blockStart, d.byteAvailable = 0, false
|
||||
|
||||
d.tokens = d.tokens[:maxFlateBlockTokens+1]
|
||||
for i := 0; i <= maxFlateBlockTokens; i++ {
|
||||
d.tokens[i] = 0
|
||||
}
|
||||
d.tokens = d.tokens[:0]
|
||||
d.length = minMatchLength - 1
|
||||
d.offset = 0
|
||||
d.hash = 0
|
||||
d.maxInsertIndex = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (d *compressor) close() error {
|
||||
d.sync = true
|
||||
d.step(d)
|
||||
|
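The new reset clears the hash tables and window by copying from small fixed all-zero arrays (zeroes, bzeroes) in chunks instead of reallocating them. A minimal illustration of that clearing idiom; the zero helper name is just for this example.

```go
package main

import "fmt"

// zero clears a byte slice in chunks by repeatedly copying from a fixed
// all-zero array, the same idiom the new compressor.reset uses for its
// window and hash tables.
func zero(s []byte) {
	var zeroes [256]byte
	for len(s) > 0 {
		n := copy(s, zeroes[:])
		s = s[n:]
	}
}

func main() {
	buf := []byte("some stale window contents")
	zero(buf)
	fmt.Println(buf[0], buf[len(buf)-1]) // 0 0
}
```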
@ -439,7 +483,6 @@ func (d *compressor) close() error {
|
|||
// If level is in the range [-1, 9] then the error returned will be nil.
|
||||
// Otherwise the error returned will be non-nil.
|
||||
func NewWriter(w io.Writer, level int) (*Writer, error) {
|
||||
const logWindowSize = logMaxOffsetSize
|
||||
var dw Writer
|
||||
if err := dw.d.init(w, level); err != nil {
|
||||
return nil, err
|
||||
|
@ -462,6 +505,7 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
|
|||
zw.Write(dict)
|
||||
zw.Flush()
|
||||
dw.enabled = true
|
||||
zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
|
||||
return zw, err
|
||||
}
|
||||
|
||||
|
@ -480,7 +524,8 @@ func (w *dictWriter) Write(b []byte) (n int, err error) {
|
|||
// A Writer takes data written to it and writes the compressed
|
||||
// form of that data to an underlying writer (see NewWriter).
|
||||
type Writer struct {
|
||||
d compressor
|
||||
d compressor
|
||||
dict []byte
|
||||
}
|
||||
|
||||
// Write writes data to w, which will eventually write the
|
||||
|
@ -506,3 +551,21 @@ func (w *Writer) Flush() error {
|
|||
func (w *Writer) Close() error {
|
||||
return w.d.close()
|
||||
}
|
||||
|
||||
// Reset discards the writer's state and makes it equivalent to
|
||||
// the result of NewWriter or NewWriterDict called with dst
|
||||
// and w's level and dictionary.
|
||||
func (w *Writer) Reset(dst io.Writer) {
|
||||
if dw, ok := w.d.w.w.(*dictWriter); ok {
|
||||
// w was created with NewWriterDict
|
||||
dw.w = dst
|
||||
w.d.reset(dw)
|
||||
dw.enabled = false
|
||||
w.Write(w.dict)
|
||||
w.Flush()
|
||||
dw.enabled = true
|
||||
} else {
|
||||
// w was created with NewWriter
|
||||
w.d.reset(dst)
|
||||
}
|
||||
}
|
||||
|
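With the new Reset method (and the dictionary duplicated onto the Writer for the NewWriterDict case), a flate.Writer can be reused across streams rather than reallocated. A usage sketch:

```go
package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"log"
)

func main() {
	var buf1, buf2 bytes.Buffer

	w, err := flate.NewWriter(&buf1, flate.BestSpeed)
	if err != nil {
		log.Fatal(err)
	}
	w.Write([]byte("hello world"))
	w.Close()

	// Reset lets the same Writer, including its internal buffers, be
	// reused for a second, independent stream written to buf2.
	w.Reset(&buf2)
	w.Write([]byte("hello world"))
	w.Close()

	fmt.Println(bytes.Equal(buf1.Bytes(), buf2.Bytes())) // true
}
```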
|
|
@ -9,6 +9,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
@ -424,3 +425,66 @@ func TestRegression2508(t *testing.T) {
|
|||
}
|
||||
w.Close()
|
||||
}
|
||||
|
||||
func TestWriterReset(t *testing.T) {
|
||||
for level := 0; level <= 9; level++ {
|
||||
if testing.Short() && level > 1 {
|
||||
break
|
||||
}
|
||||
w, err := NewWriter(ioutil.Discard, level)
|
||||
if err != nil {
|
||||
t.Fatalf("NewWriter: %v", err)
|
||||
}
|
||||
buf := []byte("hello world")
|
||||
for i := 0; i < 1024; i++ {
|
||||
w.Write(buf)
|
||||
}
|
||||
w.Reset(ioutil.Discard)
|
||||
|
||||
wref, err := NewWriter(ioutil.Discard, level)
|
||||
if err != nil {
|
||||
t.Fatalf("NewWriter: %v", err)
|
||||
}
|
||||
|
||||
// DeepEqual doesn't compare functions.
|
||||
w.d.fill, wref.d.fill = nil, nil
|
||||
w.d.step, wref.d.step = nil, nil
|
||||
if !reflect.DeepEqual(w, wref) {
|
||||
t.Errorf("level %d Writer not reset after Reset", level)
|
||||
}
|
||||
}
|
||||
testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, NoCompression) })
|
||||
testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, DefaultCompression) })
|
||||
testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, BestCompression) })
|
||||
dict := []byte("we are the world")
|
||||
testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, NoCompression, dict) })
|
||||
testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, DefaultCompression, dict) })
|
||||
testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, BestCompression, dict) })
|
||||
}
|
||||
|
||||
func testResetOutput(t *testing.T, newWriter func(w io.Writer) (*Writer, error)) {
|
||||
buf := new(bytes.Buffer)
|
||||
w, err := newWriter(buf)
|
||||
if err != nil {
|
||||
t.Fatalf("NewWriter: %v", err)
|
||||
}
|
||||
b := []byte("hello world")
|
||||
for i := 0; i < 1024; i++ {
|
||||
w.Write(b)
|
||||
}
|
||||
w.Close()
|
||||
out1 := buf.String()
|
||||
|
||||
buf2 := new(bytes.Buffer)
|
||||
w.Reset(buf2)
|
||||
for i := 0; i < 1024; i++ {
|
||||
w.Write(b)
|
||||
}
|
||||
w.Close()
|
||||
out2 := buf2.String()
|
||||
|
||||
if out1 != out2 {
|
||||
t.Errorf("got %q, expected %q", out2, out1)
|
||||
}
|
||||
t.Logf("got %d bytes", len(out1))
|
||||
}
|
||||
|
|
|
@ -24,3 +24,39 @@ func TestUncompressedSource(t *testing.T) {
|
|||
t.Errorf("output[0] = %x, want 0x11", output[0])
|
||||
}
|
||||
}
|
||||
|
||||
// The following test should not panic.
|
||||
func TestIssue5915(t *testing.T) {
|
||||
bits := []int{4, 0, 0, 6, 4, 3, 2, 3, 3, 4, 4, 5, 0, 0, 0, 0, 5, 5, 6,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 6, 0, 11, 0, 8, 0, 6, 6, 10, 8}
|
||||
h := new(huffmanDecoder)
|
||||
ok := h.init(bits)
|
||||
if ok == true {
|
||||
t.Fatalf("Given sequence of bits is bad, and should not succeed.")
|
||||
}
|
||||
}
|
||||
|
||||
// The following test should not panic.
|
||||
func TestIssue5962(t *testing.T) {
|
||||
bits := []int{4, 0, 0, 6, 4, 3, 2, 3, 3, 4, 4, 5, 0, 0, 0, 0,
|
||||
5, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11}
|
||||
h := new(huffmanDecoder)
|
||||
ok := h.init(bits)
|
||||
if ok == true {
|
||||
t.Fatalf("Given sequence of bits is bad, and should not succeed.")
|
||||
}
|
||||
}
|
||||
|
||||
// The following test should not panic.
|
||||
func TestIssue6255(t *testing.T) {
|
||||
bits1 := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11}
|
||||
bits2 := []int{11, 13}
|
||||
h := new(huffmanDecoder)
|
||||
if !h.init(bits1) {
|
||||
t.Fatalf("Given sequence of bits is good and should succeed.")
|
||||
}
|
||||
if h.init(bits2) {
|
||||
t.Fatalf("Given sequence of bits is bad and should not succeed.")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -97,6 +97,31 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
|
|||
}
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) reset(writer io.Writer) {
|
||||
w.w = writer
|
||||
w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
|
||||
w.bytes = [64]byte{}
|
||||
for i := range w.codegen {
|
||||
w.codegen[i] = 0
|
||||
}
|
||||
for _, s := range [...][]int32{w.literalFreq, w.offsetFreq, w.codegenFreq} {
|
||||
for i := range s {
|
||||
s[i] = 0
|
||||
}
|
||||
}
|
||||
for _, enc := range [...]*huffmanEncoder{
|
||||
w.literalEncoding,
|
||||
w.offsetEncoding,
|
||||
w.codegenEncoding} {
|
||||
for i := range enc.code {
|
||||
enc.code[i] = 0
|
||||
}
|
||||
for i := range enc.codeBits {
|
||||
enc.codeBits[i] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) flushBits() {
|
||||
if w.err != nil {
|
||||
w.nbits = 0
|
||||
|
|
|
@ -19,23 +19,13 @@ type literalNode struct {
|
|||
freq int32
|
||||
}
|
||||
|
||||
type chain struct {
|
||||
// The sum of the leaves in this tree
|
||||
freq int32
|
||||
|
||||
// The number of literals to the left of this item at this level
|
||||
leafCount int32
|
||||
|
||||
// The right child of this chain in the previous level.
|
||||
up *chain
|
||||
}
|
||||
|
||||
// A levelInfo describes the state of the constructed tree for a given depth.
|
||||
type levelInfo struct {
|
||||
// Our level. for better printing
|
||||
level int32
|
||||
|
||||
// The most recent chain generated for this level
|
||||
lastChain *chain
|
||||
// The frequency of the last node at this level
|
||||
lastFreq int32
|
||||
|
||||
// The frequency of the next character to add to this level
|
||||
nextCharFreq int32
|
||||
|
@ -47,12 +37,6 @@ type levelInfo struct {
|
|||
// The number of chains remaining to generate for this level before moving
|
||||
// up to the next level
|
||||
needed int32
|
||||
|
||||
// The levelInfo for level+1
|
||||
up *levelInfo
|
||||
|
||||
// The levelInfo for level-1
|
||||
down *levelInfo
|
||||
}
|
||||
|
||||
func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
|
||||
|
@ -121,6 +105,8 @@ func (h *huffmanEncoder) bitLength(freq []int32) int64 {
|
|||
return total
|
||||
}
|
||||
|
||||
const maxBitsLimit = 16
|
||||
|
||||
// Return the number of literals assigned to each bit size in the Huffman encoding
|
||||
//
|
||||
// This method is only called when list.length >= 3
|
||||
|
@ -131,9 +117,13 @@ func (h *huffmanEncoder) bitLength(freq []int32) int64 {
|
|||
// frequency, and has as its last element a special element with frequency
|
||||
// MaxInt32
|
||||
// maxBits The maximum number of bits that should be used to encode any literal.
|
||||
// Must be less than 16.
|
||||
// return An integer array in which array[i] indicates the number of literals
|
||||
// that should be encoded in i bits.
|
||||
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
|
||||
if maxBits >= maxBitsLimit {
|
||||
panic("flate: maxBits too large")
|
||||
}
|
||||
n := int32(len(list))
|
||||
list = list[0 : n+1]
|
||||
list[n] = maxNode()
|
||||
|
@ -148,53 +138,61 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
|
|||
// A bogus "Level 0" whose sole purpose is so that
|
||||
// level1.prev.needed==0. This makes level1.nextPairFreq
|
||||
// be a legitimate value that never gets chosen.
|
||||
top := &levelInfo{needed: 0}
|
||||
chain2 := &chain{list[1].freq, 2, new(chain)}
|
||||
var levels [maxBitsLimit]levelInfo
|
||||
// leafCounts[i] counts the number of literals at the left
|
||||
// of ancestors of the rightmost node at level i.
|
||||
// leafCounts[i][j] is the number of literals at the left
|
||||
// of the level j ancestor.
|
||||
var leafCounts [maxBitsLimit][maxBitsLimit]int32
|
||||
|
||||
for level := int32(1); level <= maxBits; level++ {
|
||||
// For every level, the first two items are the first two characters.
|
||||
// We initialize the levels as if we had already figured this out.
|
||||
top = &levelInfo{
|
||||
levels[level] = levelInfo{
|
||||
level: level,
|
||||
lastChain: chain2,
|
||||
lastFreq: list[1].freq,
|
||||
nextCharFreq: list[2].freq,
|
||||
nextPairFreq: list[0].freq + list[1].freq,
|
||||
down: top,
|
||||
}
|
||||
top.down.up = top
|
||||
leafCounts[level][level] = 2
|
||||
if level == 1 {
|
||||
top.nextPairFreq = math.MaxInt32
|
||||
levels[level].nextPairFreq = math.MaxInt32
|
||||
}
|
||||
}
|
||||
|
||||
// We need a total of 2*n - 2 items at top level and have already generated 2.
|
||||
top.needed = 2*n - 4
|
||||
levels[maxBits].needed = 2*n - 4
|
||||
|
||||
l := top
|
||||
level := maxBits
|
||||
for {
|
||||
l := &levels[level]
|
||||
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
|
||||
// We've run out of both leafs and pairs.
|
||||
// End all calculations for this level.
|
||||
// To m sure we never come back to this level or any lower level,
|
||||
// To make sure we never come back to this level or any lower level,
|
||||
// set nextPairFreq impossibly large.
|
||||
l.lastChain = nil
|
||||
l.needed = 0
|
||||
l = l.up
|
||||
l.nextPairFreq = math.MaxInt32
|
||||
levels[level+1].nextPairFreq = math.MaxInt32
|
||||
level++
|
||||
continue
|
||||
}
|
||||
|
||||
prevFreq := l.lastChain.freq
|
||||
prevFreq := l.lastFreq
|
||||
if l.nextCharFreq < l.nextPairFreq {
|
||||
// The next item on this row is a leaf node.
|
||||
n := l.lastChain.leafCount + 1
|
||||
l.lastChain = &chain{l.nextCharFreq, n, l.lastChain.up}
|
||||
n := leafCounts[level][level] + 1
|
||||
l.lastFreq = l.nextCharFreq
|
||||
// Lower leafCounts are the same of the previous node.
|
||||
leafCounts[level][level] = n
|
||||
l.nextCharFreq = list[n].freq
|
||||
} else {
|
||||
// The next item on this row is a pair from the previous row.
|
||||
// nextPairFreq isn't valid until we generate two
|
||||
// more values in the level below
|
||||
l.lastChain = &chain{l.nextPairFreq, l.lastChain.leafCount, l.down.lastChain}
|
||||
l.down.needed = 2
|
||||
l.lastFreq = l.nextPairFreq
|
||||
// Take leaf counts from the lower level, except counts[level] remains the same.
|
||||
copy(leafCounts[level][:level], leafCounts[level-1][:level])
|
||||
levels[l.level-1].needed = 2
|
||||
}
|
||||
|
||||
if l.needed--; l.needed == 0 {
|
||||
|
@ -202,33 +200,33 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
|
|||
// Continue calculating one level up. Fill in nextPairFreq
|
||||
// of that level with the sum of the two nodes we've just calculated on
|
||||
// this level.
|
||||
up := l.up
|
||||
if up == nil {
|
||||
if l.level == maxBits {
|
||||
// All done!
|
||||
break
|
||||
}
|
||||
up.nextPairFreq = prevFreq + l.lastChain.freq
|
||||
l = up
|
||||
levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
|
||||
level++
|
||||
} else {
|
||||
// If we stole from below, move down temporarily to replenish it.
|
||||
for l.down.needed > 0 {
|
||||
l = l.down
|
||||
for levels[level-1].needed > 0 {
|
||||
level--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Something is wrong if at the end, the top level is null or hasn't used
|
||||
// all of the leaves.
|
||||
if top.lastChain.leafCount != n {
|
||||
panic("top.lastChain.leafCount != n")
|
||||
if leafCounts[maxBits][maxBits] != n {
|
||||
panic("leafCounts[maxBits][maxBits] != n")
|
||||
}
|
||||
|
||||
bitCount := make([]int32, maxBits+1)
|
||||
bits := 1
|
||||
for chain := top.lastChain; chain.up != nil; chain = chain.up {
|
||||
counts := &leafCounts[maxBits]
|
||||
for level := maxBits; level > 0; level-- {
|
||||
// chain.leafCount gives the number of literals requiring at least "bits"
|
||||
// bits to encode.
|
||||
bitCount[bits] = chain.leafCount - chain.up.leafCount
|
||||
bitCount[bits] = counts[level] - counts[level-1]
|
||||
bits++
|
||||
}
|
||||
return bitCount
|
||||
|
|
|
@ -91,6 +91,10 @@ type huffmanDecoder struct {
|
|||
|
||||
// Initialize Huffman decoding tables from array of code lengths.
|
||||
func (h *huffmanDecoder) init(bits []int) bool {
|
||||
if h.min != 0 {
|
||||
*h = huffmanDecoder{}
|
||||
}
|
||||
|
||||
// Count number of codes of each length,
|
||||
// compute min and max length.
|
||||
var count [maxCodeLen]int
|
||||
|
@ -125,6 +129,9 @@ func (h *huffmanDecoder) init(bits []int) bool {
|
|||
if i == huffmanChunkBits+1 {
|
||||
// create link tables
|
||||
link := code >> 1
|
||||
if huffmanNumChunks < link {
|
||||
return false
|
||||
}
|
||||
h.links = make([][]uint32, huffmanNumChunks-link)
|
||||
for j := uint(link); j < huffmanNumChunks; j++ {
|
||||
reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
|
||||
|
@ -154,7 +161,11 @@ func (h *huffmanDecoder) init(bits []int) bool {
|
|||
h.chunks[off] = chunk
|
||||
}
|
||||
} else {
|
||||
linktab := h.links[h.chunks[reverse&(huffmanNumChunks-1)]>>huffmanValueShift]
|
||||
value := h.chunks[reverse&(huffmanNumChunks-1)] >> huffmanValueShift
|
||||
if value >= uint32(len(h.links)) {
|
||||
return false
|
||||
}
|
||||
linktab := h.links[value]
|
||||
reverse >>= huffmanChunkBits
|
||||
for off := reverse; off < numLinks; off += 1 << uint(n-huffmanChunkBits) {
|
||||
linktab[off] = chunk
|
||||
|
@ -511,7 +522,7 @@ func (f *decompressor) copyHist() bool {
|
|||
if x := len(f.hist) - p; n > x {
|
||||
n = x
|
||||
}
|
||||
forwardCopy(f.hist[f.hp:f.hp+n], f.hist[p:p+n])
|
||||
forwardCopy(f.hist[:], f.hp, p, n)
|
||||
p += n
|
||||
f.hp += n
|
||||
f.copyLen -= n
|
||||
|
@ -633,6 +644,10 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
|
|||
if n > huffmanChunkBits {
|
||||
chunk = h.links[chunk>>huffmanValueShift][(f.b>>huffmanChunkBits)&h.linkMask]
|
||||
n = uint(chunk & huffmanCountMask)
|
||||
if n == 0 {
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return 0, f.err
|
||||
}
|
||||
}
|
||||
if n <= f.nb {
|
||||
f.b >>= n
|
||||
|
|
|
@ -37,6 +37,7 @@ var testfiles = []string{
|
|||
}
|
||||
|
||||
func benchmarkDecode(b *testing.B, testfile, level, n int) {
|
||||
b.ReportAllocs()
|
||||
b.StopTimer()
|
||||
b.SetBytes(int64(n))
|
||||
buf0, err := ioutil.ReadFile(testfiles[testfile])
|
||||
|
@ -55,7 +56,7 @@ func benchmarkDecode(b *testing.B, testfile, level, n int) {
|
|||
if len(buf0) > n-i {
|
||||
buf0 = buf0[:n-i]
|
||||
}
|
||||
io.Copy(w, bytes.NewBuffer(buf0))
|
||||
io.Copy(w, bytes.NewReader(buf0))
|
||||
}
|
||||
w.Close()
|
||||
buf1 := compressed.Bytes()
|
||||
|
@ -63,7 +64,7 @@ func benchmarkDecode(b *testing.B, testfile, level, n int) {
|
|||
runtime.GC()
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
io.Copy(ioutil.Discard, NewReader(bytes.NewBuffer(buf1)))
|
||||
io.Copy(ioutil.Discard, NewReader(bytes.NewReader(buf1)))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -7,7 +7,10 @@ package gzip
|
|||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type gunzipTest struct {
|
||||
|
@ -302,3 +305,31 @@ func TestDecompressor(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue6550(t *testing.T) {
|
||||
f, err := os.Open("testdata/issue6550.gz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gzip, err := NewReader(f)
|
||||
if err != nil {
|
||||
t.Fatalf("NewReader(testdata/issue6550.gz): %v", err)
|
||||
}
|
||||
defer gzip.Close()
|
||||
done := make(chan bool, 1)
|
||||
go func() {
|
||||
_, err := io.Copy(ioutil.Discard, gzip)
|
||||
if err == nil {
|
||||
t.Errorf("Copy succeeded")
|
||||
} else {
|
||||
t.Logf("Copy failed (correctly): %v", err)
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
t.Errorf("Copy hung")
|
||||
case <-done:
|
||||
// ok
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,14 +26,15 @@ const (
|
|||
// to its wrapped io.Writer.
|
||||
type Writer struct {
|
||||
Header
|
||||
w io.Writer
|
||||
level int
|
||||
compressor *flate.Writer
|
||||
digest hash.Hash32
|
||||
size uint32
|
||||
closed bool
|
||||
buf [10]byte
|
||||
err error
|
||||
w io.Writer
|
||||
level int
|
||||
wroteHeader bool
|
||||
compressor *flate.Writer
|
||||
digest hash.Hash32
|
||||
size uint32
|
||||
closed bool
|
||||
buf [10]byte
|
||||
err error
|
||||
}
|
||||
|
||||
// NewWriter creates a new Writer that satisfies writes by compressing data
|
||||
|
@ -62,14 +63,39 @@ func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
|
|||
if level < DefaultCompression || level > BestCompression {
|
||||
return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
|
||||
}
|
||||
return &Writer{
|
||||
z := new(Writer)
|
||||
z.init(w, level)
|
||||
return z, nil
|
||||
}
|
||||
|
||||
func (z *Writer) init(w io.Writer, level int) {
|
||||
digest := z.digest
|
||||
if digest != nil {
|
||||
digest.Reset()
|
||||
} else {
|
||||
digest = crc32.NewIEEE()
|
||||
}
|
||||
compressor := z.compressor
|
||||
if compressor != nil {
|
||||
compressor.Reset(w)
|
||||
}
|
||||
*z = Writer{
|
||||
Header: Header{
|
||||
OS: 255, // unknown
|
||||
},
|
||||
w: w,
|
||||
level: level,
|
||||
digest: crc32.NewIEEE(),
|
||||
}, nil
|
||||
w: w,
|
||||
level: level,
|
||||
digest: digest,
|
||||
compressor: compressor,
|
||||
}
|
||||
}
|
||||
|
||||
// Reset discards the Writer z's state and makes it equivalent to the
|
||||
// result of its original state from NewWriter or NewWriterLevel, but
|
||||
// writing to w instead. This permits reusing a Writer rather than
|
||||
// allocating a new one.
|
||||
func (z *Writer) Reset(w io.Writer) {
|
||||
z.init(w, z.level)
|
||||
}
|
||||
|
||||
// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950).
|
||||
|
@ -138,7 +164,8 @@ func (z *Writer) Write(p []byte) (int, error) {
|
|||
}
|
||||
var n int
|
||||
// Write the GZIP header lazily.
|
||||
if z.compressor == nil {
|
||||
if !z.wroteHeader {
|
||||
z.wroteHeader = true
|
||||
z.buf[0] = gzipID1
|
||||
z.buf[1] = gzipID2
|
||||
z.buf[2] = gzipDeflate
|
||||
|
@ -183,7 +210,9 @@ func (z *Writer) Write(p []byte) (int, error) {
|
|||
return n, z.err
|
||||
}
|
||||
}
|
||||
z.compressor, _ = flate.NewWriter(z.w, z.level)
|
||||
if z.compressor == nil {
|
||||
z.compressor, _ = flate.NewWriter(z.w, z.level)
|
||||
}
|
||||
}
|
||||
z.size += uint32(len(p))
|
||||
z.digest.Write(p)
|
||||
|
@ -206,8 +235,11 @@ func (z *Writer) Flush() error {
|
|||
if z.closed {
|
||||
return nil
|
||||
}
|
||||
if z.compressor == nil {
|
||||
if !z.wroteHeader {
|
||||
z.Write(nil)
|
||||
if z.err != nil {
|
||||
return z.err
|
||||
}
|
||||
}
|
||||
z.err = z.compressor.Flush()
|
||||
return z.err
|
||||
|
@ -222,7 +254,7 @@ func (z *Writer) Close() error {
|
|||
return nil
|
||||
}
|
||||
z.closed = true
|
||||
if z.compressor == nil {
|
||||
if !z.wroteHeader {
|
||||
z.Write(nil)
|
||||
if z.err != nil {
|
||||
return z.err
|
||||
|
|
|
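The lazy header write plus the init/Reset split above let a gzip.Writer be reused the same way. A usage sketch mirroring TestWriterReset:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

func main() {
	var buf1, buf2 bytes.Buffer

	z := gzip.NewWriter(&buf1)
	z.Write([]byte("hello world"))
	z.Close()

	// Reset reuses the Writer, including its internal flate compressor,
	// for a fresh gzip stream written to buf2.
	z.Reset(&buf2)
	z.Write([]byte("hello world"))
	z.Close()

	fmt.Println(bytes.Equal(buf1.Bytes(), buf2.Bytes())) // true
}
```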
@ -197,3 +197,35 @@ func TestWriterFlush(t *testing.T) {
|
|||
t.Fatal("Flush didn't flush any data")
|
||||
}
|
||||
}
|
||||
|
||||
// Multiple gzip files concatenated form a valid gzip file.
|
||||
func TestConcat(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
w := NewWriter(&buf)
|
||||
w.Write([]byte("hello "))
|
||||
w.Close()
|
||||
w = NewWriter(&buf)
|
||||
w.Write([]byte("world\n"))
|
||||
w.Close()
|
||||
|
||||
r, err := NewReader(&buf)
|
||||
data, err := ioutil.ReadAll(r)
|
||||
if string(data) != "hello world\n" || err != nil {
|
||||
t.Fatalf("ReadAll = %q, %v, want %q, nil", data, err, "hello world")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterReset(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf2 := new(bytes.Buffer)
|
||||
z := NewWriter(buf)
|
||||
msg := []byte("hello world")
|
||||
z.Write(msg)
|
||||
z.Close()
|
||||
z.Reset(buf2)
|
||||
z.Write(msg)
|
||||
z.Close()
|
||||
if buf.String() != buf2.String() {
|
||||
t.Errorf("buf2 %q != original buf of %q", buf2.String(), buf.String())
|
||||
}
|
||||
}
|
||||
|
|
BIN
libgo/go/compress/gzip/testdata/issue6550.gz
vendored
Normal file
Binary file not shown.
|
@ -70,6 +70,23 @@ func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// Reset clears the state of the Writer z such that it is equivalent to its
|
||||
// initial state from NewWriterLevel or NewWriterLevelDict, but instead writing
|
||||
// to w.
|
||||
func (z *Writer) Reset(w io.Writer) {
|
||||
z.w = w
|
||||
// z.level and z.dict left unchanged.
|
||||
if z.compressor != nil {
|
||||
z.compressor.Reset(w)
|
||||
}
|
||||
if z.digest != nil {
|
||||
z.digest.Reset()
|
||||
}
|
||||
z.err = nil
|
||||
z.scratch = [4]byte{}
|
||||
z.wroteHeader = false
|
||||
}
|
||||
|
||||
// writeHeader writes the ZLIB header.
|
||||
func (z *Writer) writeHeader() (err error) {
|
||||
z.wroteHeader = true
|
||||
|
@ -111,11 +128,15 @@ func (z *Writer) writeHeader() (err error) {
|
|||
return err
|
||||
}
|
||||
}
|
||||
z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
|
||||
if err != nil {
|
||||
return err
|
||||
if z.compressor == nil {
|
||||
// Initialize deflater unless the Writer is being reused
|
||||
// after a Reset call.
|
||||
z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
z.digest = adler32.New()
|
||||
}
|
||||
z.digest = adler32.New()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
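zlib's Reset keeps the configured compression level and dictionary and only redirects output; the compressor is reinitialized lazily in writeHeader. A usage sketch with a preset dictionary (the dictionary bytes here are arbitrary):

```go
package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"log"
)

func main() {
	dict := []byte("0123456789.")
	var buf1, buf2 bytes.Buffer

	w, err := zlib.NewWriterLevelDict(&buf1, zlib.BestCompression, dict)
	if err != nil {
		log.Fatal(err)
	}
	w.Write([]byte("0123456789.0123456789."))
	w.Close()

	// Reset keeps the configured level and dictionary but points the
	// Writer at a new destination stream.
	w.Reset(&buf2)
	w.Write([]byte("0123456789.0123456789."))
	w.Close()

	fmt.Println(bytes.Equal(buf1.Bytes(), buf2.Bytes())) // true
}
```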
@ -89,6 +89,56 @@ func testLevelDict(t *testing.T, fn string, b0 []byte, level int, d string) {
|
|||
}
|
||||
}
|
||||
|
||||
func testFileLevelDictReset(t *testing.T, fn string, level int, dict []byte) {
|
||||
var b0 []byte
|
||||
var err error
|
||||
if fn != "" {
|
||||
b0, err = ioutil.ReadFile(fn)
|
||||
if err != nil {
|
||||
t.Errorf("%s (level=%d): %v", fn, level, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Compress once.
|
||||
buf := new(bytes.Buffer)
|
||||
var zlibw *Writer
|
||||
if dict == nil {
|
||||
zlibw, err = NewWriterLevel(buf, level)
|
||||
} else {
|
||||
zlibw, err = NewWriterLevelDict(buf, level, dict)
|
||||
}
|
||||
if err == nil {
|
||||
_, err = zlibw.Write(b0)
|
||||
}
|
||||
if err == nil {
|
||||
err = zlibw.Close()
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("%s (level=%d): %v", fn, level, err)
|
||||
return
|
||||
}
|
||||
out := buf.String()
|
||||
|
||||
// Reset and compress again.
|
||||
buf2 := new(bytes.Buffer)
|
||||
zlibw.Reset(buf2)
|
||||
_, err = zlibw.Write(b0)
|
||||
if err == nil {
|
||||
err = zlibw.Close()
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("%s (level=%d): %v", fn, level, err)
|
||||
return
|
||||
}
|
||||
out2 := buf2.String()
|
||||
|
||||
if out2 != out {
|
||||
t.Errorf("%s (level=%d): different output after reset (got %d bytes, expected %d",
|
||||
fn, level, len(out2), len(out))
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriter(t *testing.T) {
|
||||
for i, s := range data {
|
||||
b := []byte(s)
|
||||
|
@ -122,6 +172,21 @@ func TestWriterDict(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestWriterReset(t *testing.T) {
|
||||
const dictionary = "0123456789."
|
||||
for _, fn := range filenames {
|
||||
testFileLevelDictReset(t, fn, NoCompression, nil)
|
||||
testFileLevelDictReset(t, fn, DefaultCompression, nil)
|
||||
testFileLevelDictReset(t, fn, NoCompression, []byte(dictionary))
|
||||
testFileLevelDictReset(t, fn, DefaultCompression, []byte(dictionary))
|
||||
if !testing.Short() {
|
||||
for level := BestSpeed; level <= BestCompression; level++ {
|
||||
testFileLevelDictReset(t, fn, level, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterDictIsUsed(t *testing.T) {
|
||||
var input = []byte("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
|
||||
var buf bytes.Buffer
|
||||
|
|
|
@ -6,6 +6,8 @@
|
|||
// heap.Interface. A heap is a tree with the property that each node is the
|
||||
// minimum-valued node in its subtree.
|
||||
//
|
||||
// The minimum element in the tree is the root, at index 0.
|
||||
//
|
||||
// A heap is a common way to implement a priority queue. To build a priority
|
||||
// queue, implement the Heap interface with the (negative) priority as the
|
||||
// ordering for the Less method, so Push adds items while Pop removes the
|
||||
|
@ -54,7 +56,7 @@ func Push(h Interface, x interface{}) {
|
|||
|
||||
// Pop removes the minimum element (according to Less) from the heap
|
||||
// and returns it. The complexity is O(log(n)) where n = h.Len().
|
||||
// Same as Remove(h, 0).
|
||||
// It is equivalent to Remove(h, 0).
|
||||
//
|
||||
func Pop(h Interface) interface{} {
|
||||
n := h.Len() - 1
|
||||
|
@ -76,6 +78,15 @@ func Remove(h Interface, i int) interface{} {
|
|||
return h.Pop()
|
||||
}
|
||||
|
||||
// Fix reestablishes the heap ordering after the element at index i has changed its value.
|
||||
// Changing the value of the element at index i and then calling Fix is equivalent to,
|
||||
// but less expensive than, calling Remove(h, i) followed by a Push of the new value.
|
||||
// The complexity is O(log(n)) where n = h.Len().
|
||||
func Fix(h Interface, i int) {
|
||||
down(h, i, h.Len())
|
||||
up(h, i)
|
||||
}
|
||||
|
||||
func up(h Interface, j int) {
|
||||
for {
|
||||
i := (j - 1) / 2 // parent
|
||||
|
|
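heap.Fix restores the heap invariant after a single element's value changes, which is cheaper than calling Remove followed by Push. A small sketch with a minimal int min-heap:

```go
package main

import (
	"container/heap"
	"fmt"
)

// intHeap is a minimal min-heap of ints implementing heap.Interface.
type intHeap []int

func (h intHeap) Len() int            { return len(h) }
func (h intHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *intHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := &intHeap{5, 2, 8}
	heap.Init(h)

	// Change the value of an element in place, then restore heap order
	// with Fix instead of Remove followed by Push.
	(*h)[0] = 42
	heap.Fix(h, 0)

	fmt.Println(heap.Pop(h), heap.Pop(h), heap.Pop(h)) // 5 8 42
}
```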
|
@ -5,6 +5,7 @@
|
|||
package heap
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
|
@ -182,3 +183,31 @@ func BenchmarkDup(b *testing.B) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFix(t *testing.T) {
|
||||
h := new(myHeap)
|
||||
h.verify(t, 0)
|
||||
|
||||
for i := 200; i > 0; i -= 10 {
|
||||
Push(h, i)
|
||||
}
|
||||
h.verify(t, 0)
|
||||
|
||||
if (*h)[0] != 10 {
|
||||
t.Fatalf("Expected head to be 10, was %d", (*h)[0])
|
||||
}
|
||||
(*h)[0] = 210
|
||||
Fix(h, 0)
|
||||
h.verify(t, 0)
|
||||
|
||||
for i := 100; i > 0; i-- {
|
||||
elem := rand.Intn(h.Len())
|
||||
if i&1 == 0 {
|
||||
(*h)[elem] *= 2
|
||||
} else {
|
||||
(*h)[elem] /= 2
|
||||
}
|
||||
Fix(h, elem)
|
||||
h.verify(t, 0)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ type Element struct {
|
|||
|
||||
// Next returns the next list element or nil.
|
||||
func (e *Element) Next() *Element {
|
||||
if p := e.next; p != &e.list.root {
|
||||
if p := e.next; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
|
@ -37,7 +37,7 @@ func (e *Element) Next() *Element {
|
|||
|
||||
// Prev returns the previous list element or nil.
|
||||
func (e *Element) Prev() *Element {
|
||||
if p := e.prev; p != &e.list.root {
|
||||
if p := e.prev; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
|
@ -62,6 +62,7 @@ func (l *List) Init() *List {
|
|||
func New() *List { return new(List).Init() }
|
||||
|
||||
// Len returns the number of elements of list l.
|
||||
// The complexity is O(1).
|
||||
func (l *List) Len() int { return l.len }
|
||||
|
||||
// Front returns the first element of list l or nil
|
||||
|
@ -126,7 +127,7 @@ func (l *List) Remove(e *Element) interface{} {
|
|||
return e.Value
|
||||
}
|
||||
|
||||
// Pushfront inserts a new element e with value v at the front of list l and returns e.
|
||||
// PushFront inserts a new element e with value v at the front of list l and returns e.
|
||||
func (l *List) PushFront(v interface{}) *Element {
|
||||
l.lazyInit()
|
||||
return l.insertValue(v, &l.root)
|
||||
|
@ -178,6 +179,24 @@ func (l *List) MoveToBack(e *Element) {
|
|||
l.insert(l.remove(e), l.root.prev)
|
||||
}
|
||||
|
||||
// MoveBefore moves element e to its new position before mark.
|
||||
// If e is not an element of l, or e == mark, the list is not modified.
|
||||
func (l *List) MoveBefore(e, mark *Element) {
|
||||
if e.list != l || e == mark {
|
||||
return
|
||||
}
|
||||
l.insert(l.remove(e), mark.prev)
|
||||
}
|
||||
|
||||
// MoveAfter moves element e to its new position after mark.
|
||||
// If e is not an element of l, or e == mark, the list is not modified.
|
||||
func (l *List) MoveAfter(e, mark *Element) {
|
||||
if e.list != l || e == mark {
|
||||
return
|
||||
}
|
||||
l.insert(l.remove(e), mark)
|
||||
}
|
||||
|
||||
// PushBackList inserts a copy of an other list at the back of list l.
|
||||
// The lists l and other may be the same.
|
||||
func (l *List) PushBackList(other *List) {
|
||||
|
|
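MoveBefore and MoveAfter reposition an existing element relative to a mark, and do nothing when e == mark or when e is not in the list. A short usage sketch:

```go
package main

import (
	"container/list"
	"fmt"
)

func main() {
	l := list.New()
	e1 := l.PushBack(1)
	e2 := l.PushBack(2)
	e3 := l.PushBack(3)

	// Reposition existing elements; moving an element relative to
	// itself leaves the list unchanged.
	l.MoveAfter(e1, e3)  // list is now 2 3 1
	l.MoveBefore(e2, e2) // no-op

	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Print(e.Value, " ")
	}
	fmt.Println()
}
```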
|
@ -233,3 +233,55 @@ func TestIssue4103(t *testing.T) {
|
|||
t.Errorf("l1.Len() = %d, want 3", n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue6349(t *testing.T) {
|
||||
l := New()
|
||||
l.PushBack(1)
|
||||
l.PushBack(2)
|
||||
|
||||
e := l.Front()
|
||||
l.Remove(e)
|
||||
if e.Value != 1 {
|
||||
t.Errorf("e.value = %d, want 1", e.Value)
|
||||
}
|
||||
if e.Next() != nil {
|
||||
t.Errorf("e.Next() != nil")
|
||||
}
|
||||
if e.Prev() != nil {
|
||||
t.Errorf("e.Prev() != nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMove(t *testing.T) {
|
||||
l := New()
|
||||
e1 := l.PushBack(1)
|
||||
e2 := l.PushBack(2)
|
||||
e3 := l.PushBack(3)
|
||||
e4 := l.PushBack(4)
|
||||
|
||||
l.MoveAfter(e3, e3)
|
||||
checkListPointers(t, l, []*Element{e1, e2, e3, e4})
|
||||
l.MoveBefore(e2, e2)
|
||||
checkListPointers(t, l, []*Element{e1, e2, e3, e4})
|
||||
|
||||
l.MoveAfter(e3, e2)
|
||||
checkListPointers(t, l, []*Element{e1, e2, e3, e4})
|
||||
l.MoveBefore(e2, e3)
|
||||
checkListPointers(t, l, []*Element{e1, e2, e3, e4})
|
||||
|
||||
l.MoveBefore(e2, e4)
|
||||
checkListPointers(t, l, []*Element{e1, e3, e2, e4})
|
||||
e1, e2, e3, e4 = e1, e3, e2, e4
|
||||
|
||||
l.MoveBefore(e4, e1)
|
||||
checkListPointers(t, l, []*Element{e4, e1, e2, e3})
|
||||
e1, e2, e3, e4 = e4, e1, e2, e3
|
||||
|
||||
l.MoveAfter(e4, e1)
|
||||
checkListPointers(t, l, []*Element{e1, e4, e2, e3})
|
||||
e1, e2, e3, e4 = e1, e4, e2, e3
|
||||
|
||||
l.MoveAfter(e2, e3)
|
||||
checkListPointers(t, l, []*Element{e1, e3, e2, e4})
|
||||
e1, e2, e3, e4 = e1, e3, e2, e4
|
||||
}
|
||||
|
|
|
@ -61,6 +61,13 @@ func (x *cbcEncrypter) CryptBlocks(dst, src []byte) {
|
|||
}
|
||||
}
|
||||
|
||||
func (x *cbcEncrypter) SetIV(iv []byte) {
|
||||
if len(iv) != len(x.iv) {
|
||||
panic("cipher: incorrect length IV")
|
||||
}
|
||||
copy(x.iv, iv)
|
||||
}
|
||||
|
||||
type cbcDecrypter cbc
|
||||
|
||||
// NewCBCDecrypter returns a BlockMode which decrypts in cipher block chaining
|
||||
|
@ -94,3 +101,10 @@ func (x *cbcDecrypter) CryptBlocks(dst, src []byte) {
|
|||
dst = dst[x.blockSize:]
|
||||
}
|
||||
}
|
||||
|
||||
func (x *cbcDecrypter) SetIV(iv []byte) {
|
||||
if len(iv) != len(x.iv) {
|
||||
panic("cipher: incorrect length IV")
|
||||
}
|
||||
copy(x.iv, iv)
|
||||
}
|
||||
|
|
350
libgo/go/crypto/cipher/gcm.go
Normal file
|
@ -0,0 +1,350 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cipher
|
||||
|
||||
import (
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// AEAD is a cipher mode providing authenticated encryption with associated
|
||||
// data.
|
||||
type AEAD interface {
|
||||
// NonceSize returns the size of the nonce that must be passed to Seal
|
||||
// and Open.
|
||||
NonceSize() int
|
||||
|
||||
// Overhead returns the maximum difference between the lengths of a
|
||||
// plaintext and ciphertext.
|
||||
Overhead() int
|
||||
|
||||
// Seal encrypts and authenticates plaintext, authenticates the
|
||||
// additional data and appends the result to dst, returning the updated
|
||||
// slice. The nonce must be NonceSize() bytes long and unique for all
|
||||
// time, for a given key.
|
||||
//
|
||||
// The plaintext and dst may alias exactly or not at all.
|
||||
Seal(dst, nonce, plaintext, data []byte) []byte
|
||||
|
||||
// Open decrypts and authenticates ciphertext, authenticates the
|
||||
// additional data and, if successful, appends the resulting plaintext
|
||||
// to dst, returning the updated slice and true. On error, nil and
|
||||
// false is returned. The nonce must be NonceSize() bytes long and both
|
||||
// it and the additional data must match the value passed to Seal.
|
||||
//
|
||||
// The ciphertext and dst may alias exactly or not at all.
|
||||
Open(dst, nonce, ciphertext, data []byte) ([]byte, error)
|
||||
}
|
||||
|
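The new AEAD interface is typically reached through NewGCM over an AES block cipher. A usage sketch; key and nonce generation are shown with crypto/rand, and the additional-data bytes are arbitrary.

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"
	"log"
)

func main() {
	key := make([]byte, 16) // AES-128; NewGCM requires a 128-bit block cipher
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		log.Fatal(err)
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		log.Fatal(err)
	}

	nonce := make([]byte, aead.NonceSize()) // must be unique per key
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		log.Fatal(err)
	}

	// Seal appends ciphertext plus tag to its dst argument (nil here).
	ciphertext := aead.Seal(nil, nonce, []byte("hello world"), []byte("header"))

	// Open authenticates the additional data and tag before returning
	// the plaintext.
	plaintext, err := aead.Open(nil, nonce, ciphertext, []byte("header"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plaintext))
}
```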
||||
// gcmFieldElement represents a value in GF(2¹²⁸). In order to reflect the GCM
|
||||
// standard and make getUint64 suitable for marshaling these values, the bits
|
||||
// are stored backwards. For example:
|
||||
// the coefficient of x⁰ can be obtained by v.low >> 63.
|
||||
// the coefficient of x⁶³ can be obtained by v.low & 1.
|
||||
// the coefficient of x⁶⁴ can be obtained by v.high >> 63.
|
||||
// the coefficient of x¹²⁷ can be obtained by v.high & 1.
|
||||
type gcmFieldElement struct {
|
||||
low, high uint64
|
||||
}
|
||||
|
||||
// gcm represents a Galois Counter Mode with a specific key. See
|
||||
// http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
|
||||
type gcm struct {
|
||||
cipher Block
|
||||
// productTable contains the first sixteen powers of the key, H.
|
||||
// However, they are in bit reversed order. See NewGCM.
|
||||
productTable [16]gcmFieldElement
|
||||
}
|
||||
|
||||
// NewGCM returns the given 128-bit, block cipher wrapped in Galois Counter Mode.
|
||||
func NewGCM(cipher Block) (AEAD, error) {
|
||||
if cipher.BlockSize() != gcmBlockSize {
|
||||
return nil, errors.New("cipher: NewGCM requires 128-bit block cipher")
|
||||
}
|
||||
|
||||
var key [gcmBlockSize]byte
|
||||
cipher.Encrypt(key[:], key[:])
|
||||
|
||||
g := &gcm{cipher: cipher}
|
||||
|
||||
// We precompute 16 multiples of |key|. However, when we do lookups
|
||||
// into this table we'll be using bits from a field element and
|
||||
// therefore the bits will be in the reverse order. So normally one
|
||||
// would expect, say, 4*key to be in index 4 of the table but due to
|
||||
// this bit ordering it will actually be in index 0010 (base 2) = 2.
|
||||
x := gcmFieldElement{
|
||||
getUint64(key[:8]),
|
||||
getUint64(key[8:]),
|
||||
}
|
||||
g.productTable[reverseBits(1)] = x
|
||||
|
||||
for i := 2; i < 16; i += 2 {
|
||||
g.productTable[reverseBits(i)] = gcmDouble(&g.productTable[reverseBits(i/2)])
|
||||
g.productTable[reverseBits(i+1)] = gcmAdd(&g.productTable[reverseBits(i)], &x)
|
||||
}
|
||||
|
||||
return g, nil
|
||||
}
|
||||
|
||||
const (
|
||||
gcmBlockSize = 16
|
||||
gcmTagSize = 16
|
||||
gcmNonceSize = 12
|
||||
)
|
||||
|
||||
func (*gcm) NonceSize() int {
|
||||
return gcmNonceSize
|
||||
}
|
||||
|
||||
func (*gcm) Overhead() int {
|
||||
return gcmTagSize
|
||||
}
|
||||
|
||||
func (g *gcm) Seal(dst, nonce, plaintext, data []byte) []byte {
|
||||
if len(nonce) != gcmNonceSize {
|
||||
panic("cipher: incorrect nonce length given to GCM")
|
||||
}
|
||||
|
||||
ret, out := sliceForAppend(dst, len(plaintext)+gcmTagSize)
|
||||
|
||||
// See GCM spec, section 7.1.
|
||||
var counter, tagMask [gcmBlockSize]byte
|
||||
copy(counter[:], nonce)
|
||||
counter[gcmBlockSize-1] = 1
|
||||
|
||||
g.cipher.Encrypt(tagMask[:], counter[:])
|
||||
gcmInc32(&counter)
|
||||
|
||||
g.counterCrypt(out, plaintext, &counter)
|
||||
g.auth(out[len(plaintext):], out[:len(plaintext)], data, &tagMask)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
var errOpen = errors.New("cipher: message authentication failed")
|
||||
|
||||
func (g *gcm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
|
||||
if len(nonce) != gcmNonceSize {
|
||||
panic("cipher: incorrect nonce length given to GCM")
|
||||
}
|
||||
|
||||
if len(ciphertext) < gcmTagSize {
|
||||
return nil, errOpen
|
||||
}
|
||||
tag := ciphertext[len(ciphertext)-gcmTagSize:]
|
||||
ciphertext = ciphertext[:len(ciphertext)-gcmTagSize]
|
||||
|
||||
// See GCM spec, section 7.1.
|
||||
var counter, tagMask [gcmBlockSize]byte
|
||||
copy(counter[:], nonce)
|
||||
counter[gcmBlockSize-1] = 1
|
||||
|
||||
g.cipher.Encrypt(tagMask[:], counter[:])
|
||||
gcmInc32(&counter)
|
||||
|
||||
var expectedTag [gcmTagSize]byte
|
||||
g.auth(expectedTag[:], ciphertext, data, &tagMask)
|
||||
|
||||
if subtle.ConstantTimeCompare(expectedTag[:], tag) != 1 {
|
||||
return nil, errOpen
|
||||
}
|
||||
|
||||
ret, out := sliceForAppend(dst, len(ciphertext))
|
||||
g.counterCrypt(out, ciphertext, &counter)
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// reverseBits reverses the order of the bits of the 4-bit number in i.
|
||||
func reverseBits(i int) int {
|
||||
i = ((i << 2) & 0xc) | ((i >> 2) & 0x3)
|
||||
i = ((i << 1) & 0xa) | ((i >> 1) & 0x5)
|
||||
return i
|
||||
}
|
||||
|
||||
// gcmAdd adds two elements of GF(2¹²⁸) and returns the sum.
|
||||
func gcmAdd(x, y *gcmFieldElement) gcmFieldElement {
|
||||
// Addition in a characteristic 2 field is just XOR.
|
||||
return gcmFieldElement{x.low ^ y.low, x.high ^ y.high}
|
||||
}
|
||||
|
||||
// gcmDouble returns the result of doubling an element of GF(2¹²⁸).
|
||||
func gcmDouble(x *gcmFieldElement) (double gcmFieldElement) {
|
||||
msbSet := x.high&1 == 1
|
||||
|
||||
// Because of the bit-ordering, doubling is actually a right shift.
|
||||
double.high = x.high >> 1
|
||||
double.high |= x.low << 63
|
||||
double.low = x.low >> 1
|
||||
|
||||
// If the most-significant bit was set before shifting then it,
|
||||
// conceptually, becomes a term of x^128. This is greater than the
|
||||
// irreducible polynomial so the result has to be reduced. The
|
||||
// irreducible polynomial is 1+x+x^2+x^7+x^128. We can subtract that to
|
||||
// eliminate the term at x^128 which also means subtracting the other
|
||||
// four terms. In characteristic 2 fields, subtraction == addition ==
|
||||
// XOR.
|
||||
if msbSet {
|
||||
double.low ^= 0xe100000000000000
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var gcmReductionTable = []uint16{
|
||||
0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
|
||||
0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
|
||||
}
|
||||
|
||||
// mul sets y to y*H, where H is the GCM key, fixed during NewGCM.
|
||||
func (g *gcm) mul(y *gcmFieldElement) {
|
||||
var z gcmFieldElement
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
word := y.high
|
||||
if i == 1 {
|
||||
word = y.low
|
||||
}
|
||||
|
||||
// Multiplication works by multiplying z by 16 and adding in
|
||||
// one of the precomputed multiples of H.
|
||||
for j := 0; j < 64; j += 4 {
|
||||
msw := z.high & 0xf
|
||||
z.high >>= 4
|
||||
z.high |= z.low << 60
|
||||
z.low >>= 4
|
||||
z.low ^= uint64(gcmReductionTable[msw]) << 48
|
||||
|
||||
// the values in |table| are ordered for
|
||||
// little-endian bit positions. See the comment
|
||||
// in NewGCM.
|
||||
t := &g.productTable[word&0xf]
|
||||
|
||||
z.low ^= t.low
|
||||
z.high ^= t.high
|
||||
word >>= 4
|
||||
}
|
||||
}
|
||||
|
||||
*y = z
|
||||
}
|
||||
|
||||
// updateBlocks extends y with more polynomial terms from blocks, based on
|
||||
// Horner's rule. There must be a multiple of gcmBlockSize bytes in blocks.
|
||||
func (g *gcm) updateBlocks(y *gcmFieldElement, blocks []byte) {
|
||||
for len(blocks) > 0 {
|
||||
y.low ^= getUint64(blocks)
|
||||
y.high ^= getUint64(blocks[8:])
|
||||
g.mul(y)
|
||||
blocks = blocks[gcmBlockSize:]
|
||||
}
|
||||
}
|
||||
|
||||
// update extends y with more polynomial terms from data. If data is not a
|
||||
// multiple of gcmBlockSize bytes long then the remainder is zero padded.
|
||||
func (g *gcm) update(y *gcmFieldElement, data []byte) {
|
||||
fullBlocks := (len(data) >> 4) << 4
|
||||
g.updateBlocks(y, data[:fullBlocks])
|
||||
|
||||
if len(data) != fullBlocks {
|
||||
var partialBlock [gcmBlockSize]byte
|
||||
copy(partialBlock[:], data[fullBlocks:])
|
||||
g.updateBlocks(y, partialBlock[:])
|
||||
}
|
||||
}
|
||||
|
||||
// gcmInc32 treats the final four bytes of counterBlock as a big-endian value
|
||||
// and increments it.
|
||||
func gcmInc32(counterBlock *[16]byte) {
|
||||
c := 1
|
||||
for i := gcmBlockSize - 1; i >= gcmBlockSize-4; i-- {
|
||||
c += int(counterBlock[i])
|
||||
counterBlock[i] = byte(c)
|
||||
c >>= 8
|
||||
}
|
||||
}
|
||||
|
||||
// sliceForAppend takes a slice and a requested number of bytes. It returns a
|
||||
// slice with the contents of the given slice followed by that many bytes and a
|
||||
// second slice that aliases into it and contains only the extra bytes. If the
|
||||
// original slice has sufficient capacity then no allocation is performed.
|
||||
func sliceForAppend(in []byte, n int) (head, tail []byte) {
|
||||
if total := len(in) + n; cap(in) >= total {
|
||||
head = in[:total]
|
||||
} else {
|
||||
head = make([]byte, total)
|
||||
copy(head, in)
|
||||
}
|
||||
tail = head[len(in):]
|
||||
return
|
||||
}
|
||||
|
||||
// counterCrypt crypts in to out using g.cipher in counter mode.
|
||||
func (g *gcm) counterCrypt(out, in []byte, counter *[gcmBlockSize]byte) {
|
||||
var mask [gcmBlockSize]byte
|
||||
|
||||
for len(in) >= gcmBlockSize {
|
||||
g.cipher.Encrypt(mask[:], counter[:])
|
||||
gcmInc32(counter)
|
||||
|
||||
for i := range mask {
|
||||
out[i] = in[i] ^ mask[i]
|
||||
}
|
||||
out = out[gcmBlockSize:]
|
||||
in = in[gcmBlockSize:]
|
||||
}
|
||||
|
||||
if len(in) > 0 {
|
||||
g.cipher.Encrypt(mask[:], counter[:])
|
||||
gcmInc32(counter)
|
||||
|
||||
for i := range in {
|
||||
out[i] = in[i] ^ mask[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// auth calculates GHASH(ciphertext, additionalData), masks the result with
|
||||
// tagMask and writes the result to out.
|
||||
func (g *gcm) auth(out, ciphertext, additionalData []byte, tagMask *[gcmTagSize]byte) {
|
||||
var y gcmFieldElement
|
||||
g.update(&y, additionalData)
|
||||
g.update(&y, ciphertext)
|
||||
|
||||
y.low ^= uint64(len(additionalData)) * 8
|
||||
y.high ^= uint64(len(ciphertext)) * 8
|
||||
|
||||
g.mul(&y)
|
||||
|
||||
putUint64(out, y.low)
|
||||
putUint64(out[8:], y.high)
|
||||
|
||||
for i := range tagMask {
|
||||
out[i] ^= tagMask[i]
|
||||
}
|
||||
}
|
||||
|
||||
func getUint64(data []byte) uint64 {
|
||||
r := uint64(data[0])<<56 |
|
||||
uint64(data[1])<<48 |
|
||||
uint64(data[2])<<40 |
|
||||
uint64(data[3])<<32 |
|
||||
uint64(data[4])<<24 |
|
||||
uint64(data[5])<<16 |
|
||||
uint64(data[6])<<8 |
|
||||
uint64(data[7])
|
||||
return r
|
||||
}
|
||||
|
||||
func putUint64(out []byte, v uint64) {
|
||||
out[0] = byte(v >> 56)
|
||||
out[1] = byte(v >> 48)
|
||||
out[2] = byte(v >> 40)
|
||||
out[3] = byte(v >> 32)
|
||||
out[4] = byte(v >> 24)
|
||||
out[5] = byte(v >> 16)
|
||||
out[6] = byte(v >> 8)
|
||||
out[7] = byte(v)
|
||||
}
|
175
libgo/go/crypto/cipher/gcm_test.go
Normal file
|
@ -0,0 +1,175 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cipher_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// AES-GCM test vectors taken from gcmEncryptExtIV128.rsp from
|
||||
// http://csrc.nist.gov/groups/STM/cavp/index.html.
|
||||
var aesGCMTests = []struct {
|
||||
key, nonce, plaintext, ad, result string
|
||||
}{
|
||||
{
|
||||
"11754cd72aec309bf52f7687212e8957",
|
||||
"3c819d9a9bed087615030b65",
|
||||
"",
|
||||
"",
|
||||
"250327c674aaf477aef2675748cf6971",
|
||||
},
|
||||
{
|
||||
"ca47248ac0b6f8372a97ac43508308ed",
|
||||
"ffd2b598feabc9019262d2be",
|
||||
"",
|
||||
"",
|
||||
"60d20404af527d248d893ae495707d1a",
|
||||
},
|
||||
{
|
||||
"77be63708971c4e240d1cb79e8d77feb",
|
||||
"e0e00f19fed7ba0136a797f3",
|
||||
"",
|
||||
"7a43ec1d9c0a5a78a0b16533a6213cab",
|
||||
"209fcc8d3675ed938e9c7166709dd946",
|
||||
},
|
||||
{
|
||||
"7680c5d3ca6154758e510f4d25b98820",
|
||||
"f8f105f9c3df4965780321f8",
|
||||
"",
|
||||
"c94c410194c765e3dcc7964379758ed3",
|
||||
"94dca8edfcf90bb74b153c8d48a17930",
|
||||
},
|
||||
{
|
||||
"7fddb57453c241d03efbed3ac44e371c",
|
||||
"ee283a3fc75575e33efd4887",
|
||||
"d5de42b461646c255c87bd2962d3b9a2",
|
||||
"",
|
||||
"2ccda4a5415cb91e135c2a0f78c9b2fdb36d1df9b9d5e596f83e8b7f52971cb3",
|
||||
},
|
||||
{
|
||||
"ab72c77b97cb5fe9a382d9fe81ffdbed",
|
||||
"54cc7dc2c37ec006bcc6d1da",
|
||||
"007c5e5b3e59df24a7c355584fc1518d",
|
||||
"",
|
||||
"0e1bde206a07a9c2c1b65300f8c649972b4401346697138c7a4891ee59867d0c",
|
||||
},
|
||||
{
|
||||
"fe47fcce5fc32665d2ae399e4eec72ba",
|
||||
"5adb9609dbaeb58cbd6e7275",
|
||||
"7c0e88c88899a779228465074797cd4c2e1498d259b54390b85e3eef1c02df60e743f1b840382c4bccaf3bafb4ca8429bea063",
|
||||
"88319d6e1d3ffa5f987199166c8a9b56c2aeba5a",
|
||||
"98f4826f05a265e6dd2be82db241c0fbbbf9ffb1c173aa83964b7cf5393043736365253ddbc5db8778371495da76d269e5db3e291ef1982e4defedaa2249f898556b47",
|
||||
},
|
||||
{
|
||||
"ec0c2ba17aa95cd6afffe949da9cc3a8",
|
||||
"296bce5b50b7d66096d627ef",
|
||||
"b85b3753535b825cbe5f632c0b843c741351f18aa484281aebec2f45bb9eea2d79d987b764b9611f6c0f8641843d5d58f3a242",
|
||||
"f8d00f05d22bf68599bcdeb131292ad6e2df5d14",
|
||||
"a7443d31c26bdf2a1c945e29ee4bd344a99cfaf3aa71f8b3f191f83c2adfc7a07162995506fde6309ffc19e716eddf1a828c5a890147971946b627c40016da1ecf3e77",
|
||||
},
|
||||
{
|
||||
"2c1f21cf0f6fb3661943155c3e3d8492",
|
||||
"23cb5ff362e22426984d1907",
|
||||
"42f758836986954db44bf37c6ef5e4ac0adaf38f27252a1b82d02ea949c8a1a2dbc0d68b5615ba7c1220ff6510e259f06655d8",
|
||||
"5d3624879d35e46849953e45a32a624d6a6c536ed9857c613b572b0333e701557a713e3f010ecdf9a6bd6c9e3e44b065208645aff4aabee611b391528514170084ccf587177f4488f33cfb5e979e42b6e1cfc0a60238982a7aec",
|
||||
"81824f0e0d523db30d3da369fdc0d60894c7a0a20646dd015073ad2732bd989b14a222b6ad57af43e1895df9dca2a5344a62cc57a3ee28136e94c74838997ae9823f3a",
|
||||
},
|
||||
{
|
||||
"d9f7d2411091f947b4d6f1e2d1f0fb2e",
|
||||
"e1934f5db57cc983e6b180e7",
|
||||
"73ed042327f70fe9c572a61545eda8b2a0c6e1d6c291ef19248e973aee6c312012f490c2c6f6166f4a59431e182663fcaea05a",
|
||||
"0a8a18a7150e940c3d87b38e73baee9a5c049ee21795663e264b694a949822b639092d0e67015e86363583fcf0ca645af9f43375f05fdb4ce84f411dcbca73c2220dea03a20115d2e51398344b16bee1ed7c499b353d6c597af8",
|
||||
"aaadbd5c92e9151ce3db7210b8714126b73e43436d242677afa50384f2149b831f1d573c7891c2a91fbc48db29967ec9542b2321b51ca862cb637cdd03b99a0f93b134",
|
||||
},
|
||||
{
|
||||
"fe9bb47deb3a61e423c2231841cfd1fb",
|
||||
"4d328eb776f500a2f7fb47aa",
|
||||
"f1cc3818e421876bb6b8bbd6c9",
|
||||
"",
|
||||
"b88c5c1977b35b517b0aeae96743fd4727fe5cdb4b5b42818dea7ef8c9",
|
||||
},
|
||||
{
|
||||
"6703df3701a7f54911ca72e24dca046a",
|
||||
"12823ab601c350ea4bc2488c",
|
||||
"793cd125b0b84a043e3ac67717",
|
||||
"",
|
||||
"b2051c80014f42f08735a7b0cd38e6bcd29962e5f2c13626b85a877101",
|
||||
},
|
||||
}
|
||||
|
||||
func TestAESGCM(t *testing.T) {
|
||||
for i, test := range aesGCMTests {
|
||||
key, _ := hex.DecodeString(test.key)
|
||||
aes, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
nonce, _ := hex.DecodeString(test.nonce)
|
||||
plaintext, _ := hex.DecodeString(test.plaintext)
|
||||
ad, _ := hex.DecodeString(test.ad)
|
||||
aesgcm, err := cipher.NewGCM(aes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ct := aesgcm.Seal(nil, nonce, plaintext, ad)
|
||||
if ctHex := hex.EncodeToString(ct); ctHex != test.result {
|
||||
t.Errorf("#%d: got %s, want %s", i, ctHex, test.result)
|
||||
continue
|
||||
}
|
||||
|
||||
plaintext2, err := aesgcm.Open(nil, nonce, ct, ad)
|
||||
if err != nil {
|
||||
t.Errorf("#%d: Open failed", i)
|
||||
continue
|
||||
}
|
||||
|
||||
if !bytes.Equal(plaintext, plaintext2) {
|
||||
t.Errorf("#%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(ad) > 0 {
|
||||
ad[0] ^= 0x80
|
||||
if _, err := aesgcm.Open(nil, nonce, ct, ad); err == nil {
|
||||
t.Errorf("#%d: Open was successful after altering additional data", i)
|
||||
}
|
||||
ad[0] ^= 0x80
|
||||
}
|
||||
|
||||
nonce[0] ^= 0x80
|
||||
if _, err := aesgcm.Open(nil, nonce, ct, ad); err == nil {
|
||||
t.Errorf("#%d: Open was successful after altering nonce", i)
|
||||
}
|
||||
nonce[0] ^= 0x80
|
||||
|
||||
ct[0] ^= 0x80
|
||||
if _, err := aesgcm.Open(nil, nonce, ct, ad); err == nil {
|
||||
t.Errorf("#%d: Open was successful after altering ciphertext", i)
|
||||
}
|
||||
ct[0] ^= 0x80
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAESGCM(b *testing.B) {
|
||||
buf := make([]byte, 1024)
|
||||
b.SetBytes(int64(len(buf)))
|
||||
|
||||
var key [16]byte
|
||||
var nonce [12]byte
|
||||
aes, _ := aes.NewCipher(key[:])
|
||||
aesgcm, _ := cipher.NewGCM(aes)
|
||||
var out []byte
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
out = aesgcm.Seal(out[:0], nonce[:], buf, nonce[:])
|
||||
}
|
||||
}
|
|
@ -25,6 +25,8 @@ func (r StreamReader) Read(dst []byte) (n int, err error) {
|
|||
// StreamWriter wraps a Stream into an io.Writer. It calls XORKeyStream
|
||||
// to process each slice of data which passes through. If any Write call
|
||||
// returns short then the StreamWriter is out of sync and must be discarded.
|
||||
// A StreamWriter has no internal buffering; Close does not need
|
||||
// to be called to flush write data.
|
||||
type StreamWriter struct {
|
||||
S Stream
|
||||
W io.Writer
|
||||
|
@ -43,8 +45,11 @@ func (w StreamWriter) Write(src []byte) (n int, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
// Close closes the underlying Writer and returns its Close return value, if the Writer
|
||||
// is also an io.Closer. Otherwise it returns nil.
|
||||
func (w StreamWriter) Close() error {
|
||||
// This saves us from either requiring a WriteCloser or having a
|
||||
// StreamWriterCloser.
|
||||
return w.W.(io.Closer).Close()
|
||||
if c, ok := w.W.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
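A small sketch of wiring up a StreamWriter, showing why the Close change above matters when the underlying Writer is not an io.Closer (the CTR construction and the zero key/IV are illustrative placeholders only):

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	key := make([]byte, 16)           // placeholder key
	iv := make([]byte, aes.BlockSize) // placeholder IV

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	stream := cipher.NewCTR(block, iv)

	// bytes.Buffer is not an io.Closer; with the change above, Close
	// becomes a no-op instead of panicking on the type assertion.
	var buf bytes.Buffer
	w := cipher.StreamWriter{S: stream, W: &buf}
	w.Write([]byte("secret"))
	w.Close()

	fmt.Printf("%x\n", buf.Bytes())
}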
|
||||
|
|
|
@ -7,6 +7,7 @@ package crypto
|
|||
|
||||
import (
|
||||
"hash"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Hash identifies a cryptographic hash function that is implemented in another
|
||||
|
@ -59,7 +60,7 @@ func (h Hash) New() hash.Hash {
|
|||
return f()
|
||||
}
|
||||
}
|
||||
panic("crypto: requested hash function is unavailable")
|
||||
panic("crypto: requested hash function #" + strconv.Itoa(int(h)) + " is unavailable")
|
||||
}
|
||||
|
||||
// Available reports whether the given hash function is linked into the binary.
|
||||
|
@ -77,5 +78,8 @@ func RegisterHash(h Hash, f func() hash.Hash) {
|
|||
hashes[h] = f
|
||||
}
|
||||
|
||||
// PublicKey represents a public key using an unspecified algorithm.
|
||||
type PublicKey interface{}
|
||||
|
||||
// PrivateKey represents a private key using an unspecified algorithm.
|
||||
type PrivateKey interface{}
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
|
||||
func cryptBlock(subkeys []uint64, dst, src []byte, decrypt bool) {
|
||||
b := binary.BigEndian.Uint64(src)
|
||||
b = permuteBlock(b, initialPermutation[:])
|
||||
b = permuteInitialBlock(b)
|
||||
left, right := uint32(b>>32), uint32(b)
|
||||
|
||||
var subkey uint64
|
||||
|
@ -25,7 +25,7 @@ func cryptBlock(subkeys []uint64, dst, src []byte, decrypt bool) {
|
|||
}
|
||||
// switch left & right and perform final permutation
|
||||
preOutput := (uint64(right) << 32) | uint64(left)
|
||||
binary.BigEndian.PutUint64(dst, permuteBlock(preOutput, finalPermutation[:]))
|
||||
binary.BigEndian.PutUint64(dst, permuteFinalBlock(preOutput))
|
||||
}
|
||||
|
||||
// Encrypt one block from src into dst, using the subkeys.
|
||||
|
@ -40,20 +40,24 @@ func decryptBlock(subkeys []uint64, dst, src []byte) {
|
|||
|
||||
// DES Feistel function
|
||||
func feistel(right uint32, key uint64) (result uint32) {
|
||||
sBoxLocations := key ^ permuteBlock(uint64(right), expansionFunction[:])
|
||||
sBoxLocations := key ^ expandBlock(right)
|
||||
var sBoxResult uint32
|
||||
for i := uint8(0); i < 8; i++ {
|
||||
sBoxLocation := uint8(sBoxLocations>>42) & 0x3f
|
||||
sBoxLocations <<= 6
|
||||
// row determined by 1st and 6th bit
|
||||
row := (sBoxLocation & 0x1) | ((sBoxLocation & 0x20) >> 4)
|
||||
// column is middle four bits
|
||||
row := (sBoxLocation & 0x1) | ((sBoxLocation & 0x20) >> 4)
|
||||
column := (sBoxLocation >> 1) & 0xf
|
||||
sBoxResult |= uint32(sBoxes[i][row][column]) << (4 * (7 - i))
|
||||
sBoxResult ^= feistelBox[i][16*row+column]
|
||||
}
|
||||
return uint32(permuteBlock(uint64(sBoxResult), permutationFunction[:]))
|
||||
return sBoxResult
|
||||
}
|
||||
|
||||
// feistelBox[s][16*i+j] contains the output of permutationFunction
|
||||
// for sBoxes[s][i][j] << 4*(7-s)
|
||||
var feistelBox [8][64]uint32
|
||||
|
||||
// general purpose function to perform DES block permutations
|
||||
func permuteBlock(src uint64, permutation []uint8) (block uint64) {
|
||||
for position, n := range permutation {
|
||||
|
@ -63,6 +67,127 @@ func permuteBlock(src uint64, permutation []uint8) (block uint64) {
|
|||
return
|
||||
}
|
||||
|
||||
func init() {
|
||||
for s := range sBoxes {
|
||||
for i := 0; i < 4; i++ {
|
||||
for j := 0; j < 16; j++ {
|
||||
f := uint64(sBoxes[s][i][j]) << (4 * (7 - uint(s)))
|
||||
f = permuteBlock(uint64(f), permutationFunction[:])
|
||||
feistelBox[s][16*i+j] = uint32(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// expandBlock expands an input block of 32 bits,
|
||||
// producing an output block of 48 bits.
|
||||
func expandBlock(src uint32) (block uint64) {
|
||||
// rotate the 5 highest bits to the right.
|
||||
src = (src << 5) | (src >> 27)
|
||||
for i := 0; i < 8; i++ {
|
||||
block <<= 6
|
||||
// take the 6 bits on the right
|
||||
block |= uint64(src) & (1<<6 - 1)
|
||||
// advance by 4 bits.
|
||||
src = (src << 4) | (src >> 28)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// permuteInitialBlock is equivalent to the permutation defined
|
||||
// by initialPermutation.
|
||||
func permuteInitialBlock(block uint64) uint64 {
|
||||
// block = b7 b6 b5 b4 b3 b2 b1 b0 (8 bytes)
|
||||
b1 := block >> 48
|
||||
b2 := block << 48
|
||||
block ^= b1 ^ b2 ^ b1<<48 ^ b2>>48
|
||||
|
||||
// block = b1 b0 b5 b4 b3 b2 b7 b6
|
||||
b1 = block >> 32 & 0xff00ff
|
||||
b2 = (block & 0xff00ff00)
|
||||
block ^= b1<<32 ^ b2 ^ b1<<8 ^ b2<<24 // exchange b0 b4 with b3 b7
|
||||
|
||||
// block is now b1 b3 b5 b7 b0 b2 b4 b6, the permutation:
|
||||
// ... 8
|
||||
// ... 24
|
||||
// ... 40
|
||||
// ... 56
|
||||
// 7 6 5 4 3 2 1 0
|
||||
// 23 22 21 20 19 18 17 16
|
||||
// ... 32
|
||||
// ... 48
|
||||
|
||||
// exchange 4,5,6,7 with 32,33,34,35 etc.
|
||||
b1 = block & 0x0f0f00000f0f0000
|
||||
b2 = block & 0x0000f0f00000f0f0
|
||||
block ^= b1 ^ b2 ^ b1>>12 ^ b2<<12
|
||||
|
||||
// block is the permutation:
|
||||
//
|
||||
// [+8] [+40]
|
||||
//
|
||||
// 7 6 5 4
|
||||
// 23 22 21 20
|
||||
// 3 2 1 0
|
||||
// 19 18 17 16 [+32]
|
||||
|
||||
// exchange 0,1,4,5 with 18,19,22,23
|
||||
b1 = block & 0x3300330033003300
|
||||
b2 = block & 0x00cc00cc00cc00cc
|
||||
block ^= b1 ^ b2 ^ b1>>6 ^ b2<<6
|
||||
|
||||
// block is the permutation:
|
||||
// 15 14
|
||||
// 13 12
|
||||
// 11 10
|
||||
// 9 8
|
||||
// 7 6
|
||||
// 5 4
|
||||
// 3 2
|
||||
// 1 0 [+16] [+32] [+64]
|
||||
|
||||
// exchange 0,2,4,6 with 9,11,13,15:
|
||||
b1 = block & 0xaaaaaaaa55555555
|
||||
block ^= b1 ^ b1>>33 ^ b1<<33
|
||||
|
||||
// block is the permutation:
|
||||
// 6 14 22 30 38 46 54 62
|
||||
// 4 12 20 28 36 44 52 60
|
||||
// 2 10 18 26 34 42 50 58
|
||||
// 0 8 16 24 32 40 48 56
|
||||
// 7 15 23 31 39 47 55 63
|
||||
// 5 13 21 29 37 45 53 61
|
||||
// 3 11 19 27 35 43 51 59
|
||||
// 1 9 17 25 33 41 49 57
|
||||
return block
|
||||
}
|
||||
|
||||
// permuteFinalBlock is equivalent to the permutation defined
|
||||
// by finalPermutation.
|
||||
func permuteFinalBlock(block uint64) uint64 {
|
||||
// Perform the same bit exchanges as permuteInitialBlock
|
||||
// but in reverse order.
|
||||
b1 := block & 0xaaaaaaaa55555555
|
||||
block ^= b1 ^ b1>>33 ^ b1<<33
|
||||
|
||||
b1 = block & 0x3300330033003300
|
||||
b2 := block & 0x00cc00cc00cc00cc
|
||||
block ^= b1 ^ b2 ^ b1>>6 ^ b2<<6
|
||||
|
||||
b1 = block & 0x0f0f00000f0f0000
|
||||
b2 = block & 0x0000f0f00000f0f0
|
||||
block ^= b1 ^ b2 ^ b1>>12 ^ b2<<12
|
||||
|
||||
b1 = block >> 32 & 0xff00ff
|
||||
b2 = (block & 0xff00ff00)
|
||||
block ^= b1<<32 ^ b2 ^ b1<<8 ^ b2<<24
|
||||
|
||||
b1 = block >> 48
|
||||
b2 = block << 48
|
||||
block ^= b1 ^ b2 ^ b1<<48 ^ b2>>48
|
||||
return block
|
||||
}
|
||||
|
||||
// creates 16 28-bit blocks rotated according
|
||||
// to the rotation schedule
|
||||
func ksRotate(in uint32) (out []uint32) {
|
||||
|
|
|
@ -1504,20 +1504,63 @@ func TestSubstitutionTableKnownAnswerDecrypt(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func ExampleNewTripleDESCipher() {
|
||||
// NewTripleDESCipher can also be used when EDE2 is required by
|
||||
// duplicating the first 8 bytes of the 16-byte key.
|
||||
ede2Key := []byte("example key 1234")
|
||||
|
||||
var tripleDESKey []byte
|
||||
tripleDESKey = append(tripleDESKey, ede2Key[:16]...)
|
||||
tripleDESKey = append(tripleDESKey, ede2Key[:8]...)
|
||||
|
||||
_, err := NewTripleDESCipher(tripleDESKey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
func TestInitialPermute(t *testing.T) {
|
||||
for i := uint(0); i < 64; i++ {
|
||||
bit := uint64(1) << i
|
||||
got := permuteInitialBlock(bit)
|
||||
want := uint64(1) << finalPermutation[63-i]
|
||||
if got != want {
|
||||
t.Errorf("permute(%x) = %x, want %x", bit, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalPermute(t *testing.T) {
|
||||
for i := uint(0); i < 64; i++ {
|
||||
bit := uint64(1) << i
|
||||
got := permuteFinalBlock(bit)
|
||||
want := uint64(1) << initialPermutation[63-i]
|
||||
if got != want {
|
||||
t.Errorf("permute(%x) = %x, want %x", bit, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpandBlock(t *testing.T) {
|
||||
for i := uint(0); i < 32; i++ {
|
||||
bit := uint32(1) << i
|
||||
got := expandBlock(bit)
|
||||
want := permuteBlock(uint64(bit), expansionFunction[:])
|
||||
if got != want {
|
||||
t.Errorf("expand(%x) = %x, want %x", bit, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncrypt(b *testing.B) {
|
||||
tt := encryptDESTests[0]
|
||||
c, err := NewCipher(tt.key)
|
||||
if err != nil {
|
||||
b.Fatal("NewCipher:", err)
|
||||
}
|
||||
out := make([]byte, len(tt.in))
|
||||
b.SetBytes(int64(len(out)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Encrypt(out, tt.in)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkDecrypt(b *testing.B) {
|
||||
tt := encryptDESTests[0]
|
||||
c, err := NewCipher(tt.key)
|
||||
if err != nil {
|
||||
b.Fatal("NewCipher:", err)
|
||||
}
|
||||
out := make([]byte, len(tt.out))
|
||||
b.SetBytes(int64(len(out)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Decrypt(out, tt.out)
|
||||
}
|
||||
|
||||
// See crypto/cipher for how to use a cipher.Block for encryption and
|
||||
// decryption.
|
||||
}
|
||||
|
|
|
@ -123,8 +123,8 @@ func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err err
|
|||
return
|
||||
}
|
||||
|
||||
// Verify verifies the signature in r, s of hash using the public key, pub. It
|
||||
// returns true iff the signature is valid.
|
||||
// Verify verifies the signature in r, s of hash using the public key, pub. Its
|
||||
// return value records whether the signature is valid.
|
||||
func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
|
||||
// See [NSA] 3.4.2
|
||||
c := pub.Curve
|
||||
|
|
|
@ -322,7 +322,6 @@ func Unmarshal(curve Curve, data []byte) (x, y *big.Int) {
|
|||
}
|
||||
|
||||
var initonce sync.Once
|
||||
var p256 *CurveParams
|
||||
var p384 *CurveParams
|
||||
var p521 *CurveParams
|
||||
|
||||
|
@ -333,17 +332,6 @@ func initAll() {
|
|||
initP521()
|
||||
}
|
||||
|
||||
func initP256() {
|
||||
// See FIPS 186-3, section D.2.3
|
||||
p256 = new(CurveParams)
|
||||
p256.P, _ = new(big.Int).SetString("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10)
|
||||
p256.N, _ = new(big.Int).SetString("115792089210356248762697446949407573529996955224135760342422259061068512044369", 10)
|
||||
p256.B, _ = new(big.Int).SetString("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", 16)
|
||||
p256.Gx, _ = new(big.Int).SetString("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", 16)
|
||||
p256.Gy, _ = new(big.Int).SetString("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", 16)
|
||||
p256.BitSize = 256
|
||||
}
|
||||
|
||||
func initP384() {
|
||||
// See FIPS 186-3, section D.2.4
|
||||
p384 = new(CurveParams)
|
||||
|
|
|
@ -322,6 +322,52 @@ func TestGenericBaseMult(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestP256BaseMult(t *testing.T) {
|
||||
p256 := P256()
|
||||
p256Generic := p256.Params()
|
||||
|
||||
scalars := make([]*big.Int, 0, len(p224BaseMultTests)+1)
|
||||
for _, e := range p224BaseMultTests {
|
||||
k, _ := new(big.Int).SetString(e.k, 10)
|
||||
scalars = append(scalars, k)
|
||||
}
|
||||
k := new(big.Int).SetInt64(1)
|
||||
k.Lsh(k, 500)
|
||||
scalars = append(scalars, k)
|
||||
|
||||
for i, k := range scalars {
|
||||
x, y := p256.ScalarBaseMult(k.Bytes())
|
||||
x2, y2 := p256Generic.ScalarBaseMult(k.Bytes())
|
||||
if x.Cmp(x2) != 0 || y.Cmp(y2) != 0 {
|
||||
t.Errorf("#%d: got (%x, %x), want (%x, %x)", i, x, y, x2, y2)
|
||||
}
|
||||
|
||||
if testing.Short() && i > 5 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestP256Mult(t *testing.T) {
|
||||
p256 := P256()
|
||||
p256Generic := p256.Params()
|
||||
|
||||
for i, e := range p224BaseMultTests {
|
||||
x, _ := new(big.Int).SetString(e.x, 16)
|
||||
y, _ := new(big.Int).SetString(e.y, 16)
|
||||
k, _ := new(big.Int).SetString(e.k, 10)
|
||||
|
||||
xx, yy := p256.ScalarMult(x, y, k.Bytes())
|
||||
xx2, yy2 := p256Generic.ScalarMult(x, y, k.Bytes())
|
||||
if xx.Cmp(xx2) != 0 || yy.Cmp(yy2) != 0 {
|
||||
t.Errorf("#%d: got (%x, %x), want (%x, %x)", i, xx, yy, xx2, yy2)
|
||||
}
|
||||
if testing.Short() && i > 5 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInfinity(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
@ -371,6 +417,17 @@ func BenchmarkBaseMult(b *testing.B) {
|
|||
}
|
||||
}
|
||||
|
||||
func BenchmarkBaseMultP256(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
p256 := P256()
|
||||
e := p224BaseMultTests[25]
|
||||
k, _ := new(big.Int).SetString(e.k, 10)
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
p256.ScalarBaseMult(k.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshal(t *testing.T) {
|
||||
p224 := P224()
|
||||
_, x, y, err := GenerateKey(p224, rand.Reader)
|
||||
|
|
1186
libgo/go/crypto/elliptic/p256.go
Normal file
File diff suppressed because it is too large
|
@ -164,7 +164,7 @@ var program = `
|
|||
// DO NOT EDIT.
|
||||
// Generate with: go run gen.go{{if .Full}} -full{{end}} | gofmt >md5block.go
|
||||
|
||||
// +build !amd64
|
||||
// +build !amd64,!386,!arm
|
||||
|
||||
package md5
|
||||
|
||||
|
|
|
@ -88,7 +88,11 @@ func (d *digest) Write(p []byte) (nn int, err error) {
|
|||
func (d0 *digest) Sum(in []byte) []byte {
|
||||
// Make a copy of d0 so that caller can keep writing and summing.
|
||||
d := *d0
|
||||
hash := d.checkSum()
|
||||
return append(in, hash[:]...)
|
||||
}
|
||||
|
||||
func (d *digest) checkSum() [Size]byte {
|
||||
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
|
||||
len := d.len
|
||||
var tmp [64]byte
|
||||
|
@ -118,5 +122,13 @@ func (d0 *digest) Sum(in []byte) []byte {
|
|||
digest[i*4+3] = byte(s >> 24)
|
||||
}
|
||||
|
||||
return append(in, digest[:]...)
|
||||
return digest
|
||||
}
|
||||
|
||||
// Sum returns the MD5 checksum of the data.
|
||||
func Sum(data []byte) [Size]byte {
|
||||
var d digest
|
||||
d.Reset()
|
||||
d.Write(data)
|
||||
return d.checkSum()
|
||||
}
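The new one-shot Sum helper avoids allocating a hash.Hash when only a single digest is needed; a quick sketch:

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	digest := md5.Sum([]byte("hello world")) // returns a [16]byte value directly
	fmt.Printf("%x\n", digest)
}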
|
||||
|
|
|
@ -53,6 +53,10 @@ var golden = []md5Test{
|
|||
func TestGolden(t *testing.T) {
|
||||
for i := 0; i < len(golden); i++ {
|
||||
g := golden[i]
|
||||
s := fmt.Sprintf("%x", Sum([]byte(g.in)))
|
||||
if s != g.out {
|
||||
t.Fatalf("Sum function: md5(%s) = %s want %s", g.in, s, g.out)
|
||||
}
|
||||
c := New()
|
||||
buf := make([]byte, len(g.in)+4)
|
||||
for j := 0; j < 3+4; j++ {
|
||||
|
@ -77,12 +81,28 @@ func TestGolden(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func ExampleNew() {
|
||||
h := New()
|
||||
io.WriteString(h, "The fog is getting thicker!")
|
||||
io.WriteString(h, "And Leon's getting laaarger!")
|
||||
fmt.Printf("%x", h.Sum(nil))
|
||||
// Output: e2c569be17396eca2a2e3c11578123ed
|
||||
func TestLarge(t *testing.T) {
|
||||
const N = 10000
|
||||
ok := "2bb571599a4180e1d542f76904adc3df" // md5sum of "0123456789" * 1000
|
||||
block := make([]byte, 10004)
|
||||
c := New()
|
||||
for offset := 0; offset < 4; offset++ {
|
||||
for i := 0; i < N; i++ {
|
||||
block[offset+i] = '0' + byte(i%10)
|
||||
}
|
||||
for blockSize := 10; blockSize <= N; blockSize *= 10 {
|
||||
blocks := N / blockSize
|
||||
b := block[offset : offset+blockSize]
|
||||
c.Reset()
|
||||
for i := 0; i < blocks; i++ {
|
||||
c.Write(b)
|
||||
}
|
||||
s := fmt.Sprintf("%x", c.Sum(nil))
|
||||
if s != ok {
|
||||
t.Fatalf("md5 TestLarge offset=%d, blockSize=%d = %s want %s", offset, blockSize, s, ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var bench = New()
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
// DO NOT EDIT.
|
||||
// Generate with: go run gen.go -full | gofmt >md5block.go
|
||||
|
||||
// +build !amd64,!386
|
||||
// +build !amd64,!386,!arm
|
||||
|
||||
package md5
|
||||
|
||||
|
|
|
@ -2,8 +2,10 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64 386
|
||||
// +build amd64 386 arm
|
||||
|
||||
package md5
|
||||
|
||||
//go:noescape
|
||||
|
||||
func block(dig *digest, p []byte)
|
||||
|
|
|
@ -14,5 +14,8 @@ import "io"
|
|||
// On Windows systems, Reader uses the CryptGenRandom API.
|
||||
var Reader io.Reader
|
||||
|
||||
// Read is a helper function that calls Reader.Read.
|
||||
func Read(b []byte) (n int, err error) { return Reader.Read(b) }
|
||||
// Read is a helper function that calls Reader.Read using io.ReadFull.
|
||||
// On return, n == len(b) if and only if err == nil.
|
||||
func Read(b []byte) (n int, err error) {
|
||||
return io.ReadFull(Reader, b)
|
||||
}
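Since Read now goes through io.ReadFull, a caller can rely on the buffer being completely filled whenever err is nil; a minimal sketch:

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	nonce := make([]byte, 12)
	if _, err := rand.Read(nonce); err != nil {
		panic(err) // on success, all 12 bytes are filled
	}
	fmt.Printf("%x\n", nonce)
}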
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin freebsd linux netbsd openbsd plan9
|
||||
// +build darwin dragonfly freebsd linux netbsd openbsd plan9
|
||||
|
||||
// Unix cryptographically secure pseudorandom number
|
||||
// generator.
|
||||
|
|
|
@ -124,7 +124,11 @@ func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid
|
|||
lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
|
||||
}
|
||||
|
||||
valid = firstByteIsZero & secondByteIsTwo & (^lookingForIndex & 1)
|
||||
// The PS padding must be at least 8 bytes long, and it starts two
|
||||
// bytes into em.
|
||||
validPS := subtle.ConstantTimeLessOrEq(2+8, index)
|
||||
|
||||
valid = firstByteIsZero & secondByteIsTwo & (^lookingForIndex & 1) & validPS
|
||||
msg = em[index+1:]
|
||||
return
|
||||
}
|
||||
|
|
|
@ -197,6 +197,14 @@ func TestVerifyPKCS1v15(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestOverlongMessagePKCS1v15(t *testing.T) {
|
||||
ciphertext := decodeBase64("fjOVdirUzFoLlukv80dBllMLjXythIf22feqPrNo0YoIjzyzyoMFiLjAc/Y4krkeZ11XFThIrEvw\nkRiZcCq5ng==")
|
||||
_, err := DecryptPKCS1v15(nil, rsaPrivateKey, ciphertext)
|
||||
if err == nil {
|
||||
t.Error("RSA decrypted a message that was too long.")
|
||||
}
|
||||
}
|
||||
|
||||
// In order to generate new test vectors you'll need the PEM form of this key:
|
||||
// -----BEGIN RSA PRIVATE KEY-----
|
||||
// MIIBOgIBAAJBALKZD0nEffqM1ACuak0bijtqE2QrI/KLADv7l3kK3ppMyCuLKoF0
|
||||
|
|
282
libgo/go/crypto/rsa/pss.go
Normal file
|
@ -0,0 +1,282 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package rsa
|
||||
|
||||
// This file implements the PSS signature scheme [1].
|
||||
//
|
||||
// [1] http://www.rsa.com/rsalabs/pkcs/files/h11300-wp-pkcs-1v2-2-rsa-cryptography-standard.pdf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"errors"
|
||||
"hash"
|
||||
"io"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) {
|
||||
// See [1], section 9.1.1
|
||||
hLen := hash.Size()
|
||||
sLen := len(salt)
|
||||
emLen := (emBits + 7) / 8
|
||||
|
||||
// 1. If the length of M is greater than the input limitation for the
|
||||
// hash function (2^61 - 1 octets for SHA-1), output "message too
|
||||
// long" and stop.
|
||||
//
|
||||
// 2. Let mHash = Hash(M), an octet string of length hLen.
|
||||
|
||||
if len(mHash) != hLen {
|
||||
return nil, errors.New("crypto/rsa: input must be hashed message")
|
||||
}
|
||||
|
||||
// 3. If emLen < hLen + sLen + 2, output "encoding error" and stop.
|
||||
|
||||
if emLen < hLen+sLen+2 {
|
||||
return nil, errors.New("crypto/rsa: encoding error")
|
||||
}
|
||||
|
||||
em := make([]byte, emLen)
|
||||
db := em[:emLen-sLen-hLen-2+1+sLen]
|
||||
h := em[emLen-sLen-hLen-2+1+sLen : emLen-1]
|
||||
|
||||
// 4. Generate a random octet string salt of length sLen; if sLen = 0,
|
||||
// then salt is the empty string.
|
||||
//
|
||||
// 5. Let
|
||||
// M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt;
|
||||
//
|
||||
// M' is an octet string of length 8 + hLen + sLen with eight
|
||||
// initial zero octets.
|
||||
//
|
||||
// 6. Let H = Hash(M'), an octet string of length hLen.
|
||||
|
||||
var prefix [8]byte
|
||||
|
||||
hash.Write(prefix[:])
|
||||
hash.Write(mHash)
|
||||
hash.Write(salt)
|
||||
|
||||
h = hash.Sum(h[:0])
|
||||
hash.Reset()
|
||||
|
||||
// 7. Generate an octet string PS consisting of emLen - sLen - hLen - 2
|
||||
// zero octets. The length of PS may be 0.
|
||||
//
|
||||
// 8. Let DB = PS || 0x01 || salt; DB is an octet string of length
|
||||
// emLen - hLen - 1.
|
||||
|
||||
db[emLen-sLen-hLen-2] = 0x01
|
||||
copy(db[emLen-sLen-hLen-1:], salt)
|
||||
|
||||
// 9. Let dbMask = MGF(H, emLen - hLen - 1).
|
||||
//
|
||||
// 10. Let maskedDB = DB \xor dbMask.
|
||||
|
||||
mgf1XOR(db, hash, h)
|
||||
|
||||
// 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
|
||||
// maskedDB to zero.
|
||||
|
||||
db[0] &= (0xFF >> uint(8*emLen-emBits))
|
||||
|
||||
// 12. Let EM = maskedDB || H || 0xbc.
|
||||
em[emLen-1] = 0xBC
|
||||
|
||||
// 13. Output EM.
|
||||
return em, nil
|
||||
}
|
||||
|
||||
func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
|
||||
// 1. If the length of M is greater than the input limitation for the
|
||||
// hash function (2^61 - 1 octets for SHA-1), output "inconsistent"
|
||||
// and stop.
|
||||
//
|
||||
// 2. Let mHash = Hash(M), an octet string of length hLen.
|
||||
hLen := hash.Size()
|
||||
if hLen != len(mHash) {
|
||||
return ErrVerification
|
||||
}
|
||||
|
||||
// 3. If emLen < hLen + sLen + 2, output "inconsistent" and stop.
|
||||
emLen := (emBits + 7) / 8
|
||||
if emLen < hLen+sLen+2 {
|
||||
return ErrVerification
|
||||
}
|
||||
|
||||
// 4. If the rightmost octet of EM does not have hexadecimal value
|
||||
// 0xbc, output "inconsistent" and stop.
|
||||
if em[len(em)-1] != 0xBC {
|
||||
return ErrVerification
|
||||
}
|
||||
|
||||
// 5. Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and
|
||||
// let H be the next hLen octets.
|
||||
db := em[:emLen-hLen-1]
|
||||
h := em[emLen-hLen-1 : len(em)-1]
|
||||
|
||||
// 6. If the leftmost 8 * emLen - emBits bits of the leftmost octet in
|
||||
// maskedDB are not all equal to zero, output "inconsistent" and
|
||||
// stop.
|
||||
if em[0]&(0xFF<<uint(8-(8*emLen-emBits))) != 0 {
|
||||
return ErrVerification
|
||||
}
|
||||
|
||||
// 7. Let dbMask = MGF(H, emLen - hLen - 1).
|
||||
//
|
||||
// 8. Let DB = maskedDB \xor dbMask.
|
||||
mgf1XOR(db, hash, h)
|
||||
|
||||
// 9. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in DB
|
||||
// to zero.
|
||||
db[0] &= (0xFF >> uint(8*emLen-emBits))
|
||||
|
||||
if sLen == PSSSaltLengthAuto {
|
||||
FindSaltLength:
|
||||
for sLen = emLen - (hLen + 2); sLen >= 0; sLen-- {
|
||||
switch db[emLen-hLen-sLen-2] {
|
||||
case 1:
|
||||
break FindSaltLength
|
||||
case 0:
|
||||
continue
|
||||
default:
|
||||
return ErrVerification
|
||||
}
|
||||
}
|
||||
if sLen < 0 {
|
||||
return ErrVerification
|
||||
}
|
||||
} else {
|
||||
// 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero
|
||||
// or if the octet at position emLen - hLen - sLen - 1 (the leftmost
|
||||
// position is "position 1") does not have hexadecimal value 0x01,
|
||||
// output "inconsistent" and stop.
|
||||
for _, e := range db[:emLen-hLen-sLen-2] {
|
||||
if e != 0x00 {
|
||||
return ErrVerification
|
||||
}
|
||||
}
|
||||
if db[emLen-hLen-sLen-2] != 0x01 {
|
||||
return ErrVerification
|
||||
}
|
||||
}
|
||||
|
||||
// 11. Let salt be the last sLen octets of DB.
|
||||
salt := db[len(db)-sLen:]
|
||||
|
||||
// 12. Let
|
||||
// M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt ;
|
||||
// M' is an octet string of length 8 + hLen + sLen with eight
|
||||
// initial zero octets.
|
||||
//
|
||||
// 13. Let H' = Hash(M'), an octet string of length hLen.
|
||||
var prefix [8]byte
|
||||
hash.Write(prefix[:])
|
||||
hash.Write(mHash)
|
||||
hash.Write(salt)
|
||||
|
||||
h0 := hash.Sum(nil)
|
||||
|
||||
// 14. If H = H', output "consistent." Otherwise, output "inconsistent."
|
||||
if !bytes.Equal(h0, h) {
|
||||
return ErrVerification
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// signPSSWithSalt calculates the signature of hashed using PSS [1] with specified salt.
|
||||
// Note that hashed must be the result of hashing the input message using the
|
||||
// given hash function. salt is a random sequence of bytes whose length will be
|
||||
// later used to verify the signature.
|
||||
func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) {
|
||||
nBits := priv.N.BitLen()
|
||||
em, err := emsaPSSEncode(hashed, nBits-1, salt, hash.New())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
m := new(big.Int).SetBytes(em)
|
||||
c, err := decrypt(rand, priv, m)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
s = make([]byte, (nBits+7)/8)
|
||||
copyWithLeftPad(s, c.Bytes())
|
||||
return
|
||||
}
|
||||
|
||||
const (
|
||||
// PSSSaltLengthAuto causes the salt in a PSS signature to be as large
|
||||
// as possible when signing, and to be auto-detected when verifying.
|
||||
PSSSaltLengthAuto = 0
|
||||
// PSSSaltLengthEqualsHash causes the salt length to equal the length
|
||||
// of the hash used in the signature.
|
||||
PSSSaltLengthEqualsHash = -1
|
||||
)
|
||||
|
||||
// PSSOptions contains options for creating and verifying PSS signatures.
|
||||
type PSSOptions struct {
|
||||
// SaltLength controls the length of the salt used in the PSS
|
||||
// signature. It can either be a number of bytes, or one of the special
|
||||
// PSSSaltLength constants.
|
||||
SaltLength int
|
||||
}
|
||||
|
||||
func (opts *PSSOptions) saltLength() int {
|
||||
if opts == nil {
|
||||
return PSSSaltLengthAuto
|
||||
}
|
||||
return opts.SaltLength
|
||||
}
|
||||
|
||||
// SignPSS calculates the signature of hashed using RSASSA-PSS [1].
|
||||
// Note that hashed must be the result of hashing the input message using the
|
||||
// given hash function. The opts argument may be nil, in which case sensible
|
||||
// defaults are used.
|
||||
func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte, opts *PSSOptions) (s []byte, err error) {
|
||||
saltLength := opts.saltLength()
|
||||
switch saltLength {
|
||||
case PSSSaltLengthAuto:
|
||||
saltLength = (priv.N.BitLen()+7)/8 - 2 - hash.Size()
|
||||
case PSSSaltLengthEqualsHash:
|
||||
saltLength = hash.Size()
|
||||
}
|
||||
|
||||
salt := make([]byte, saltLength)
|
||||
if _, err = io.ReadFull(rand, salt); err != nil {
|
||||
return
|
||||
}
|
||||
return signPSSWithSalt(rand, priv, hash, hashed, salt)
|
||||
}
|
||||
|
||||
// VerifyPSS verifies a PSS signature.
|
||||
// hashed is the result of hashing the input message using the given hash
|
||||
// function and sig is the signature. A valid signature is indicated by
|
||||
// returning a nil error. The opts argument may be nil, in which case sensible
|
||||
// defaults are used.
|
||||
func VerifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *PSSOptions) error {
|
||||
return verifyPSS(pub, hash, hashed, sig, opts.saltLength())
|
||||
}
|
||||
|
||||
// verifyPSS verifies a PSS signature with the given salt length.
|
||||
func verifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, saltLen int) error {
|
||||
nBits := pub.N.BitLen()
|
||||
if len(sig) != (nBits+7)/8 {
|
||||
return ErrVerification
|
||||
}
|
||||
s := new(big.Int).SetBytes(sig)
|
||||
m := encrypt(new(big.Int), pub, s)
|
||||
emBits := nBits - 1
|
||||
emLen := (emBits + 7) / 8
|
||||
if emLen < len(m.Bytes()) {
|
||||
return ErrVerification
|
||||
}
|
||||
em := make([]byte, emLen)
|
||||
copyWithLeftPad(em, m.Bytes())
|
||||
if saltLen == PSSSaltLengthEqualsHash {
|
||||
saltLen = hash.Size()
|
||||
}
|
||||
return emsaPSSVerify(hashed, em, emBits, saltLen, hash.New())
|
||||
}
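A round-trip sketch for the exported SignPSS/VerifyPSS pair; the 2048-bit key size and SHA-256 are arbitrary choices for illustration, not anything mandated by the package:

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	hashed := sha256.Sum256([]byte("message to sign"))

	opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}
	sig, err := rsa.SignPSS(rand.Reader, priv, crypto.SHA256, hashed[:], opts)
	if err != nil {
		panic(err)
	}

	if err := rsa.VerifyPSS(&priv.PublicKey, crypto.SHA256, hashed[:], sig, opts); err != nil {
		panic(err)
	}
	fmt.Println("PSS signature verified")
}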
|
249
libgo/go/crypto/rsa/pss_test.go
Normal file
|
@ -0,0 +1,249 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package rsa
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"crypto"
|
||||
_ "crypto/md5"
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
_ "crypto/sha256"
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEMSAPSS(t *testing.T) {
|
||||
// Test vector in file pss-int.txt from: ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
|
||||
msg := []byte{
|
||||
0x85, 0x9e, 0xef, 0x2f, 0xd7, 0x8a, 0xca, 0x00, 0x30, 0x8b,
|
||||
0xdc, 0x47, 0x11, 0x93, 0xbf, 0x55, 0xbf, 0x9d, 0x78, 0xdb,
|
||||
0x8f, 0x8a, 0x67, 0x2b, 0x48, 0x46, 0x34, 0xf3, 0xc9, 0xc2,
|
||||
0x6e, 0x64, 0x78, 0xae, 0x10, 0x26, 0x0f, 0xe0, 0xdd, 0x8c,
|
||||
0x08, 0x2e, 0x53, 0xa5, 0x29, 0x3a, 0xf2, 0x17, 0x3c, 0xd5,
|
||||
0x0c, 0x6d, 0x5d, 0x35, 0x4f, 0xeb, 0xf7, 0x8b, 0x26, 0x02,
|
||||
0x1c, 0x25, 0xc0, 0x27, 0x12, 0xe7, 0x8c, 0xd4, 0x69, 0x4c,
|
||||
0x9f, 0x46, 0x97, 0x77, 0xe4, 0x51, 0xe7, 0xf8, 0xe9, 0xe0,
|
||||
0x4c, 0xd3, 0x73, 0x9c, 0x6b, 0xbf, 0xed, 0xae, 0x48, 0x7f,
|
||||
0xb5, 0x56, 0x44, 0xe9, 0xca, 0x74, 0xff, 0x77, 0xa5, 0x3c,
|
||||
0xb7, 0x29, 0x80, 0x2f, 0x6e, 0xd4, 0xa5, 0xff, 0xa8, 0xba,
|
||||
0x15, 0x98, 0x90, 0xfc,
|
||||
}
|
||||
salt := []byte{
|
||||
0xe3, 0xb5, 0xd5, 0xd0, 0x02, 0xc1, 0xbc, 0xe5, 0x0c, 0x2b,
|
||||
0x65, 0xef, 0x88, 0xa1, 0x88, 0xd8, 0x3b, 0xce, 0x7e, 0x61,
|
||||
}
|
||||
expected := []byte{
|
||||
0x66, 0xe4, 0x67, 0x2e, 0x83, 0x6a, 0xd1, 0x21, 0xba, 0x24,
|
||||
0x4b, 0xed, 0x65, 0x76, 0xb8, 0x67, 0xd9, 0xa4, 0x47, 0xc2,
|
||||
0x8a, 0x6e, 0x66, 0xa5, 0xb8, 0x7d, 0xee, 0x7f, 0xbc, 0x7e,
|
||||
0x65, 0xaf, 0x50, 0x57, 0xf8, 0x6f, 0xae, 0x89, 0x84, 0xd9,
|
||||
0xba, 0x7f, 0x96, 0x9a, 0xd6, 0xfe, 0x02, 0xa4, 0xd7, 0x5f,
|
||||
0x74, 0x45, 0xfe, 0xfd, 0xd8, 0x5b, 0x6d, 0x3a, 0x47, 0x7c,
|
||||
0x28, 0xd2, 0x4b, 0xa1, 0xe3, 0x75, 0x6f, 0x79, 0x2d, 0xd1,
|
||||
0xdc, 0xe8, 0xca, 0x94, 0x44, 0x0e, 0xcb, 0x52, 0x79, 0xec,
|
||||
0xd3, 0x18, 0x3a, 0x31, 0x1f, 0xc8, 0x96, 0xda, 0x1c, 0xb3,
|
||||
0x93, 0x11, 0xaf, 0x37, 0xea, 0x4a, 0x75, 0xe2, 0x4b, 0xdb,
|
||||
0xfd, 0x5c, 0x1d, 0xa0, 0xde, 0x7c, 0xec, 0xdf, 0x1a, 0x89,
|
||||
0x6f, 0x9d, 0x8b, 0xc8, 0x16, 0xd9, 0x7c, 0xd7, 0xa2, 0xc4,
|
||||
0x3b, 0xad, 0x54, 0x6f, 0xbe, 0x8c, 0xfe, 0xbc,
|
||||
}
|
||||
|
||||
hash := sha1.New()
|
||||
hash.Write(msg)
|
||||
hashed := hash.Sum(nil)
|
||||
|
||||
encoded, err := emsaPSSEncode(hashed, 1023, salt, sha1.New())
|
||||
if err != nil {
|
||||
t.Errorf("Error from emsaPSSEncode: %s\n", err)
|
||||
}
|
||||
if !bytes.Equal(encoded, expected) {
|
||||
t.Errorf("Bad encoding. got %x, want %x", encoded, expected)
|
||||
}
|
||||
|
||||
if err = emsaPSSVerify(hashed, encoded, 1023, len(salt), sha1.New()); err != nil {
|
||||
t.Errorf("Bad verification: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestPSSGolden tests all the test vectors in pss-vect.txt from
|
||||
// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
|
||||
func TestPSSGolden(t *testing.T) {
|
||||
inFile, err := os.Open("testdata/pss-vect.txt.bz2")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open input file: %s", err)
|
||||
}
|
||||
defer inFile.Close()
|
||||
|
||||
// The pss-vect.txt file contains RSA keys and then a series of
|
||||
// signatures. A goroutine is used to preprocess the input by merging
|
||||
// lines, removing spaces in hex values and identifying the start of
|
||||
// new keys and signature blocks.
|
||||
const newKeyMarker = "START NEW KEY"
|
||||
const newSignatureMarker = "START NEW SIGNATURE"
|
||||
|
||||
values := make(chan string)
|
||||
|
||||
go func() {
|
||||
defer close(values)
|
||||
scanner := bufio.NewScanner(bzip2.NewReader(inFile))
|
||||
var partialValue string
|
||||
lastWasValue := true
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
switch {
|
||||
case len(line) == 0:
|
||||
if len(partialValue) > 0 {
|
||||
values <- strings.Replace(partialValue, " ", "", -1)
|
||||
partialValue = ""
|
||||
lastWasValue = true
|
||||
}
|
||||
continue
|
||||
case strings.HasPrefix(line, "# ======") && lastWasValue:
|
||||
values <- newKeyMarker
|
||||
lastWasValue = false
|
||||
case strings.HasPrefix(line, "# ------") && lastWasValue:
|
||||
values <- newSignatureMarker
|
||||
lastWasValue = false
|
||||
case strings.HasPrefix(line, "#"):
|
||||
continue
|
||||
default:
|
||||
partialValue += line
|
||||
}
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
var key *PublicKey
|
||||
var hashed []byte
|
||||
hash := crypto.SHA1
|
||||
h := hash.New()
|
||||
opts := &PSSOptions{
|
||||
SaltLength: PSSSaltLengthEqualsHash,
|
||||
}
|
||||
|
||||
for marker := range values {
|
||||
switch marker {
|
||||
case newKeyMarker:
|
||||
key = new(PublicKey)
|
||||
nHex, ok := <-values
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
key.N = bigFromHex(nHex)
|
||||
key.E = intFromHex(<-values)
|
||||
// We don't care for d, p, q, dP, dQ or qInv.
|
||||
for i := 0; i < 6; i++ {
|
||||
<-values
|
||||
}
|
||||
case newSignatureMarker:
|
||||
msg := fromHex(<-values)
|
||||
<-values // skip salt
|
||||
sig := fromHex(<-values)
|
||||
|
||||
h.Reset()
|
||||
h.Write(msg)
|
||||
hashed = h.Sum(hashed[:0])
|
||||
|
||||
if err := VerifyPSS(key, hash, hashed, sig, opts); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
default:
|
||||
t.Fatalf("unknown marker: " + marker)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestPSSOpenSSL ensures that we can verify a PSS signature from OpenSSL with
|
||||
// the default options. OpenSSL sets the salt length to be maximal.
|
||||
func TestPSSOpenSSL(t *testing.T) {
|
||||
hash := crypto.SHA256
|
||||
h := hash.New()
|
||||
h.Write([]byte("testing"))
|
||||
hashed := h.Sum(nil)
|
||||
|
||||
// Generated with `echo -n testing | openssl dgst -sign key.pem -sigopt rsa_padding_mode:pss -sha256 > sig`
|
||||
sig := []byte{
|
||||
0x95, 0x59, 0x6f, 0xd3, 0x10, 0xa2, 0xe7, 0xa2, 0x92, 0x9d,
|
||||
0x4a, 0x07, 0x2e, 0x2b, 0x27, 0xcc, 0x06, 0xc2, 0x87, 0x2c,
|
||||
0x52, 0xf0, 0x4a, 0xcc, 0x05, 0x94, 0xf2, 0xc3, 0x2e, 0x20,
|
||||
0xd7, 0x3e, 0x66, 0x62, 0xb5, 0x95, 0x2b, 0xa3, 0x93, 0x9a,
|
||||
0x66, 0x64, 0x25, 0xe0, 0x74, 0x66, 0x8c, 0x3e, 0x92, 0xeb,
|
||||
0xc6, 0xe6, 0xc0, 0x44, 0xf3, 0xb4, 0xb4, 0x2e, 0x8c, 0x66,
|
||||
0x0a, 0x37, 0x9c, 0x69,
|
||||
}
|
||||
|
||||
if err := VerifyPSS(&rsaPrivateKey.PublicKey, hash, hashed, sig, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPSSSigning(t *testing.T) {
|
||||
var saltLengthCombinations = []struct {
|
||||
signSaltLength, verifySaltLength int
|
||||
good bool
|
||||
}{
|
||||
{PSSSaltLengthAuto, PSSSaltLengthAuto, true},
|
||||
{PSSSaltLengthEqualsHash, PSSSaltLengthAuto, true},
|
||||
{PSSSaltLengthEqualsHash, PSSSaltLengthEqualsHash, true},
|
||||
{PSSSaltLengthEqualsHash, 8, false},
|
||||
{PSSSaltLengthAuto, PSSSaltLengthEqualsHash, false},
|
||||
{8, 8, true},
|
||||
}
|
||||
|
||||
hash := crypto.MD5
|
||||
h := hash.New()
|
||||
h.Write([]byte("testing"))
|
||||
hashed := h.Sum(nil)
|
||||
var opts PSSOptions
|
||||
|
||||
for i, test := range saltLengthCombinations {
|
||||
opts.SaltLength = test.signSaltLength
|
||||
sig, err := SignPSS(rand.Reader, rsaPrivateKey, hash, hashed, &opts)
|
||||
if err != nil {
|
||||
t.Errorf("#%d: error while signing: %s", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
opts.SaltLength = test.verifySaltLength
|
||||
err = VerifyPSS(&rsaPrivateKey.PublicKey, hash, hashed, sig, &opts)
|
||||
if (err == nil) != test.good {
|
||||
t.Errorf("#%d: bad result, wanted: %t, got: %s", i, test.good, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func bigFromHex(hex string) *big.Int {
|
||||
n, ok := new(big.Int).SetString(hex, 16)
|
||||
if !ok {
|
||||
panic("bad hex: " + hex)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func intFromHex(hex string) int {
|
||||
i, err := strconv.ParseInt(hex, 16, 32)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func fromHex(hexStr string) []byte {
|
||||
s, err := hex.DecodeString(hexStr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return s
|
||||
}
|
|
@ -5,8 +5,6 @@
|
|||
// Package rsa implements RSA encryption as specified in PKCS#1.
|
||||
package rsa
|
||||
|
||||
// TODO(agl): Add support for PSS padding.
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
|
|
|
@ -120,8 +120,10 @@ func testKeyBasics(t *testing.T, priv *PrivateKey) {
|
|||
}
|
||||
|
||||
func fromBase10(base10 string) *big.Int {
|
||||
i := new(big.Int)
|
||||
i.SetString(base10, 10)
|
||||
i, ok := new(big.Int).SetString(base10, 10)
|
||||
if !ok {
|
||||
panic("bad number: " + base10)
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
|
|
BIN
libgo/go/crypto/rsa/testdata/pss-vect.txt.bz2
vendored
Normal file
Binary file not shown.
|
@ -90,9 +90,13 @@ func (d *digest) Write(p []byte) (nn int, err error) {
|
|||
func (d0 *digest) Sum(in []byte) []byte {
|
||||
// Make a copy of d0 so that caller can keep writing and summing.
|
||||
d := *d0
|
||||
hash := d.checkSum()
|
||||
return append(in, hash[:]...)
|
||||
}
|
||||
|
||||
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
|
||||
func (d *digest) checkSum() [Size]byte {
|
||||
len := d.len
|
||||
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
|
||||
var tmp [64]byte
|
||||
tmp[0] = 0x80
|
||||
if len%64 < 56 {
|
||||
|
@ -120,5 +124,13 @@ func (d0 *digest) Sum(in []byte) []byte {
|
|||
digest[i*4+3] = byte(s)
|
||||
}
|
||||
|
||||
return append(in, digest[:]...)
|
||||
return digest
|
||||
}
|
||||
|
||||
// Sum returns the SHA1 checksum of the data.
|
||||
func Sum(data []byte) [Size]byte {
|
||||
var d digest
|
||||
d.Reset()
|
||||
d.Write(data)
|
||||
return d.checkSum()
|
||||
}
|
||||
|
|
|
@ -54,6 +54,10 @@ var golden = []sha1Test{
|
|||
func TestGolden(t *testing.T) {
|
||||
for i := 0; i < len(golden); i++ {
|
||||
g := golden[i]
|
||||
s := fmt.Sprintf("%x", Sum([]byte(g.in)))
|
||||
if s != g.out {
|
||||
t.Fatalf("Sum function: sha1(%s) = %s want %s", g.in, s, g.out)
|
||||
}
|
||||
c := New()
|
||||
for j := 0; j < 3; j++ {
|
||||
if j < 2 {
|
||||
|
@ -72,13 +76,6 @@ func TestGolden(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func ExampleNew() {
|
||||
h := New()
|
||||
io.WriteString(h, "His money is twice tainted: 'taint yours and 'taint mine.")
|
||||
fmt.Printf("% x", h.Sum(nil))
|
||||
// Output: 59 7f 6a 54 00 10 f9 4c 15 d7 18 06 a9 9a 2c 87 10 e7 47 bd
|
||||
}
|
||||
|
||||
var bench = New()
|
||||
var buf = make([]byte, 8192)
|
||||
|
||||
|
|
|
@ -6,4 +6,6 @@
|
|||
|
||||
package sha1
|
||||
|
||||
//go:noescape
|
||||
|
||||
func block(dig *digest, p []byte)
|
||||
|
|
|
@ -134,9 +134,16 @@ func (d *digest) Write(p []byte) (nn int, err error) {
|
|||
func (d0 *digest) Sum(in []byte) []byte {
|
||||
// Make a copy of d0 so that caller can keep writing and summing.
|
||||
d := *d0
|
||||
hash := d.checkSum()
|
||||
if d.is224 {
|
||||
return append(in, hash[:Size224]...)
|
||||
}
|
||||
return append(in, hash[:]...)
|
||||
}
|
||||
|
||||
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
|
||||
func (d *digest) checkSum() [Size]byte {
|
||||
len := d.len
|
||||
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
|
||||
var tmp [64]byte
|
||||
tmp[0] = 0x80
|
||||
if len%64 < 56 {
|
||||
|
@ -157,10 +164,8 @@ func (d0 *digest) Sum(in []byte) []byte {
|
|||
}
|
||||
|
||||
h := d.h[:]
|
||||
size := Size
|
||||
if d.is224 {
|
||||
h = d.h[:7]
|
||||
size = Size224
|
||||
}
|
||||
|
||||
var digest [Size]byte
|
||||
|
@ -171,5 +176,24 @@ func (d0 *digest) Sum(in []byte) []byte {
|
|||
digest[i*4+3] = byte(s)
|
||||
}
|
||||
|
||||
return append(in, digest[:size]...)
|
||||
return digest
|
||||
}
|
||||
|
||||
// Sum256 returns the SHA256 checksum of the data.
|
||||
func Sum256(data []byte) [Size]byte {
|
||||
var d digest
|
||||
d.Reset()
|
||||
d.Write(data)
|
||||
return d.checkSum()
|
||||
}
|
||||
|
||||
// Sum224 returns the SHA224 checksum of the data.
|
||||
func Sum224(data []byte) (sum224 [Size224]byte) {
|
||||
var d digest
|
||||
d.is224 = true
|
||||
d.Reset()
|
||||
d.Write(data)
|
||||
sum := d.checkSum()
|
||||
copy(sum224[:], sum[:Size224])
|
||||
return
|
||||
}
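As with md5.Sum, the new one-shot helpers skip the hash.Hash round trip; a quick sketch:

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte("hello world"))    // [32]byte
	sum224 := sha256.Sum224([]byte("hello world")) // [28]byte
	fmt.Printf("%x\n%x\n", sum, sum224)
}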
|
||||
|
|
|
@ -88,6 +88,10 @@ var golden224 = []sha256Test{
|
|||
func TestGolden(t *testing.T) {
|
||||
for i := 0; i < len(golden); i++ {
|
||||
g := golden[i]
|
||||
s := fmt.Sprintf("%x", Sum256([]byte(g.in)))
|
||||
if s != g.out {
|
||||
t.Fatalf("Sum256 function: sha256(%s) = %s want %s", g.in, s, g.out)
|
||||
}
|
||||
c := New()
|
||||
for j := 0; j < 3; j++ {
|
||||
if j < 2 {
|
||||
|
@ -106,6 +110,10 @@ func TestGolden(t *testing.T) {
|
|||
}
|
||||
for i := 0; i < len(golden224); i++ {
|
||||
g := golden224[i]
|
||||
s := fmt.Sprintf("%x", Sum224([]byte(g.in)))
|
||||
if s != g.out {
|
||||
t.Fatalf("Sum224 function: sha224(%s) = %s want %s", g.in, s, g.out)
|
||||
}
|
||||
c := New224()
|
||||
for j := 0; j < 3; j++ {
|
||||
if j < 2 {
|
||||
|
|
|
@ -135,7 +135,14 @@ func (d0 *digest) Sum(in []byte) []byte {
|
|||
// Make a copy of d0 so that caller can keep writing and summing.
|
||||
d := new(digest)
|
||||
*d = *d0
|
||||
hash := d.checkSum()
|
||||
if d.is384 {
|
||||
return append(in, hash[:Size384]...)
|
||||
}
|
||||
return append(in, hash[:]...)
|
||||
}
|
||||
|
||||
func (d *digest) checkSum() [Size]byte {
|
||||
// Padding. Add a 1 bit and 0 bits until 112 bytes mod 128.
|
||||
len := d.len
|
||||
var tmp [128]byte
|
||||
|
@ -158,10 +165,8 @@ func (d0 *digest) Sum(in []byte) []byte {
|
|||
}
|
||||
|
||||
h := d.h[:]
|
||||
size := Size
|
||||
if d.is384 {
|
||||
h = d.h[:6]
|
||||
size = Size384
|
||||
}
|
||||
|
||||
var digest [Size]byte
|
||||
|
@ -176,5 +181,24 @@ func (d0 *digest) Sum(in []byte) []byte {
|
|||
digest[i*8+7] = byte(s)
|
||||
}
|
||||
|
||||
return append(in, digest[:size]...)
|
||||
return digest
|
||||
}
|
||||
|
||||
// Sum512 returns the SHA512 checksum of the data.
|
||||
func Sum512(data []byte) [Size]byte {
|
||||
var d digest
|
||||
d.Reset()
|
||||
d.Write(data)
|
||||
return d.checkSum()
|
||||
}
|
||||
|
||||
// Sum384 returns the SHA384 checksum of the data.
|
||||
func Sum384(data []byte) (sum384 [Size384]byte) {
|
||||
var d digest
|
||||
d.is384 = true
|
||||
d.Reset()
|
||||
d.Write(data)
|
||||
sum := d.checkSum()
|
||||
copy(sum384[:], sum[:Size384])
|
||||
return
|
||||
}
|
||||
|
|
|
@ -88,6 +88,10 @@ var golden384 = []sha512Test{
|
|||
func TestGolden(t *testing.T) {
|
||||
for i := 0; i < len(golden); i++ {
|
||||
g := golden[i]
|
||||
s := fmt.Sprintf("%x", Sum512([]byte(g.in)))
|
||||
if s != g.out {
|
||||
t.Fatalf("Sum512 function: sha512(%s) = %s want %s", g.in, s, g.out)
|
||||
}
|
||||
c := New()
|
||||
for j := 0; j < 3; j++ {
|
||||
if j < 2 {
|
||||
|
@ -106,6 +110,10 @@ func TestGolden(t *testing.T) {
|
|||
}
|
||||
for i := 0; i < len(golden384); i++ {
|
||||
g := golden384[i]
|
||||
s := fmt.Sprintf("%x", Sum384([]byte(g.in)))
|
||||
if s != g.out {
|
||||
t.Fatalf("Sum384 function: sha384(%s) = %s want %s", g.in, s, g.out)
|
||||
}
|
||||
c := New384()
|
||||
for j := 0; j < 3; j++ {
|
||||
if j < 2 {
|
||||
|
|
|
@ -55,3 +55,11 @@ func ConstantTimeCopy(v int, x, y []byte) {
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ConstantTimeLessOrEq returns 1 if x <= y and 0 otherwise.
|
||||
// Its behavior is undefined if x or y are negative or > 2**31 - 1.
|
||||
func ConstantTimeLessOrEq(x, y int) int {
|
||||
x32 := int32(x)
|
||||
y32 := int32(y)
|
||||
return int(((x32 - y32 - 1) >> 31) & 1)
|
||||
}
|
||||
|
|
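A short illustration (not in the commit) of the two's-complement trick behind ConstantTimeLessOrEq: x-y-1 is negative exactly when x <= y, so the arithmetic right shift by 31 propagates the sign bit and the masked low bit is the answer.

package main

import "fmt"

func main() {
	for _, p := range [][2]int32{{10, 20}, {20, 10}, {10, 10}} {
		x, y := p[0], p[1]
		// Same expression as the library code above.
		fmt.Printf("%d <= %d -> %d\n", x, y, ((x-y-1)>>31)&1)
	}
}
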
|
@ -103,3 +103,23 @@ func TestConstantTimeCopy(t *testing.T) {
|
|||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
var lessOrEqTests = []struct {
|
||||
x, y, result int
|
||||
}{
|
||||
{0, 0, 1},
|
||||
{1, 0, 0},
|
||||
{0, 1, 1},
|
||||
{10, 20, 1},
|
||||
{20, 10, 0},
|
||||
{10, 10, 1},
|
||||
}
|
||||
|
||||
func TestConstantTimeLessOrEq(t *testing.T) {
|
||||
for i, test := range lessOrEqTests {
|
||||
result := ConstantTimeLessOrEq(test.x, test.y)
|
||||
if result != test.result {
|
||||
t.Errorf("#%d: %d <= %d gave %d, expected %d", i, test.x, test.y, result, test.result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,6 +34,22 @@ type keyAgreement interface {
|
|||
generateClientKeyExchange(*Config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error)
|
||||
}
|
||||
|
||||
const (
|
||||
// suiteECDHE indicates that the cipher suite involves elliptic curve
|
||||
// Diffie-Hellman. This means that it should only be selected when the
|
||||
// client indicates that it supports ECC with a curve and point format
|
||||
// that we're happy with.
|
||||
suiteECDHE = 1 << iota
|
||||
// suiteECDSA indicates that the cipher suite involves an ECDSA
|
||||
// signature and therefore may only be selected when the server's
|
||||
// certificate is ECDSA. If this is not set then the cipher suite is
|
||||
// RSA based.
|
||||
suiteECDSA
|
||||
// suiteTLS12 indicates that the cipher suite should only be advertised
|
||||
// and accepted when using TLS 1.2.
|
||||
suiteTLS12
|
||||
)
|
||||
|
||||
// A cipherSuite is a specific combination of key agreement, cipher and MAC
|
||||
// function. Key agreement is plain RSA or ECDHE (signed with RSA or ECDSA).
|
||||
type cipherSuite struct {
|
||||
|
@ -42,24 +58,30 @@ type cipherSuite struct {
|
|||
keyLen int
|
||||
macLen int
|
||||
ivLen int
|
||||
ka func() keyAgreement
|
||||
// If elliptic is set, a server will only consider this ciphersuite if
|
||||
// the ClientHello indicated that the client supports an elliptic curve
|
||||
// and point format that we can handle.
|
||||
elliptic bool
|
||||
cipher func(key, iv []byte, isRead bool) interface{}
|
||||
mac func(version uint16, macKey []byte) macFunction
|
||||
ka func(version uint16) keyAgreement
|
||||
// flags is a bitmask of the suite* values, above.
|
||||
flags int
|
||||
cipher func(key, iv []byte, isRead bool) interface{}
|
||||
mac func(version uint16, macKey []byte) macFunction
|
||||
aead func(key, fixedNonce []byte) cipher.AEAD
|
||||
}
|
||||
|
||||
var cipherSuites = []*cipherSuite{
|
||||
{TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, false, cipherRC4, macSHA1},
|
||||
{TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, false, cipher3DES, macSHA1},
|
||||
{TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, false, cipherAES, macSHA1},
|
||||
{TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, false, cipherAES, macSHA1},
|
||||
{TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, true, cipherRC4, macSHA1},
|
||||
{TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, true, cipher3DES, macSHA1},
|
||||
{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, true, cipherAES, macSHA1},
|
||||
{TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, true, cipherAES, macSHA1},
|
||||
// Ciphersuite order is chosen so that ECDHE comes before plain RSA
|
||||
// and RC4 comes before AES (because of the Lucky13 attack).
|
||||
{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM},
|
||||
{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12, nil, nil, aeadAESGCM},
|
||||
{TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE, cipherRC4, macSHA1, nil},
|
||||
{TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherRC4, macSHA1, nil},
|
||||
{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
|
||||
{TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil},
|
||||
{TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
|
||||
{TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil},
|
||||
{TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, 0, cipherRC4, macSHA1, nil},
|
||||
{TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
|
||||
{TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
|
||||
{TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil},
|
||||
{TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil},
|
||||
}
|
||||
|
||||
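The flags bitmask replaces the old elliptic bool. A hypothetical helper (illustrative only; the real filtering is done by tryCipherSuite, which is called later in this diff) might consult the suite* bits like this:

// selectable is an illustrative helper, not part of the package: it shows
// how the suite* flag bits defined above gate a candidate cipherSuite.
func selectable(s *cipherSuite, vers uint16, ellipticOK, ecdsaOK bool) bool {
	if s.flags&suiteECDHE != 0 && !ellipticOK {
		return false // client offered no usable curve/point format
	}
	if s.flags&suiteECDSA != 0 && !ecdsaOK {
		return false // our certificate key is not ECDSA
	}
	if s.flags&suiteTLS12 != 0 && vers < VersionTLS12 {
		return false // AEAD/SHA-256 suites require TLS 1.2
	}
	return true
}
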
func cipherRC4(key, iv []byte, isRead bool) interface{} {
|
||||
|
@ -85,7 +107,7 @@ func cipherAES(key, iv []byte, isRead bool) interface{} {
|
|||
|
||||
// macSHA1 returns a macFunction for the given protocol version.
|
||||
func macSHA1(version uint16, key []byte) macFunction {
|
||||
if version == versionSSL30 {
|
||||
if version == VersionSSL30 {
|
||||
mac := ssl30MAC{
|
||||
h: sha1.New(),
|
||||
key: make([]byte, len(key)),
|
||||
|
@ -98,7 +120,47 @@ func macSHA1(version uint16, key []byte) macFunction {
|
|||
|
||||
type macFunction interface {
|
||||
Size() int
|
||||
MAC(digestBuf, seq, data []byte) []byte
|
||||
MAC(digestBuf, seq, header, data []byte) []byte
|
||||
}
|
||||
|
||||
// fixedNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to
|
||||
// each call.
|
||||
type fixedNonceAEAD struct {
|
||||
// sealNonce and openNonce are buffers where the larger nonce will be
|
||||
// constructed. Since a seal and open operation may be running
|
||||
// concurrently, there is a separate buffer for each.
|
||||
sealNonce, openNonce []byte
|
||||
aead cipher.AEAD
|
||||
}
|
||||
|
||||
func (f *fixedNonceAEAD) NonceSize() int { return 8 }
|
||||
func (f *fixedNonceAEAD) Overhead() int { return f.aead.Overhead() }
|
||||
|
||||
func (f *fixedNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
|
||||
copy(f.sealNonce[len(f.sealNonce)-8:], nonce)
|
||||
return f.aead.Seal(out, f.sealNonce, plaintext, additionalData)
|
||||
}
|
||||
|
||||
func (f *fixedNonceAEAD) Open(out, nonce, plaintext, additionalData []byte) ([]byte, error) {
|
||||
copy(f.openNonce[len(f.openNonce)-8:], nonce)
|
||||
return f.aead.Open(out, f.openNonce, plaintext, additionalData)
|
||||
}
|
||||
|
||||
func aeadAESGCM(key, fixedNonce []byte) cipher.AEAD {
|
||||
aes, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
aead, err := cipher.NewGCM(aes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
nonce1, nonce2 := make([]byte, 12), make([]byte, 12)
|
||||
copy(nonce1, fixedNonce)
|
||||
copy(nonce2, fixedNonce)
|
||||
|
||||
return &fixedNonceAEAD{nonce1, nonce2, aead}
|
||||
}
|
||||
|
||||
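An illustrative, package-internal sketch (not committed) of the two helpers above, showing how the 4-byte fixed prefix from the key block and the 8-byte explicit nonce carried in each record combine into the 12-byte GCM nonce; the key, nonce and additional-data values here are placeholders, not test vectors.

func sealSketch() []byte {
	key := make([]byte, 16)       // AES-128 key from the TLS key block
	fixedNonce := make([]byte, 4) // implicit nonce part, also from the key block
	aead := aeadAESGCM(key, fixedNonce)

	explicit := make([]byte, 8)        // explicit nonce part carried in the record
	additionalData := make([]byte, 13) // seq || record header fields, zeroed here
	// Seal copies the 8 explicit bytes into the tail of the 12-byte buffer
	// whose head holds the fixed prefix, then calls the underlying GCM.
	return aead.Seal(nil, explicit, []byte("hello"), additionalData)
}
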
// ssl30MAC implements the SSLv3 MAC function, as defined in
|
||||
|
@ -116,7 +178,7 @@ var ssl30Pad1 = [48]byte{0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0
|
|||
|
||||
var ssl30Pad2 = [48]byte{0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c}
|
||||
|
||||
func (s ssl30MAC) MAC(digestBuf, seq, record []byte) []byte {
|
||||
func (s ssl30MAC) MAC(digestBuf, seq, header, data []byte) []byte {
|
||||
padLength := 48
|
||||
if s.h.Size() == 20 {
|
||||
padLength = 40
|
||||
|
@ -126,9 +188,9 @@ func (s ssl30MAC) MAC(digestBuf, seq, record []byte) []byte {
|
|||
s.h.Write(s.key)
|
||||
s.h.Write(ssl30Pad1[:padLength])
|
||||
s.h.Write(seq)
|
||||
s.h.Write(record[:1])
|
||||
s.h.Write(record[3:5])
|
||||
s.h.Write(record[recordHeaderLen:])
|
||||
s.h.Write(header[:1])
|
||||
s.h.Write(header[3:5])
|
||||
s.h.Write(data)
|
||||
digestBuf = s.h.Sum(digestBuf[:0])
|
||||
|
||||
s.h.Reset()
|
||||
|
@ -147,19 +209,30 @@ func (s tls10MAC) Size() int {
|
|||
return s.h.Size()
|
||||
}
|
||||
|
||||
func (s tls10MAC) MAC(digestBuf, seq, record []byte) []byte {
|
||||
func (s tls10MAC) MAC(digestBuf, seq, header, data []byte) []byte {
|
||||
s.h.Reset()
|
||||
s.h.Write(seq)
|
||||
s.h.Write(record)
|
||||
s.h.Write(header)
|
||||
s.h.Write(data)
|
||||
return s.h.Sum(digestBuf[:0])
|
||||
}
|
||||
|
||||
func rsaKA() keyAgreement {
|
||||
func rsaKA(version uint16) keyAgreement {
|
||||
return rsaKeyAgreement{}
|
||||
}
|
||||
|
||||
func ecdheRSAKA() keyAgreement {
|
||||
return new(ecdheRSAKeyAgreement)
|
||||
func ecdheECDSAKA(version uint16) keyAgreement {
|
||||
return &ecdheKeyAgreement{
|
||||
sigType: signatureECDSA,
|
||||
version: version,
|
||||
}
|
||||
}
|
||||
|
||||
func ecdheRSAKA(version uint16) keyAgreement {
|
||||
return &ecdheKeyAgreement{
|
||||
sigType: signatureRSA,
|
||||
version: version,
|
||||
}
|
||||
}
|
||||
|
||||
// mutualCipherSuite returns a cipherSuite given a list of supported
|
||||
|
@ -181,12 +254,17 @@ func mutualCipherSuite(have []uint16, want uint16) *cipherSuite {
|
|||
// A list of the possible cipher suite ids. Taken from
|
||||
// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml
|
||||
const (
|
||||
TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
|
||||
TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a
|
||||
TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f
|
||||
TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
|
||||
TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011
|
||||
TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012
|
||||
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013
|
||||
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014
|
||||
TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
|
||||
TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a
|
||||
TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f
|
||||
TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
|
||||
TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xc007
|
||||
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xc009
|
||||
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xc00a
|
||||
TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011
|
||||
TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012
|
||||
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013
|
||||
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014
|
||||
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f
|
||||
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b
|
||||
)
|
||||
|
|
|
@ -9,22 +9,27 @@ import (
|
|||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"io"
|
||||
"math/big"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
VersionSSL30 = 0x0300
|
||||
VersionTLS10 = 0x0301
|
||||
VersionTLS11 = 0x0302
|
||||
VersionTLS12 = 0x0303
|
||||
)
|
||||
|
||||
const (
|
||||
maxPlaintext = 16384 // maximum plaintext payload length
|
||||
maxCiphertext = 16384 + 2048 // maximum ciphertext payload length
|
||||
recordHeaderLen = 5 // record header length
|
||||
maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB)
|
||||
|
||||
versionSSL30 = 0x0300
|
||||
versionTLS10 = 0x0301
|
||||
|
||||
minVersion = versionSSL30
|
||||
maxVersion = versionTLS10
|
||||
minVersion = VersionSSL30
|
||||
maxVersion = VersionTLS12
|
||||
)
|
||||
|
||||
// TLS record types.
|
||||
|
@ -60,12 +65,13 @@ const (
|
|||
|
||||
// TLS extension numbers
|
||||
var (
|
||||
extensionServerName uint16 = 0
|
||||
extensionStatusRequest uint16 = 5
|
||||
extensionSupportedCurves uint16 = 10
|
||||
extensionSupportedPoints uint16 = 11
|
||||
extensionSessionTicket uint16 = 35
|
||||
extensionNextProtoNeg uint16 = 13172 // not IANA assigned
|
||||
extensionServerName uint16 = 0
|
||||
extensionStatusRequest uint16 = 5
|
||||
extensionSupportedCurves uint16 = 10
|
||||
extensionSupportedPoints uint16 = 11
|
||||
extensionSignatureAlgorithms uint16 = 13
|
||||
extensionSessionTicket uint16 = 35
|
||||
extensionNextProtoNeg uint16 = 13172 // not IANA assigned
|
||||
)
|
||||
|
||||
// TLS Elliptic Curves
|
||||
|
@ -93,25 +99,60 @@ const (
|
|||
certTypeDSSSign = 2 // A certificate containing a DSA key
|
||||
certTypeRSAFixedDH = 3 // A certificate containing a static DH key
|
||||
certTypeDSSFixedDH = 4 // A certificate containing a static DH key
|
||||
|
||||
// See RFC4492 sections 3 and 5.5.
|
||||
certTypeECDSASign = 64 // A certificate containing an ECDSA-capable public key, signed with ECDSA.
|
||||
certTypeRSAFixedECDH = 65 // A certificate containing an ECDH-capable public key, signed with RSA.
|
||||
certTypeECDSAFixedECDH = 66 // A certificate containing an ECDH-capable public key, signed with ECDSA.
|
||||
|
||||
// Rest of these are reserved by the TLS spec
|
||||
)
|
||||
|
||||
// Hash functions for TLS 1.2 (See RFC 5246, section A.4.1)
|
||||
const (
|
||||
hashSHA1 uint8 = 2
|
||||
hashSHA256 uint8 = 4
|
||||
)
|
||||
|
||||
// Signature algorithms for TLS 1.2 (See RFC 5246, section A.4.1)
|
||||
const (
|
||||
signatureRSA uint8 = 1
|
||||
signatureECDSA uint8 = 3
|
||||
)
|
||||
|
||||
// signatureAndHash mirrors the TLS 1.2, SignatureAndHashAlgorithm struct. See
|
||||
// RFC 5246, section A.4.1.
|
||||
type signatureAndHash struct {
|
||||
hash, signature uint8
|
||||
}
|
||||
|
||||
// supportedSKXSignatureAlgorithms contains the signature and hash algorithms
|
||||
// that the code advertises as supported in a TLS 1.2 ClientHello.
|
||||
var supportedSKXSignatureAlgorithms = []signatureAndHash{
|
||||
{hashSHA256, signatureRSA},
|
||||
{hashSHA256, signatureECDSA},
|
||||
{hashSHA1, signatureRSA},
|
||||
{hashSHA1, signatureECDSA},
|
||||
}
|
||||
|
||||
// supportedClientCertSignatureAlgorithms contains the signature and hash
|
||||
// algorithms that the code advertises as supported in a TLS 1.2
|
||||
// CertificateRequest.
|
||||
var supportedClientCertSignatureAlgorithms = []signatureAndHash{
|
||||
{hashSHA256, signatureRSA},
|
||||
{hashSHA256, signatureECDSA},
|
||||
}
|
||||
|
||||
// ConnectionState records basic TLS details about the connection.
|
||||
type ConnectionState struct {
|
||||
HandshakeComplete bool
|
||||
DidResume bool
|
||||
CipherSuite uint16
|
||||
NegotiatedProtocol string
|
||||
NegotiatedProtocolIsMutual bool
|
||||
|
||||
// ServerName contains the server name indicated by the client, if any.
|
||||
// (Only valid for server connections.)
|
||||
ServerName string
|
||||
|
||||
// the certificate chain that was presented by the other side
|
||||
PeerCertificates []*x509.Certificate
|
||||
// the verified certificate chains built from PeerCertificates.
|
||||
VerifiedChains [][]*x509.Certificate
|
||||
HandshakeComplete bool // TLS handshake is complete
|
||||
DidResume bool // connection resumes a previous TLS connection
|
||||
CipherSuite uint16 // cipher suite in use (TLS_RSA_WITH_RC4_128_SHA, ...)
|
||||
NegotiatedProtocol string // negotiated next protocol (from Config.NextProtos)
|
||||
NegotiatedProtocolIsMutual bool // negotiated protocol was advertised by server
|
||||
ServerName string // server name requested by client, if any (server side only)
|
||||
PeerCertificates []*x509.Certificate // certificate chain presented by remote peer
|
||||
VerifiedChains [][]*x509.Certificate // verified chains built from PeerCertificates
|
||||
}
|
||||
|
||||
// ClientAuthType declares the policy the server will follow for
|
||||
|
@ -204,6 +245,15 @@ type Config struct {
|
|||
// connections using that key are compromised.
|
||||
SessionTicketKey [32]byte
|
||||
|
||||
// MinVersion contains the minimum SSL/TLS version that is acceptable.
|
||||
// If zero, then SSLv3 is taken as the minimum.
|
||||
MinVersion uint16
|
||||
|
||||
// MaxVersion contains the maximum SSL/TLS version that is acceptable.
|
||||
// If zero, then the maximum version supported by this package is used,
|
||||
// which is currently TLS 1.2.
|
||||
MaxVersion uint16
|
||||
|
||||
serverInitOnce sync.Once // guards calling (*Config).serverInit
|
||||
}
|
||||
|
||||
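Minimal usage sketch (not part of the commit) for the new version knobs; zero values keep the package defaults of SSLv3 through TLS 1.2:

package main

import "crypto/tls"

func main() {
	cfg := &tls.Config{
		MinVersion: tls.VersionTLS10, // refuse SSLv3
		MaxVersion: tls.VersionTLS12, // explicit, same as the default maximum
	}
	_ = cfg
}
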
|
@ -248,6 +298,35 @@ func (c *Config) cipherSuites() []uint16 {
|
|||
return s
|
||||
}
|
||||
|
||||
func (c *Config) minVersion() uint16 {
|
||||
if c == nil || c.MinVersion == 0 {
|
||||
return minVersion
|
||||
}
|
||||
return c.MinVersion
|
||||
}
|
||||
|
||||
func (c *Config) maxVersion() uint16 {
|
||||
if c == nil || c.MaxVersion == 0 {
|
||||
return maxVersion
|
||||
}
|
||||
return c.MaxVersion
|
||||
}
|
||||
|
||||
// mutualVersion returns the protocol version to use given the advertised
|
||||
// version of the peer.
|
||||
func (c *Config) mutualVersion(vers uint16) (uint16, bool) {
|
||||
minVersion := c.minVersion()
|
||||
maxVersion := c.maxVersion()
|
||||
|
||||
if vers < minVersion {
|
||||
return 0, false
|
||||
}
|
||||
if vers > maxVersion {
|
||||
vers = maxVersion
|
||||
}
|
||||
return vers, true
|
||||
}
|
||||
|
||||
// getCertificateForName returns the best certificate for the given name,
|
||||
// defaulting to the first element of c.Certificates if there are no good
|
||||
// options.
|
||||
|
@ -304,7 +383,7 @@ func (c *Config) BuildNameToCertificate() {
|
|||
// A Certificate is a chain of one or more certificates, leaf first.
|
||||
type Certificate struct {
|
||||
Certificate [][]byte
|
||||
PrivateKey crypto.PrivateKey // supported types: *rsa.PrivateKey
|
||||
PrivateKey crypto.PrivateKey // supported types: *rsa.PrivateKey, *ecdsa.PrivateKey
|
||||
// OCSPStaple contains an optional OCSP response which will be served
|
||||
// to clients that request it.
|
||||
OCSPStaple []byte
|
||||
|
@ -327,18 +406,13 @@ type handshakeMessage interface {
|
|||
unmarshal([]byte) bool
|
||||
}
|
||||
|
||||
// mutualVersion returns the protocol version to use given the advertised
|
||||
// version of the peer.
|
||||
func mutualVersion(vers uint16) (uint16, bool) {
|
||||
if vers < minVersion {
|
||||
return 0, false
|
||||
}
|
||||
if vers > maxVersion {
|
||||
vers = maxVersion
|
||||
}
|
||||
return vers, true
|
||||
// TODO(jsing): Make these available to both crypto/x509 and crypto/tls.
|
||||
type dsaSignature struct {
|
||||
R, S *big.Int
|
||||
}
|
||||
|
||||
type ecdsaSignature dsaSignature
|
||||
|
||||
var emptyConfig Config
|
||||
|
||||
func defaultConfig() *Config {
|
||||
|
|
|
@ -146,6 +146,9 @@ func (hc *halfConn) changeCipherSpec() error {
|
|||
hc.mac = hc.nextMac
|
||||
hc.nextCipher = nil
|
||||
hc.nextMac = nil
|
||||
for i := range hc.seq {
|
||||
hc.seq[i] = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -229,8 +232,16 @@ func roundUp(a, b int) int {
|
|||
return a + (b-a%b)%b
|
||||
}
|
||||
|
||||
// decrypt checks and strips the mac and decrypts the data in b.
|
||||
func (hc *halfConn) decrypt(b *block) (bool, alert) {
|
||||
// cbcMode is an interface for block ciphers using cipher block chaining.
|
||||
type cbcMode interface {
|
||||
cipher.BlockMode
|
||||
SetIV([]byte)
|
||||
}
|
||||
|
||||
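For context (illustrative, not committed): the concrete CBC values returned by crypto/cipher carry a SetIV method beyond the cipher.BlockMode interface, and that extra method is what cbcMode captures so the record layer can install the per-record explicit IV of TLS 1.1 and later.

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	block, err := aes.NewCipher(make([]byte, 16))
	if err != nil {
		panic(err)
	}
	bm := cipher.NewCBCDecrypter(block, make([]byte, aes.BlockSize))

	// The assertion below has the same shape as the cbcMode interface.
	c, ok := bm.(interface {
		cipher.BlockMode
		SetIV([]byte)
	})
	fmt.Println("CBC mode exposes SetIV:", ok)
	if ok {
		c.SetIV(make([]byte, aes.BlockSize))
	}
}
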
// decrypt checks and strips the mac and decrypts the data in b. Returns a
|
||||
// success boolean, the number of bytes to skip from the start of the record in
|
||||
// order to get the application payload, and an optional alert value.
|
||||
func (hc *halfConn) decrypt(b *block) (ok bool, prefixLen int, alertValue alert) {
|
||||
// pull out payload
|
||||
payload := b.data[recordHeaderLen:]
|
||||
|
||||
|
@ -240,26 +251,54 @@ func (hc *halfConn) decrypt(b *block) (bool, alert) {
|
|||
}
|
||||
|
||||
paddingGood := byte(255)
|
||||
explicitIVLen := 0
|
||||
|
||||
// decrypt
|
||||
if hc.cipher != nil {
|
||||
switch c := hc.cipher.(type) {
|
||||
case cipher.Stream:
|
||||
c.XORKeyStream(payload, payload)
|
||||
case cipher.BlockMode:
|
||||
blockSize := c.BlockSize()
|
||||
case cipher.AEAD:
|
||||
explicitIVLen = 8
|
||||
if len(payload) < explicitIVLen {
|
||||
return false, 0, alertBadRecordMAC
|
||||
}
|
||||
nonce := payload[:8]
|
||||
payload = payload[8:]
|
||||
|
||||
if len(payload)%blockSize != 0 || len(payload) < roundUp(macSize+1, blockSize) {
|
||||
return false, alertBadRecordMAC
|
||||
var additionalData [13]byte
|
||||
copy(additionalData[:], hc.seq[:])
|
||||
copy(additionalData[8:], b.data[:3])
|
||||
n := len(payload) - c.Overhead()
|
||||
additionalData[11] = byte(n >> 8)
|
||||
additionalData[12] = byte(n)
|
||||
var err error
|
||||
payload, err = c.Open(payload[:0], nonce, payload, additionalData[:])
|
||||
if err != nil {
|
||||
return false, 0, alertBadRecordMAC
|
||||
}
|
||||
b.resize(recordHeaderLen + explicitIVLen + len(payload))
|
||||
case cbcMode:
|
||||
blockSize := c.BlockSize()
|
||||
if hc.version >= VersionTLS11 {
|
||||
explicitIVLen = blockSize
|
||||
}
|
||||
|
||||
if len(payload)%blockSize != 0 || len(payload) < roundUp(explicitIVLen+macSize+1, blockSize) {
|
||||
return false, 0, alertBadRecordMAC
|
||||
}
|
||||
|
||||
if explicitIVLen > 0 {
|
||||
c.SetIV(payload[:explicitIVLen])
|
||||
payload = payload[explicitIVLen:]
|
||||
}
|
||||
c.CryptBlocks(payload, payload)
|
||||
if hc.version == versionSSL30 {
|
||||
if hc.version == VersionSSL30 {
|
||||
payload, paddingGood = removePaddingSSL30(payload)
|
||||
} else {
|
||||
payload, paddingGood = removePadding(payload)
|
||||
}
|
||||
b.resize(recordHeaderLen + len(payload))
|
||||
b.resize(recordHeaderLen + explicitIVLen + len(payload))
|
||||
|
||||
// note that we still have a timing side-channel in the
|
||||
// MAC check, below. An attacker can align the record
|
||||
|
@ -279,25 +318,25 @@ func (hc *halfConn) decrypt(b *block) (bool, alert) {
|
|||
// check, strip mac
|
||||
if hc.mac != nil {
|
||||
if len(payload) < macSize {
|
||||
return false, alertBadRecordMAC
|
||||
return false, 0, alertBadRecordMAC
|
||||
}
|
||||
|
||||
// strip mac off payload, b.data
|
||||
n := len(payload) - macSize
|
||||
b.data[3] = byte(n >> 8)
|
||||
b.data[4] = byte(n)
|
||||
b.resize(recordHeaderLen + n)
|
||||
b.resize(recordHeaderLen + explicitIVLen + n)
|
||||
remoteMAC := payload[n:]
|
||||
localMAC := hc.mac.MAC(hc.inDigestBuf, hc.seq[0:], b.data)
|
||||
hc.incSeq()
|
||||
localMAC := hc.mac.MAC(hc.inDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], payload[:n])
|
||||
|
||||
if subtle.ConstantTimeCompare(localMAC, remoteMAC) != 1 || paddingGood != 255 {
|
||||
return false, alertBadRecordMAC
|
||||
return false, 0, alertBadRecordMAC
|
||||
}
|
||||
hc.inDigestBuf = localMAC
|
||||
}
|
||||
hc.incSeq()
|
||||
|
||||
return true, 0
|
||||
return true, recordHeaderLen + explicitIVLen, 0
|
||||
}
|
||||
|
||||
// padToBlockSize calculates the needed padding block, if any, for a payload.
|
||||
|
@ -318,11 +357,10 @@ func padToBlockSize(payload []byte, blockSize int) (prefix, finalBlock []byte) {
|
|||
}
|
||||
|
||||
// encrypt encrypts and macs the data in b.
|
||||
func (hc *halfConn) encrypt(b *block) (bool, alert) {
|
||||
func (hc *halfConn) encrypt(b *block, explicitIVLen int) (bool, alert) {
|
||||
// mac
|
||||
if hc.mac != nil {
|
||||
mac := hc.mac.MAC(hc.outDigestBuf, hc.seq[0:], b.data)
|
||||
hc.incSeq()
|
||||
mac := hc.mac.MAC(hc.outDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], b.data[recordHeaderLen+explicitIVLen:])
|
||||
|
||||
n := len(b.data)
|
||||
b.resize(n + len(mac))
|
||||
|
@ -337,11 +375,30 @@ func (hc *halfConn) encrypt(b *block) (bool, alert) {
|
|||
switch c := hc.cipher.(type) {
|
||||
case cipher.Stream:
|
||||
c.XORKeyStream(payload, payload)
|
||||
case cipher.BlockMode:
|
||||
prefix, finalBlock := padToBlockSize(payload, c.BlockSize())
|
||||
b.resize(recordHeaderLen + len(prefix) + len(finalBlock))
|
||||
c.CryptBlocks(b.data[recordHeaderLen:], prefix)
|
||||
c.CryptBlocks(b.data[recordHeaderLen+len(prefix):], finalBlock)
|
||||
case cipher.AEAD:
|
||||
payloadLen := len(b.data) - recordHeaderLen - explicitIVLen
|
||||
b.resize(len(b.data) + c.Overhead())
|
||||
nonce := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen]
|
||||
payload := b.data[recordHeaderLen+explicitIVLen:]
|
||||
payload = payload[:payloadLen]
|
||||
|
||||
var additionalData [13]byte
|
||||
copy(additionalData[:], hc.seq[:])
|
||||
copy(additionalData[8:], b.data[:3])
|
||||
additionalData[11] = byte(payloadLen >> 8)
|
||||
additionalData[12] = byte(payloadLen)
|
||||
|
||||
c.Seal(payload[:0], nonce, payload, additionalData[:])
|
||||
case cbcMode:
|
||||
blockSize := c.BlockSize()
|
||||
if explicitIVLen > 0 {
|
||||
c.SetIV(payload[:explicitIVLen])
|
||||
payload = payload[explicitIVLen:]
|
||||
}
|
||||
prefix, finalBlock := padToBlockSize(payload, blockSize)
|
||||
b.resize(recordHeaderLen + explicitIVLen + len(prefix) + len(finalBlock))
|
||||
c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen:], prefix)
|
||||
c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen+len(prefix):], finalBlock)
|
||||
default:
|
||||
panic("unknown cipher type")
|
||||
}
|
||||
|
@ -351,6 +408,7 @@ func (hc *halfConn) encrypt(b *block) (bool, alert) {
|
|||
n := len(b.data) - recordHeaderLen
|
||||
b.data[3] = byte(n >> 8)
|
||||
b.data[4] = byte(n)
|
||||
hc.incSeq()
|
||||
|
||||
return true, 0
|
||||
}
|
||||
|
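A hypothetical helper (not in the commit) that mirrors how both decrypt and encrypt above assemble the 13 bytes of AEAD additional data: sequence number, record type, protocol version, and plaintext length.

func gcmAdditionalData(seq [8]byte, recordType byte, vers uint16, plaintextLen int) [13]byte {
	var ad [13]byte
	copy(ad[:8], seq[:])             // 8-byte record sequence number
	ad[8] = recordType               // record type from the header
	ad[9] = byte(vers >> 8)          // protocol version, high byte
	ad[10] = byte(vers)              // protocol version, low byte
	ad[11] = byte(plaintextLen >> 8) // plaintext length, high byte
	ad[12] = byte(plaintextLen)      // plaintext length, low byte
	return ad
}
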
@ -534,10 +592,11 @@ Again:
|
|||
|
||||
// Process message.
|
||||
b, c.rawInput = c.in.splitBlock(b, recordHeaderLen+n)
|
||||
b.off = recordHeaderLen
|
||||
if ok, err := c.in.decrypt(b); !ok {
|
||||
ok, off, err := c.in.decrypt(b)
|
||||
if !ok {
|
||||
return c.sendAlert(err)
|
||||
}
|
||||
b.off = off
|
||||
data := b.data[b.off:]
|
||||
if len(data) > maxPlaintext {
|
||||
c.sendAlert(alertRecordOverflow)
|
||||
|
@ -637,18 +696,52 @@ func (c *Conn) writeRecord(typ recordType, data []byte) (n int, err error) {
|
|||
if m > maxPlaintext {
|
||||
m = maxPlaintext
|
||||
}
|
||||
b.resize(recordHeaderLen + m)
|
||||
explicitIVLen := 0
|
||||
explicitIVIsSeq := false
|
||||
|
||||
var cbc cbcMode
|
||||
if c.out.version >= VersionTLS11 {
|
||||
var ok bool
|
||||
if cbc, ok = c.out.cipher.(cbcMode); ok {
|
||||
explicitIVLen = cbc.BlockSize()
|
||||
}
|
||||
}
|
||||
if explicitIVLen == 0 {
|
||||
if _, ok := c.out.cipher.(cipher.AEAD); ok {
|
||||
explicitIVLen = 8
|
||||
// The AES-GCM construction in TLS has an
|
||||
// explicit nonce so that the nonce can be
|
||||
// random. However, the nonce is only 8 bytes
|
||||
// which is too small for a secure, random
|
||||
// nonce. Therefore we use the sequence number
|
||||
// as the nonce.
|
||||
explicitIVIsSeq = true
|
||||
}
|
||||
}
|
||||
b.resize(recordHeaderLen + explicitIVLen + m)
|
||||
b.data[0] = byte(typ)
|
||||
vers := c.vers
|
||||
if vers == 0 {
|
||||
vers = maxVersion
|
||||
// Some TLS servers fail if the record version is
|
||||
// greater than TLS 1.0 for the initial ClientHello.
|
||||
vers = VersionTLS10
|
||||
}
|
||||
b.data[1] = byte(vers >> 8)
|
||||
b.data[2] = byte(vers)
|
||||
b.data[3] = byte(m >> 8)
|
||||
b.data[4] = byte(m)
|
||||
copy(b.data[recordHeaderLen:], data)
|
||||
c.out.encrypt(b)
|
||||
if explicitIVLen > 0 {
|
||||
explicitIV := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen]
|
||||
if explicitIVIsSeq {
|
||||
copy(explicitIV, c.out.seq[:])
|
||||
} else {
|
||||
if _, err = io.ReadFull(c.config.rand(), explicitIV); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
copy(b.data[recordHeaderLen+explicitIVLen:], data)
|
||||
c.out.encrypt(b, explicitIVLen)
|
||||
_, err = c.conn.Write(b.data)
|
||||
if err != nil {
|
||||
break
|
||||
|
@ -709,7 +802,9 @@ func (c *Conn) readHandshake() (interface{}, error) {
|
|||
case typeCertificate:
|
||||
m = new(certificateMsg)
|
||||
case typeCertificateRequest:
|
||||
m = new(certificateRequestMsg)
|
||||
m = &certificateRequestMsg{
|
||||
hasSignatureAndHash: c.vers >= VersionTLS12,
|
||||
}
|
||||
case typeCertificateStatus:
|
||||
m = new(certificateStatusMsg)
|
||||
case typeServerKeyExchange:
|
||||
|
@ -719,7 +814,9 @@ func (c *Conn) readHandshake() (interface{}, error) {
|
|||
case typeClientKeyExchange:
|
||||
m = new(clientKeyExchangeMsg)
|
||||
case typeCertificateVerify:
|
||||
m = new(certificateVerifyMsg)
|
||||
m = &certificateVerifyMsg{
|
||||
hasSignatureAndHash: c.vers >= VersionTLS12,
|
||||
}
|
||||
case typeNextProtocol:
|
||||
m = new(nextProtoMsg)
|
||||
case typeFinished:
|
||||
|
@ -768,7 +865,7 @@ func (c *Conn) Write(b []byte) (int, error) {
|
|||
// http://www.imperialviolet.org/2012/01/15/beastfollowup.html
|
||||
|
||||
var m int
|
||||
if len(b) > 1 && c.vers <= versionTLS10 {
|
||||
if len(b) > 1 && c.vers <= VersionTLS10 {
|
||||
if _, ok := c.out.cipher.(cipher.BlockMode); ok {
|
||||
n, err := c.writeRecord(recordTypeApplicationData, b[:1])
|
||||
if err != nil {
|
||||
|
@ -792,21 +889,32 @@ func (c *Conn) Read(b []byte) (n int, err error) {
|
|||
c.in.Lock()
|
||||
defer c.in.Unlock()
|
||||
|
||||
for c.input == nil && c.error() == nil {
|
||||
if err := c.readRecord(recordTypeApplicationData); err != nil {
|
||||
// Soft error, like EAGAIN
|
||||
// Some OpenSSL servers send empty records in order to randomize the
|
||||
// CBC IV. So this loop ignores a limited number of empty records.
|
||||
const maxConsecutiveEmptyRecords = 100
|
||||
for emptyRecordCount := 0; emptyRecordCount <= maxConsecutiveEmptyRecords; emptyRecordCount++ {
|
||||
for c.input == nil && c.error() == nil {
|
||||
if err := c.readRecord(recordTypeApplicationData); err != nil {
|
||||
// Soft error, like EAGAIN
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if err := c.error(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
n, err = c.input.Read(b)
|
||||
if c.input.off >= len(c.input.data) {
|
||||
c.in.freeBlock(c.input)
|
||||
c.input = nil
|
||||
}
|
||||
|
||||
if n != 0 || err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
if err := c.error(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err = c.input.Read(b)
|
||||
if c.input.off >= len(c.input.data) {
|
||||
c.in.freeBlock(c.input)
|
||||
c.input = nil
|
||||
}
|
||||
return n, nil
|
||||
|
||||
return 0, io.ErrNoProgress
|
||||
}
|
||||
|
||||
// Close closes the connection.
|
||||
|
|
|
@ -30,7 +30,7 @@ var (
|
|||
validFrom = flag.String("start-date", "", "Creation date formatted as Jan 1 15:04:05 2011")
|
||||
validFor = flag.Duration("duration", 365*24*time.Hour, "Duration that certificate is valid for")
|
||||
isCA = flag.Bool("ca", false, "whether this cert should be its own Certificate Authority")
|
||||
rsaBits = flag.Int("rsa-bits", 1024, "Size of RSA key to generate")
|
||||
rsaBits = flag.Int("rsa-bits", 2048, "Size of RSA key to generate")
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
|
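The generate_cert default moves from 1024- to 2048-bit RSA; a minimal sketch (not part of the commit) of the key-generation call behind that flag:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048) // new default size
	if err != nil {
		panic(err)
	}
	fmt.Println("generated RSA key with modulus bits:", priv.N.BitLen())
}
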
|
@ -6,25 +6,23 @@ package tls
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/subtle"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func (c *Conn) clientHandshake() error {
|
||||
finishedHash := newFinishedHash(versionTLS10)
|
||||
|
||||
if c.config == nil {
|
||||
c.config = defaultConfig()
|
||||
}
|
||||
|
||||
hello := &clientHelloMsg{
|
||||
vers: maxVersion,
|
||||
cipherSuites: c.config.cipherSuites(),
|
||||
vers: c.config.maxVersion(),
|
||||
compressionMethods: []uint8{compressionNone},
|
||||
random: make([]byte, 32),
|
||||
ocspStapling: true,
|
||||
|
@ -34,6 +32,25 @@ func (c *Conn) clientHandshake() error {
|
|||
nextProtoNeg: len(c.config.NextProtos) > 0,
|
||||
}
|
||||
|
||||
possibleCipherSuites := c.config.cipherSuites()
|
||||
hello.cipherSuites = make([]uint16, 0, len(possibleCipherSuites))
|
||||
|
||||
NextCipherSuite:
|
||||
for _, suiteId := range possibleCipherSuites {
|
||||
for _, suite := range cipherSuites {
|
||||
if suite.id != suiteId {
|
||||
continue
|
||||
}
|
||||
// Don't advertise TLS 1.2-only cipher suites unless
|
||||
// we're attempting TLS 1.2.
|
||||
if hello.vers < VersionTLS12 && suite.flags&suiteTLS12 != 0 {
|
||||
continue
|
||||
}
|
||||
hello.cipherSuites = append(hello.cipherSuites, suiteId)
|
||||
continue NextCipherSuite
|
||||
}
|
||||
}
|
||||
|
||||
t := uint32(c.config.time().Unix())
|
||||
hello.random[0] = byte(t >> 24)
|
||||
hello.random[1] = byte(t >> 16)
|
||||
|
@ -45,7 +62,10 @@ func (c *Conn) clientHandshake() error {
|
|||
return errors.New("short read from Rand")
|
||||
}
|
||||
|
||||
finishedHash.Write(hello.marshal())
|
||||
if hello.vers >= VersionTLS12 {
|
||||
hello.signatureAndHashes = supportedSKXSignatureAlgorithms
|
||||
}
|
||||
|
||||
c.writeRecord(recordTypeHandshake, hello.marshal())
|
||||
|
||||
msg, err := c.readHandshake()
|
||||
|
@ -56,16 +76,19 @@ func (c *Conn) clientHandshake() error {
|
|||
if !ok {
|
||||
return c.sendAlert(alertUnexpectedMessage)
|
||||
}
|
||||
finishedHash.Write(serverHello.marshal())
|
||||
|
||||
vers, ok := mutualVersion(serverHello.vers)
|
||||
if !ok || vers < versionTLS10 {
|
||||
vers, ok := c.config.mutualVersion(serverHello.vers)
|
||||
if !ok || vers < VersionTLS10 {
|
||||
// TLS 1.0 is the minimum version supported as a client.
|
||||
return c.sendAlert(alertProtocolVersion)
|
||||
}
|
||||
c.vers = vers
|
||||
c.haveVers = true
|
||||
|
||||
finishedHash := newFinishedHash(c.vers)
|
||||
finishedHash.Write(hello.marshal())
|
||||
finishedHash.Write(serverHello.marshal())
|
||||
|
||||
if serverHello.compressionMethod != compressionNone {
|
||||
return c.sendAlert(alertUnexpectedMessage)
|
||||
}
|
||||
|
@ -121,7 +144,10 @@ func (c *Conn) clientHandshake() error {
|
|||
}
|
||||
}
|
||||
|
||||
if _, ok := certs[0].PublicKey.(*rsa.PublicKey); !ok {
|
||||
switch certs[0].PublicKey.(type) {
|
||||
case *rsa.PublicKey, *ecdsa.PublicKey:
|
||||
break
|
||||
default:
|
||||
return c.sendAlert(alertUnsupportedCertificate)
|
||||
}
|
||||
|
||||
|
@ -148,7 +174,7 @@ func (c *Conn) clientHandshake() error {
|
|||
return err
|
||||
}
|
||||
|
||||
keyAgreement := suite.ka()
|
||||
keyAgreement := suite.ka(c.vers)
|
||||
|
||||
skx, ok := msg.(*serverKeyExchangeMsg)
|
||||
if ok {
|
||||
|
@ -165,7 +191,7 @@ func (c *Conn) clientHandshake() error {
|
|||
}
|
||||
}
|
||||
|
||||
var certToSend *Certificate
|
||||
var chainToSend *Certificate
|
||||
var certRequested bool
|
||||
certReq, ok := msg.(*certificateRequestMsg)
|
||||
if ok {
|
||||
|
@ -184,12 +210,13 @@ func (c *Conn) clientHandshake() error {
|
|||
|
||||
finishedHash.Write(certReq.marshal())
|
||||
|
||||
// For now, we only know how to sign challenges with RSA
|
||||
rsaAvail := false
|
||||
var rsaAvail, ecdsaAvail bool
|
||||
for _, certType := range certReq.certificateTypes {
|
||||
if certType == certTypeRSASign {
|
||||
switch certType {
|
||||
case certTypeRSASign:
|
||||
rsaAvail = true
|
||||
break
|
||||
case certTypeECDSASign:
|
||||
ecdsaAvail = true
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -197,35 +224,42 @@ func (c *Conn) clientHandshake() error {
|
|||
// where SignatureAlgorithm is RSA and the Issuer is in
|
||||
// certReq.certificateAuthorities
|
||||
findCert:
|
||||
for i, cert := range c.config.Certificates {
|
||||
if !rsaAvail {
|
||||
for i, chain := range c.config.Certificates {
|
||||
if !rsaAvail && !ecdsaAvail {
|
||||
continue
|
||||
}
|
||||
|
||||
leaf := cert.Leaf
|
||||
if leaf == nil {
|
||||
if leaf, err = x509.ParseCertificate(cert.Certificate[0]); err != nil {
|
||||
c.sendAlert(alertInternalError)
|
||||
return errors.New("tls: failed to parse client certificate #" + strconv.Itoa(i) + ": " + err.Error())
|
||||
for j, cert := range chain.Certificate {
|
||||
x509Cert := chain.Leaf
|
||||
// parse the certificate if this isn't the leaf
|
||||
// node, or if chain.Leaf was nil
|
||||
if j != 0 || x509Cert == nil {
|
||||
if x509Cert, err = x509.ParseCertificate(cert); err != nil {
|
||||
c.sendAlert(alertInternalError)
|
||||
return errors.New("tls: failed to parse client certificate #" + strconv.Itoa(i) + ": " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if leaf.PublicKeyAlgorithm != x509.RSA {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case rsaAvail && x509Cert.PublicKeyAlgorithm == x509.RSA:
|
||||
case ecdsaAvail && x509Cert.PublicKeyAlgorithm == x509.ECDSA:
|
||||
default:
|
||||
continue findCert
|
||||
}
|
||||
|
||||
if len(certReq.certificateAuthorities) == 0 {
|
||||
// they gave us an empty list, so just take the
|
||||
// first RSA cert from c.config.Certificates
|
||||
certToSend = &cert
|
||||
break
|
||||
}
|
||||
|
||||
for _, ca := range certReq.certificateAuthorities {
|
||||
if bytes.Equal(leaf.RawIssuer, ca) {
|
||||
certToSend = &cert
|
||||
if len(certReq.certificateAuthorities) == 0 {
|
||||
// they gave us an empty list, so just take the
|
||||
// first RSA cert from c.config.Certificates
|
||||
chainToSend = &chain
|
||||
break findCert
|
||||
}
|
||||
|
||||
for _, ca := range certReq.certificateAuthorities {
|
||||
if bytes.Equal(x509Cert.RawIssuer, ca) {
|
||||
chainToSend = &chain
|
||||
break findCert
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -246,8 +280,8 @@ func (c *Conn) clientHandshake() error {
|
|||
// certificate to send.
|
||||
if certRequested {
|
||||
certMsg = new(certificateMsg)
|
||||
if certToSend != nil {
|
||||
certMsg.certificates = certToSend.Certificate
|
||||
if chainToSend != nil {
|
||||
certMsg.certificates = chainToSend.Certificate
|
||||
}
|
||||
finishedHash.Write(certMsg.marshal())
|
||||
c.writeRecord(recordTypeHandshake, certMsg.marshal())
|
||||
|
@ -263,12 +297,29 @@ func (c *Conn) clientHandshake() error {
|
|||
c.writeRecord(recordTypeHandshake, ckx.marshal())
|
||||
}
|
||||
|
||||
if certToSend != nil {
|
||||
certVerify := new(certificateVerifyMsg)
|
||||
digest := make([]byte, 0, 36)
|
||||
digest = finishedHash.serverMD5.Sum(digest)
|
||||
digest = finishedHash.serverSHA1.Sum(digest)
|
||||
signed, err := rsa.SignPKCS1v15(c.config.rand(), c.config.Certificates[0].PrivateKey.(*rsa.PrivateKey), crypto.MD5SHA1, digest)
|
||||
if chainToSend != nil {
|
||||
var signed []byte
|
||||
certVerify := &certificateVerifyMsg{
|
||||
hasSignatureAndHash: c.vers >= VersionTLS12,
|
||||
}
|
||||
|
||||
switch key := c.config.Certificates[0].PrivateKey.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
digest, _, hashId := finishedHash.hashForClientCertificate(signatureECDSA)
|
||||
r, s, err := ecdsa.Sign(c.config.rand(), key, digest)
|
||||
if err == nil {
|
||||
signed, err = asn1.Marshal(ecdsaSignature{r, s})
|
||||
}
|
||||
certVerify.signatureAndHash.signature = signatureECDSA
|
||||
certVerify.signatureAndHash.hash = hashId
|
||||
case *rsa.PrivateKey:
|
||||
digest, hashFunc, hashId := finishedHash.hashForClientCertificate(signatureRSA)
|
||||
signed, err = rsa.SignPKCS1v15(c.config.rand(), key, hashFunc, digest)
|
||||
certVerify.signatureAndHash.signature = signatureRSA
|
||||
certVerify.signatureAndHash.hash = hashId
|
||||
default:
|
||||
err = errors.New("unknown private key type")
|
||||
}
|
||||
if err != nil {
|
||||
return c.sendAlert(alertInternalError)
|
||||
}
|
||||
|
@ -282,8 +333,14 @@ func (c *Conn) clientHandshake() error {
|
|||
clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
|
||||
keysFromMasterSecret(c.vers, masterSecret, hello.random, serverHello.random, suite.macLen, suite.keyLen, suite.ivLen)
|
||||
|
||||
clientCipher := suite.cipher(clientKey, clientIV, false /* not for reading */)
|
||||
clientHash := suite.mac(c.vers, clientMAC)
|
||||
var clientCipher interface{}
|
||||
var clientHash macFunction
|
||||
if suite.cipher != nil {
|
||||
clientCipher = suite.cipher(clientKey, clientIV, false /* not for reading */)
|
||||
clientHash = suite.mac(c.vers, clientMAC)
|
||||
} else {
|
||||
clientCipher = suite.aead(clientKey, clientIV)
|
||||
}
|
||||
c.out.prepareCipherSpec(c.vers, clientCipher, clientHash)
|
||||
c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
|
||||
|
||||
|
@ -303,8 +360,14 @@ func (c *Conn) clientHandshake() error {
|
|||
finishedHash.Write(finished.marshal())
|
||||
c.writeRecord(recordTypeHandshake, finished.marshal())
|
||||
|
||||
serverCipher := suite.cipher(serverKey, serverIV, true /* for reading */)
|
||||
serverHash := suite.mac(c.vers, serverMAC)
|
||||
var serverCipher interface{}
|
||||
var serverHash macFunction
|
||||
if suite.cipher != nil {
|
||||
serverCipher = suite.cipher(serverKey, serverIV, true /* for reading */)
|
||||
serverHash = suite.mac(c.vers, serverMAC)
|
||||
} else {
|
||||
serverCipher = suite.aead(serverKey, serverIV)
|
||||
}
|
||||
c.in.prepareCipherSpec(c.vers, serverCipher, serverHash)
|
||||
c.readRecord(recordTypeChangeCipherSpec)
|
||||
if err := c.error(); err != nil {
|
||||
|
|
File diff suppressed because it is too large
|
@ -20,6 +20,7 @@ type clientHelloMsg struct {
|
|||
supportedPoints []uint8
|
||||
ticketSupported bool
|
||||
sessionTicket []uint8
|
||||
signatureAndHashes []signatureAndHash
|
||||
}
|
||||
|
||||
func (m *clientHelloMsg) equal(i interface{}) bool {
|
||||
|
@ -40,7 +41,8 @@ func (m *clientHelloMsg) equal(i interface{}) bool {
|
|||
eqUint16s(m.supportedCurves, m1.supportedCurves) &&
|
||||
bytes.Equal(m.supportedPoints, m1.supportedPoints) &&
|
||||
m.ticketSupported == m1.ticketSupported &&
|
||||
bytes.Equal(m.sessionTicket, m1.sessionTicket)
|
||||
bytes.Equal(m.sessionTicket, m1.sessionTicket) &&
|
||||
eqSignatureAndHashes(m.signatureAndHashes, m1.signatureAndHashes)
|
||||
}
|
||||
|
||||
func (m *clientHelloMsg) marshal() []byte {
|
||||
|
@ -74,6 +76,10 @@ func (m *clientHelloMsg) marshal() []byte {
|
|||
extensionsLength += len(m.sessionTicket)
|
||||
numExtensions++
|
||||
}
|
||||
if len(m.signatureAndHashes) > 0 {
|
||||
extensionsLength += 2 + 2*len(m.signatureAndHashes)
|
||||
numExtensions++
|
||||
}
|
||||
if numExtensions > 0 {
|
||||
extensionsLength += 4 * numExtensions
|
||||
length += 2 + extensionsLength
|
||||
|
@ -199,6 +205,25 @@ func (m *clientHelloMsg) marshal() []byte {
|
|||
copy(z, m.sessionTicket)
|
||||
z = z[len(m.sessionTicket):]
|
||||
}
|
||||
if len(m.signatureAndHashes) > 0 {
|
||||
// https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1
|
||||
z[0] = byte(extensionSignatureAlgorithms >> 8)
|
||||
z[1] = byte(extensionSignatureAlgorithms)
|
||||
l := 2 + 2*len(m.signatureAndHashes)
|
||||
z[2] = byte(l >> 8)
|
||||
z[3] = byte(l)
|
||||
z = z[4:]
|
||||
|
||||
l -= 2
|
||||
z[0] = byte(l >> 8)
|
||||
z[1] = byte(l)
|
||||
z = z[2:]
|
||||
for _, sigAndHash := range m.signatureAndHashes {
|
||||
z[0] = sigAndHash.hash
|
||||
z[1] = sigAndHash.signature
|
||||
z = z[2:]
|
||||
}
|
||||
}
|
||||
|
||||
m.raw = x
|
||||
|
||||
|
@ -249,6 +274,7 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool {
|
|||
m.ocspStapling = false
|
||||
m.ticketSupported = false
|
||||
m.sessionTicket = nil
|
||||
m.signatureAndHashes = nil
|
||||
|
||||
if len(data) == 0 {
|
||||
// ClientHello is optionally followed by extension data
|
||||
|
@ -336,6 +362,23 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool {
|
|||
// http://tools.ietf.org/html/rfc5077#section-3.2
|
||||
m.ticketSupported = true
|
||||
m.sessionTicket = data[:length]
|
||||
case extensionSignatureAlgorithms:
|
||||
// https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1
|
||||
if length < 2 || length&1 != 0 {
|
||||
return false
|
||||
}
|
||||
l := int(data[0])<<8 | int(data[1])
|
||||
if l != length-2 {
|
||||
return false
|
||||
}
|
||||
n := l / 2
|
||||
d := data[2:]
|
||||
m.signatureAndHashes = make([]signatureAndHash, n)
|
||||
for i := range m.signatureAndHashes {
|
||||
m.signatureAndHashes[i].hash = d[0]
|
||||
m.signatureAndHashes[i].signature = d[1]
|
||||
d = d[2:]
|
||||
}
|
||||
}
|
||||
data = data[length:]
|
||||
}
|
||||
|
@ -899,8 +942,14 @@ func (m *nextProtoMsg) unmarshal(data []byte) bool {
|
|||
}
|
||||
|
||||
type certificateRequestMsg struct {
|
||||
raw []byte
|
||||
raw []byte
|
||||
// hasSignatureAndHash indicates whether this message includes a list
|
||||
// of signature and hash functions. This change was introduced with TLS
|
||||
// 1.2.
|
||||
hasSignatureAndHash bool
|
||||
|
||||
certificateTypes []byte
|
||||
signatureAndHashes []signatureAndHash
|
||||
certificateAuthorities [][]byte
|
||||
}
|
||||
|
||||
|
@ -912,7 +961,8 @@ func (m *certificateRequestMsg) equal(i interface{}) bool {
|
|||
|
||||
return bytes.Equal(m.raw, m1.raw) &&
|
||||
bytes.Equal(m.certificateTypes, m1.certificateTypes) &&
|
||||
eqByteSlices(m.certificateAuthorities, m1.certificateAuthorities)
|
||||
eqByteSlices(m.certificateAuthorities, m1.certificateAuthorities) &&
|
||||
eqSignatureAndHashes(m.signatureAndHashes, m1.signatureAndHashes)
|
||||
}
|
||||
|
||||
func (m *certificateRequestMsg) marshal() (x []byte) {
|
||||
|
@ -928,6 +978,10 @@ func (m *certificateRequestMsg) marshal() (x []byte) {
|
|||
}
|
||||
length += casLength
|
||||
|
||||
if m.hasSignatureAndHash {
|
||||
length += 2 + 2*len(m.signatureAndHashes)
|
||||
}
|
||||
|
||||
x = make([]byte, 4+length)
|
||||
x[0] = typeCertificateRequest
|
||||
x[1] = uint8(length >> 16)
|
||||
|
@ -938,6 +992,19 @@ func (m *certificateRequestMsg) marshal() (x []byte) {
|
|||
|
||||
copy(x[5:], m.certificateTypes)
|
||||
y := x[5+len(m.certificateTypes):]
|
||||
|
||||
if m.hasSignatureAndHash {
|
||||
n := len(m.signatureAndHashes) * 2
|
||||
y[0] = uint8(n >> 8)
|
||||
y[1] = uint8(n)
|
||||
y = y[2:]
|
||||
for _, sigAndHash := range m.signatureAndHashes {
|
||||
y[0] = sigAndHash.hash
|
||||
y[1] = sigAndHash.signature
|
||||
y = y[2:]
|
||||
}
|
||||
}
|
||||
|
||||
y[0] = uint8(casLength >> 8)
|
||||
y[1] = uint8(casLength)
|
||||
y = y[2:]
|
||||
|
@ -978,6 +1045,27 @@ func (m *certificateRequestMsg) unmarshal(data []byte) bool {
|
|||
|
||||
data = data[numCertTypes:]
|
||||
|
||||
if m.hasSignatureAndHash {
|
||||
if len(data) < 2 {
|
||||
return false
|
||||
}
|
||||
sigAndHashLen := uint16(data[0])<<8 | uint16(data[1])
|
||||
data = data[2:]
|
||||
if sigAndHashLen&1 != 0 {
|
||||
return false
|
||||
}
|
||||
if len(data) < int(sigAndHashLen) {
|
||||
return false
|
||||
}
|
||||
numSigAndHash := sigAndHashLen / 2
|
||||
m.signatureAndHashes = make([]signatureAndHash, numSigAndHash)
|
||||
for i := range m.signatureAndHashes {
|
||||
m.signatureAndHashes[i].hash = data[0]
|
||||
m.signatureAndHashes[i].signature = data[1]
|
||||
data = data[2:]
|
||||
}
|
||||
}
|
||||
|
||||
if len(data) < 2 {
|
||||
return false
|
||||
}
|
||||
|
@ -1013,8 +1101,10 @@ func (m *certificateRequestMsg) unmarshal(data []byte) bool {
|
|||
}
|
||||
|
||||
type certificateVerifyMsg struct {
|
||||
raw []byte
|
||||
signature []byte
|
||||
raw []byte
|
||||
hasSignatureAndHash bool
|
||||
signatureAndHash signatureAndHash
|
||||
signature []byte
|
||||
}
|
||||
|
||||
func (m *certificateVerifyMsg) equal(i interface{}) bool {
|
||||
|
@ -1024,6 +1114,9 @@ func (m *certificateVerifyMsg) equal(i interface{}) bool {
|
|||
}
|
||||
|
||||
return bytes.Equal(m.raw, m1.raw) &&
|
||||
m.hasSignatureAndHash == m1.hasSignatureAndHash &&
|
||||
m.signatureAndHash.hash == m1.signatureAndHash.hash &&
|
||||
m.signatureAndHash.signature == m1.signatureAndHash.signature &&
|
||||
bytes.Equal(m.signature, m1.signature)
|
||||
}
|
||||
|
||||
|
@ -1035,14 +1128,23 @@ func (m *certificateVerifyMsg) marshal() (x []byte) {
|
|||
// See http://tools.ietf.org/html/rfc4346#section-7.4.8
|
||||
siglength := len(m.signature)
|
||||
length := 2 + siglength
|
||||
if m.hasSignatureAndHash {
|
||||
length += 2
|
||||
}
|
||||
x = make([]byte, 4+length)
|
||||
x[0] = typeCertificateVerify
|
||||
x[1] = uint8(length >> 16)
|
||||
x[2] = uint8(length >> 8)
|
||||
x[3] = uint8(length)
|
||||
x[4] = uint8(siglength >> 8)
|
||||
x[5] = uint8(siglength)
|
||||
copy(x[6:], m.signature)
|
||||
y := x[4:]
|
||||
if m.hasSignatureAndHash {
|
||||
y[0] = m.signatureAndHash.hash
|
||||
y[1] = m.signatureAndHash.signature
|
||||
y = y[2:]
|
||||
}
|
||||
y[0] = uint8(siglength >> 8)
|
||||
y[1] = uint8(siglength)
|
||||
copy(y[2:], m.signature)
|
||||
|
||||
m.raw = x
|
||||
|
||||
|
@ -1061,12 +1163,23 @@ func (m *certificateVerifyMsg) unmarshal(data []byte) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
siglength := int(data[4])<<8 + int(data[5])
|
||||
if len(data)-6 != siglength {
|
||||
data = data[4:]
|
||||
if m.hasSignatureAndHash {
|
||||
m.signatureAndHash.hash = data[0]
|
||||
m.signatureAndHash.signature = data[1]
|
||||
data = data[2:]
|
||||
}
|
||||
|
||||
if len(data) < 2 {
|
||||
return false
|
||||
}
|
||||
siglength := int(data[0])<<8 + int(data[1])
|
||||
data = data[2:]
|
||||
if len(data) != siglength {
|
||||
return false
|
||||
}
|
||||
|
||||
m.signature = data[6:]
|
||||
m.signature = data
|
||||
|
||||
return true
|
||||
}
|
||||
|
@ -1165,3 +1278,16 @@ func eqByteSlices(x, y [][]byte) bool {
|
|||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func eqSignatureAndHashes(x, y []signatureAndHash) bool {
|
||||
if len(x) != len(y) {
|
||||
return false
|
||||
}
|
||||
for i, v := range x {
|
||||
v2 := y[i]
|
||||
if v.hash != v2.hash || v.signature != v2.signature {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -135,6 +135,9 @@ func (*clientHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
|||
m.sessionTicket = randomBytes(rand.Intn(300), rand)
|
||||
}
|
||||
}
|
||||
if rand.Intn(10) > 5 {
|
||||
m.signatureAndHashes = supportedSKXSignatureAlgorithms
|
||||
}
|
||||
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
|
|
@ -6,9 +6,11 @@ package tls
|
|||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/subtle"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
@ -21,10 +23,12 @@ type serverHandshakeState struct {
|
|||
hello *serverHelloMsg
|
||||
suite *cipherSuite
|
||||
ellipticOk bool
|
||||
ecdsaOk bool
|
||||
sessionState *sessionState
|
||||
finishedHash finishedHash
|
||||
masterSecret []byte
|
||||
certsFromClient [][]byte
|
||||
cert *Certificate
|
||||
}
|
||||
|
||||
// serverHandshake performs a TLS handshake as a server.
|
||||
|
@ -98,7 +102,7 @@ func (hs *serverHandshakeState) readClientHello() (isResume bool, err error) {
|
|||
if !ok {
|
||||
return false, c.sendAlert(alertUnexpectedMessage)
|
||||
}
|
||||
c.vers, ok = mutualVersion(hs.clientHello.vers)
|
||||
c.vers, ok = config.mutualVersion(hs.clientHello.vers)
|
||||
if !ok {
|
||||
return false, c.sendAlert(alertProtocolVersion)
|
||||
}
|
||||
|
@ -156,11 +160,25 @@ Curves:
|
|||
if len(hs.clientHello.serverName) > 0 {
|
||||
c.serverName = hs.clientHello.serverName
|
||||
}
|
||||
if hs.clientHello.nextProtoNeg {
|
||||
// Although sending an empty NPN extension is reasonable, Firefox has
|
||||
// had a bug around this. Best to send nothing at all if
|
||||
// config.NextProtos is empty. See
|
||||
// https://code.google.com/p/go/issues/detail?id=5445.
|
||||
if hs.clientHello.nextProtoNeg && len(config.NextProtos) > 0 {
|
||||
hs.hello.nextProtoNeg = true
|
||||
hs.hello.nextProtos = config.NextProtos
|
||||
}
|
||||
|
||||
if len(config.Certificates) == 0 {
|
||||
return false, c.sendAlert(alertInternalError)
|
||||
}
|
||||
hs.cert = &config.Certificates[0]
|
||||
if len(hs.clientHello.serverName) > 0 {
|
||||
hs.cert = config.getCertificateForName(hs.clientHello.serverName)
|
||||
}
|
||||
|
||||
_, hs.ecdsaOk = hs.cert.PrivateKey.(*ecdsa.PrivateKey)
|
||||
|
||||
if hs.checkForResumption() {
|
||||
return true, nil
|
||||
}
|
||||
|
@ -175,7 +193,7 @@ Curves:
|
|||
}
|
||||
|
||||
for _, id := range preferenceList {
|
||||
if hs.suite = c.tryCipherSuite(id, supportedList, hs.ellipticOk); hs.suite != nil {
|
||||
if hs.suite = c.tryCipherSuite(id, supportedList, c.vers, hs.ellipticOk, hs.ecdsaOk); hs.suite != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@ -199,7 +217,7 @@ func (hs *serverHandshakeState) checkForResumption() bool {
|
|||
if hs.sessionState.vers > hs.clientHello.vers {
|
||||
return false
|
||||
}
|
||||
if vers, ok := mutualVersion(hs.sessionState.vers); !ok || vers != hs.sessionState.vers {
|
||||
if vers, ok := c.config.mutualVersion(hs.sessionState.vers); !ok || vers != hs.sessionState.vers {
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -216,7 +234,7 @@ func (hs *serverHandshakeState) checkForResumption() bool {
|
|||
}
|
||||
|
||||
// Check that we also support the ciphersuite from the session.
|
||||
hs.suite = c.tryCipherSuite(hs.sessionState.cipherSuite, c.config.cipherSuites(), hs.ellipticOk)
|
||||
hs.suite = c.tryCipherSuite(hs.sessionState.cipherSuite, c.config.cipherSuites(), hs.sessionState.vers, hs.ellipticOk, hs.ecdsaOk)
|
||||
if hs.suite == nil {
|
||||
return false
|
||||
}
|
||||
|
@ -258,15 +276,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
|
|||
config := hs.c.config
|
||||
c := hs.c
|
||||
|
||||
if len(config.Certificates) == 0 {
|
||||
return c.sendAlert(alertInternalError)
|
||||
}
|
||||
cert := &config.Certificates[0]
|
||||
if len(hs.clientHello.serverName) > 0 {
|
||||
cert = config.getCertificateForName(hs.clientHello.serverName)
|
||||
}
|
||||
|
||||
if hs.clientHello.ocspStapling && len(cert.OCSPStaple) > 0 {
|
||||
if hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 {
|
||||
hs.hello.ocspStapling = true
|
||||
}
|
||||
|
||||
|
@ -276,20 +286,20 @@ func (hs *serverHandshakeState) doFullHandshake() error {
c.writeRecord(recordTypeHandshake, hs.hello.marshal())

certMsg := new(certificateMsg)
certMsg.certificates = cert.Certificate
certMsg.certificates = hs.cert.Certificate
hs.finishedHash.Write(certMsg.marshal())
c.writeRecord(recordTypeHandshake, certMsg.marshal())

if hs.hello.ocspStapling {
certStatus := new(certificateStatusMsg)
certStatus.statusType = statusTypeOCSP
certStatus.response = cert.OCSPStaple
certStatus.response = hs.cert.OCSPStaple
hs.finishedHash.Write(certStatus.marshal())
c.writeRecord(recordTypeHandshake, certStatus.marshal())
}

keyAgreement := hs.suite.ka()
skx, err := keyAgreement.generateServerKeyExchange(config, cert, hs.clientHello, hs.hello)
keyAgreement := hs.suite.ka(c.vers)
skx, err := keyAgreement.generateServerKeyExchange(config, hs.cert, hs.clientHello, hs.hello)
if err != nil {
c.sendAlert(alertHandshakeFailure)
return err

@ -302,7 +312,14 @@ func (hs *serverHandshakeState) doFullHandshake() error {
if config.ClientAuth >= RequestClientCert {
// Request a client certificate
certReq := new(certificateRequestMsg)
certReq.certificateTypes = []byte{certTypeRSASign}
certReq.certificateTypes = []byte{
byte(certTypeRSASign),
byte(certTypeECDSASign),
}
if c.vers >= VersionTLS12 {
certReq.hasSignatureAndHash = true
certReq.signatureAndHashes = supportedClientCertSignatureAlgorithms
}

// An empty list of certificateAuthorities signals to
// the client that it may send any certificate in response

@ -320,7 +337,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
hs.finishedHash.Write(helloDone.marshal())
c.writeRecord(recordTypeHandshake, helloDone.marshal())

var pub *rsa.PublicKey // public key for client auth, if any
var pub crypto.PublicKey // public key for client auth, if any

msg, err := c.readHandshake()
if err != nil {

@ -365,7 +382,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {

// If we received a client cert in response to our certificate request message,
// the client will send us a certificateVerifyMsg immediately after the
// clientKeyExchangeMsg. This message is a MD5SHA1 digest of all preceding
// clientKeyExchangeMsg. This message is a digest of all preceding
// handshake-layer messages that is signed using the private key corresponding
// to the client's certificate. This allows us to verify that the client is in
// possession of the private key of the certificate.

@ -379,10 +396,25 @@ func (hs *serverHandshakeState) doFullHandshake() error {
return c.sendAlert(alertUnexpectedMessage)
}

digest := make([]byte, 0, 36)
digest = hs.finishedHash.serverMD5.Sum(digest)
digest = hs.finishedHash.serverSHA1.Sum(digest)
err = rsa.VerifyPKCS1v15(pub, crypto.MD5SHA1, digest, certVerify.signature)
switch key := pub.(type) {
case *ecdsa.PublicKey:
ecdsaSig := new(ecdsaSignature)
if _, err = asn1.Unmarshal(certVerify.signature, ecdsaSig); err != nil {
break
}
if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
err = errors.New("ECDSA signature contained zero or negative values")
break
}
digest, _, _ := hs.finishedHash.hashForClientCertificate(signatureECDSA)
if !ecdsa.Verify(key, digest, ecdsaSig.R, ecdsaSig.S) {
err = errors.New("ECDSA verification failure")
break
}
case *rsa.PublicKey:
digest, hashFunc, _ := hs.finishedHash.hashForClientCertificate(signatureRSA)
err = rsa.VerifyPKCS1v15(key, hashFunc, digest, certVerify.signature)
}
if err != nil {
c.sendAlert(alertBadCertificate)
return errors.New("could not validate signature of connection nonces: " + err.Error())
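The CertificateVerify branch above unmarshals an ASN.1 DER-encoded (r, s) pair and checks it with ecdsa.Verify. The following standalone sketch walks through the same round trip with the standard library; the ecdsaSignature struct, the sample key, and the digest are local stand-ins for illustration, not the package's internal state:

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/sha256"
    "encoding/asn1"
    "fmt"
    "math/big"
)

// ecdsaSignature mirrors the DER structure carried on the wire: SEQUENCE { r, s }.
type ecdsaSignature struct {
    R, S *big.Int
}

func main() {
    // Sample key and digest; in TLS the digest comes from the handshake transcript.
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }
    digest := sha256.Sum256([]byte("handshake transcript"))

    // Sign and DER-encode (r, s), as a client would for CertificateVerify.
    r, s, err := ecdsa.Sign(rand.Reader, key, digest[:])
    if err != nil {
        panic(err)
    }
    sig, err := asn1.Marshal(ecdsaSignature{r, s})
    if err != nil {
        panic(err)
    }

    // Verify: decode the DER blob, reject non-positive values, then check the signature.
    var decoded ecdsaSignature
    if _, err := asn1.Unmarshal(sig, &decoded); err != nil {
        panic(err)
    }
    if decoded.R.Sign() <= 0 || decoded.S.Sign() <= 0 {
        panic("signature contained zero or negative values")
    }
    fmt.Println("valid:", ecdsa.Verify(&key.PublicKey, digest[:], decoded.R, decoded.S))
}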
@ -391,7 +423,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
hs.finishedHash.Write(certVerify.marshal())
}

preMasterSecret, err := keyAgreement.processClientKeyExchange(config, cert, ckx, c.vers)
preMasterSecret, err := keyAgreement.processClientKeyExchange(config, hs.cert, ckx, c.vers)
if err != nil {
c.sendAlert(alertHandshakeFailure)
return err

@ -407,12 +439,20 @@ func (hs *serverHandshakeState) establishKeys() error {
clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
keysFromMasterSecret(c.vers, hs.masterSecret, hs.clientHello.random, hs.hello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen)

clientCipher := hs.suite.cipher(clientKey, clientIV, true /* for reading */)
clientHash := hs.suite.mac(c.vers, clientMAC)
c.in.prepareCipherSpec(c.vers, clientCipher, clientHash)
var clientCipher, serverCipher interface{}
var clientHash, serverHash macFunction

serverCipher := hs.suite.cipher(serverKey, serverIV, false /* not for reading */)
serverHash := hs.suite.mac(c.vers, serverMAC)
if hs.suite.aead == nil {
clientCipher = hs.suite.cipher(clientKey, clientIV, true /* for reading */)
clientHash = hs.suite.mac(c.vers, clientMAC)
serverCipher = hs.suite.cipher(serverKey, serverIV, false /* not for reading */)
serverHash = hs.suite.mac(c.vers, serverMAC)
} else {
clientCipher = hs.suite.aead(clientKey, clientIV)
serverCipher = hs.suite.aead(serverKey, serverIV)
}

c.in.prepareCipherSpec(c.vers, clientCipher, clientHash)
c.out.prepareCipherSpec(c.vers, serverCipher, serverHash)

return nil
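establishKeys now branches on whether the negotiated suite is an AEAD suite: AEAD ciphers take the key and IV directly, while the older suites pair a block cipher with a separate HMAC. A minimal standalone illustration of the two constructions, using AES-GCM for the AEAD case and AES-CBC plus HMAC-SHA1 for the legacy case (the keys, IVs, and record bytes are placeholders, not values taken from a handshake):

package main

import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/hmac"
    "crypto/sha1"
    "fmt"
)

func main() {
    // Placeholder keys and IVs: in TLS these come from keysFromMasterSecret.
    key := make([]byte, 16)
    iv := make([]byte, 16)
    macKey := make([]byte, 20)
    record := []byte("0123456789abcdef") // one 16-byte block, already "padded"

    block, err := aes.NewCipher(key)
    if err != nil {
        panic(err)
    }

    // AEAD path: one primitive provides both confidentiality and integrity.
    aead, err := cipher.NewGCM(block)
    if err != nil {
        panic(err)
    }
    nonce := make([]byte, aead.NonceSize())
    sealed := aead.Seal(nil, nonce, record, nil)
    fmt.Println("AEAD ciphertext+tag bytes:", len(sealed))

    // Legacy path: CBC encryption plus a separate HMAC over the record.
    ciphertext := make([]byte, len(record))
    cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, record)
    mac := hmac.New(sha1.New, macKey)
    mac.Write(record)
    fmt.Println("CBC ciphertext bytes:", len(ciphertext), "MAC bytes:", len(mac.Sum(nil)))
}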
@ -502,7 +542,7 @@ func (hs *serverHandshakeState) sendFinished() error {
// processCertsFromClient takes a chain of client certificates either from a
// Certificates message or from a sessionState and verifies them. It returns
// the public key of the leaf certificate.
func (hs *serverHandshakeState) processCertsFromClient(certificates [][]byte) (*rsa.PublicKey, error) {
func (hs *serverHandshakeState) processCertsFromClient(certificates [][]byte) (crypto.PublicKey, error) {
c := hs.c

hs.certsFromClient = certificates

@ -549,8 +589,11 @@ func (hs *serverHandshakeState) processCertsFromClient(certificates [][]byte) (*
}

if len(certs) > 0 {
pub, ok := certs[0].PublicKey.(*rsa.PublicKey)
if !ok {
var pub crypto.PublicKey
switch key := certs[0].PublicKey.(type) {
case *ecdsa.PublicKey, *rsa.PublicKey:
pub = key
default:
return nil, c.sendAlert(alertUnsupportedCertificate)
}
c.peerCertificates = certs

@ -562,7 +605,7 @@ func (hs *serverHandshakeState) processCertsFromClient(certificates [][]byte) (*

// tryCipherSuite returns a cipherSuite with the given id if that cipher suite
// is acceptable to use.
func (c *Conn) tryCipherSuite(id uint16, supportedCipherSuites []uint16, ellipticOk bool) *cipherSuite {
func (c *Conn) tryCipherSuite(id uint16, supportedCipherSuites []uint16, version uint16, ellipticOk, ecdsaOk bool) *cipherSuite {
for _, supported := range supportedCipherSuites {
if id == supported {
var candidate *cipherSuite

@ -578,7 +621,13 @@ func (c *Conn) tryCipherSuite(id uint16, supportedCipherSuites []uint16, ellipti
}
// Don't select a ciphersuite which we can't
// support for this client.
if candidate.elliptic && !ellipticOk {
if (candidate.flags&suiteECDHE != 0) && !ellipticOk {
continue
}
if (candidate.flags&suiteECDSA != 0) != ecdsaOk {
continue
}
if version < VersionTLS12 && candidate.flags&suiteTLS12 != 0 {
continue
}
return candidate
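tryCipherSuite now filters candidates with bit flags (ECDHE-capable, ECDSA-keyed, TLS 1.2-only) instead of a single elliptic boolean. The self-contained sketch below mimics that filtering logic; the flag constants, the suite struct, and the sample table are illustrative stand-ins, not the package's actual definitions:

package main

import "fmt"

// Illustrative flag bits, mirroring the shape of the checks in tryCipherSuite.
const (
    suiteECDHE = 1 << iota // suite uses an ephemeral ECDH key exchange
    suiteECDSA             // suite requires an ECDSA certificate key
    suiteTLS12             // suite may only be used with TLS 1.2
)

const versionTLS12 = 0x0303

type suite struct {
    name  string
    flags uint8
}

// acceptable applies the same three tests the handshake code performs.
func acceptable(s suite, version uint16, ellipticOk, ecdsaOk bool) bool {
    if s.flags&suiteECDHE != 0 && !ellipticOk {
        return false
    }
    if (s.flags&suiteECDSA != 0) != ecdsaOk {
        return false
    }
    if version < versionTLS12 && s.flags&suiteTLS12 != 0 {
        return false
    }
    return true
}

func main() {
    table := []suite{
        {"RSA_WITH_AES_128_CBC_SHA", 0},
        {"ECDHE_RSA_WITH_AES_128_GCM_SHA256", suiteECDHE | suiteTLS12},
        {"ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", suiteECDHE | suiteECDSA | suiteTLS12},
    }
    for _, s := range table {
        // Client supports elliptic curves, server key is RSA, negotiated version is TLS 1.1.
        fmt.Println(s.name, acceptable(s, 0x0302, true, false))
    }
}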
File diff suppressed because it is too large
@ -6,11 +6,14 @@ package tls

import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/md5"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/x509"
"encoding/asn1"
"errors"
"io"
"math/big"

@ -36,7 +39,7 @@ func (ka rsaKeyAgreement) processClientKeyExchange(config *Config, cert *Certifi
}

ciphertext := ckx.ciphertext
if version != versionSSL30 {
if version != VersionSSL30 {
ciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1])
if ciphertextLen != len(ckx.ciphertext)-2 {
return nil, errors.New("bad ClientKeyExchange")
@ -82,34 +85,94 @@ func (ka rsaKeyAgreement) generateClientKeyExchange(config *Config, clientHello
return preMasterSecret, ckx, nil
}

// sha1Hash calculates a SHA1 hash over the given byte slices.
func sha1Hash(slices [][]byte) []byte {
hsha1 := sha1.New()
for _, slice := range slices {
hsha1.Write(slice)
}
return hsha1.Sum(nil)
}

// md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the
// concatenation of an MD5 and SHA1 hash.
func md5SHA1Hash(slices ...[]byte) []byte {
func md5SHA1Hash(slices [][]byte) []byte {
md5sha1 := make([]byte, md5.Size+sha1.Size)
hmd5 := md5.New()
for _, slice := range slices {
hmd5.Write(slice)
}
copy(md5sha1, hmd5.Sum(nil))

hsha1 := sha1.New()
for _, slice := range slices {
hsha1.Write(slice)
}
copy(md5sha1[md5.Size:], hsha1.Sum(nil))
copy(md5sha1[md5.Size:], sha1Hash(slices))
return md5sha1
}

// sha256Hash implements TLS 1.2's hash function.
func sha256Hash(slices [][]byte) []byte {
h := sha256.New()
for _, slice := range slices {
h.Write(slice)
}
return h.Sum(nil)
}

// hashForServerKeyExchange hashes the given slices and returns their digest
// and the identifier of the hash function used. The hashFunc argument is only
// used for >= TLS 1.2 and precisely identifies the hash function to use.
func hashForServerKeyExchange(sigType, hashFunc uint8, version uint16, slices ...[]byte) ([]byte, crypto.Hash, error) {
if version >= VersionTLS12 {
switch hashFunc {
case hashSHA256:
return sha256Hash(slices), crypto.SHA256, nil
case hashSHA1:
return sha1Hash(slices), crypto.SHA1, nil
default:
return nil, crypto.Hash(0), errors.New("tls: unknown hash function used by peer")
}
}
if sigType == signatureECDSA {
return sha1Hash(slices), crypto.SHA1, nil
}
return md5SHA1Hash(slices), crypto.MD5SHA1, nil
}

// pickTLS12HashForSignature returns a TLS 1.2 hash identifier for signing a
// ServerKeyExchange given the signature type being used and the client's
// advertized list of supported signature and hash combinations.
func pickTLS12HashForSignature(sigType uint8, clientSignatureAndHashes []signatureAndHash) (uint8, error) {
if len(clientSignatureAndHashes) == 0 {
// If the client didn't specify any signature_algorithms
// extension then we can assume that it supports SHA1. See
// http://tools.ietf.org/html/rfc5246#section-7.4.1.4.1
return hashSHA1, nil
}

for _, sigAndHash := range clientSignatureAndHashes {
if sigAndHash.signature != sigType {
continue
}
switch sigAndHash.hash {
case hashSHA1, hashSHA256:
return sigAndHash.hash, nil
}
}

return 0, errors.New("tls: client doesn't support any common hash functions")
}
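The new helpers above hash the same byte slices two different ways depending on the protocol version: a 36-byte MD5+SHA1 concatenation before TLS 1.2, a single SHA-256 at TLS 1.2. A standalone re-implementation for illustration (the helper names and sample slices are mine, not the package's):

package main

import (
    "crypto/md5"
    "crypto/sha1"
    "crypto/sha256"
    "fmt"
)

// md5SHA1 concatenates an MD5 and a SHA-1 digest over the same slices,
// the pre-TLS 1.2 construction used for ServerKeyExchange signatures.
func md5SHA1(slices [][]byte) []byte {
    out := make([]byte, 0, md5.Size+sha1.Size)
    hmd5 := md5.New()
    hsha1 := sha1.New()
    for _, s := range slices {
        hmd5.Write(s)
        hsha1.Write(s)
    }
    out = hmd5.Sum(out)
    return hsha1.Sum(out)
}

// sha256Digest is the TLS 1.2 counterpart: a single SHA-256 over the slices.
func sha256Digest(slices [][]byte) []byte {
    h := sha256.New()
    for _, s := range slices {
        h.Write(s)
    }
    return h.Sum(nil)
}

func main() {
    // In the handshake the slices are the client random, server random and the
    // serialized ECDH parameters; arbitrary bytes stand in for them here.
    slices := [][]byte{[]byte("client random"), []byte("server random"), []byte("params")}
    fmt.Println("MD5+SHA1 digest length:", len(md5SHA1(slices)))      // 36
    fmt.Println("SHA-256 digest length:", len(sha256Digest(slices)))  // 32
}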
// ecdheRSAKeyAgreement implements a TLS key agreement where the server
// generates a ephemeral EC public/private key pair and signs it. The
// pre-master secret is then calculated using ECDH.
type ecdheRSAKeyAgreement struct {
// pre-master secret is then calculated using ECDH. The signature may
// either be ECDSA or RSA.
type ecdheKeyAgreement struct {
version uint16
sigType uint8
privateKey []byte
curve elliptic.Curve
x, y *big.Int
}

func (ka *ecdheRSAKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {
func (ka *ecdheKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {
var curveid uint16

Curve:
@ -150,16 +213,55 @@ Curve:
serverECDHParams[3] = byte(len(ecdhePublic))
copy(serverECDHParams[4:], ecdhePublic)

md5sha1 := md5SHA1Hash(clientHello.random, hello.random, serverECDHParams)
sig, err := rsa.SignPKCS1v15(config.rand(), cert.PrivateKey.(*rsa.PrivateKey), crypto.MD5SHA1, md5sha1)
var tls12HashId uint8
if ka.version >= VersionTLS12 {
if tls12HashId, err = pickTLS12HashForSignature(ka.sigType, clientHello.signatureAndHashes); err != nil {
return nil, err
}
}

digest, hashFunc, err := hashForServerKeyExchange(ka.sigType, tls12HashId, ka.version, clientHello.random, hello.random, serverECDHParams)
if err != nil {
return nil, errors.New("failed to sign ECDHE parameters: " + err.Error())
return nil, err
}
var sig []byte
switch ka.sigType {
case signatureECDSA:
privKey, ok := cert.PrivateKey.(*ecdsa.PrivateKey)
if !ok {
return nil, errors.New("ECDHE ECDSA requires an ECDSA server private key")
}
r, s, err := ecdsa.Sign(config.rand(), privKey, digest)
if err != nil {
return nil, errors.New("failed to sign ECDHE parameters: " + err.Error())
}
sig, err = asn1.Marshal(ecdsaSignature{r, s})
case signatureRSA:
privKey, ok := cert.PrivateKey.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("ECDHE RSA requires a RSA server private key")
}
sig, err = rsa.SignPKCS1v15(config.rand(), privKey, hashFunc, digest)
if err != nil {
return nil, errors.New("failed to sign ECDHE parameters: " + err.Error())
}
default:
return nil, errors.New("unknown ECDHE signature algorithm")
}

skx := new(serverKeyExchangeMsg)
skx.key = make([]byte, len(serverECDHParams)+2+len(sig))
sigAndHashLen := 0
if ka.version >= VersionTLS12 {
sigAndHashLen = 2
}
skx.key = make([]byte, len(serverECDHParams)+sigAndHashLen+2+len(sig))
copy(skx.key, serverECDHParams)
k := skx.key[len(serverECDHParams):]
if ka.version >= VersionTLS12 {
k[0] = tls12HashId
k[1] = ka.sigType
k = k[2:]
}
k[0] = byte(len(sig) >> 8)
k[1] = byte(len(sig))
copy(k[2:], sig)
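The tail of the ServerKeyExchange body built above is: an optional two-byte SignatureAndHashAlgorithm (TLS 1.2 only), a two-byte big-endian signature length, then the signature bytes. A small standalone sketch of that framing and of the matching parse (the helper names and the 4/3 identifiers are illustrative placeholders, not the package's constants):

package main

import (
    "encoding/binary"
    "errors"
    "fmt"
)

const versionTLS12 = 0x0303

// appendSignatureBlock frames sig the way the ServerKeyExchange tail is laid out:
// [hashID, sigType] only for TLS 1.2, then a 2-byte length, then the signature.
func appendSignatureBlock(out []byte, version uint16, hashID, sigType byte, sig []byte) []byte {
    if version >= versionTLS12 {
        out = append(out, hashID, sigType)
    }
    out = append(out, byte(len(sig)>>8), byte(len(sig)))
    return append(out, sig...)
}

// parseSignatureBlock undoes the framing above.
func parseSignatureBlock(in []byte, version uint16) (hashID, sigType byte, sig []byte, err error) {
    if version >= versionTLS12 {
        if len(in) < 2 {
            return 0, 0, nil, errors.New("short signature block")
        }
        hashID, sigType, in = in[0], in[1], in[2:]
    }
    if len(in) < 2 {
        return 0, 0, nil, errors.New("short signature block")
    }
    n := int(binary.BigEndian.Uint16(in))
    if n+2 != len(in) {
        return 0, 0, nil, errors.New("bad signature length")
    }
    return hashID, sigType, in[2:], nil
}

func main() {
    sig := []byte{0xde, 0xad, 0xbe, 0xef}
    framed := appendSignatureBlock(nil, versionTLS12, 4 /* e.g. SHA-256 */, 3 /* e.g. ECDSA */, sig)
    hashID, sigType, got, err := parseSignatureBlock(framed, versionTLS12)
    fmt.Println(hashID, sigType, got, err)
}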
@ -167,7 +269,7 @@ Curve:
return skx, nil
}

func (ka *ecdheRSAKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {
func (ka *ecdheKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {
if len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 {
return nil, errors.New("bad ClientKeyExchange")
}

@ -185,7 +287,7 @@ func (ka *ecdheRSAKeyAgreement) processClientKeyExchange(config *Config, cert *C

var errServerKeyExchange = errors.New("invalid ServerKeyExchange")

func (ka *ecdheRSAKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {
func (ka *ecdheKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {
if len(skx.key) < 4 {
return errServerKeyExchange
}

@ -219,17 +321,62 @@ func (ka *ecdheRSAKeyAgreement) processServerKeyExchange(config *Config, clientH
if len(sig) < 2 {
return errServerKeyExchange
}

var tls12HashId uint8
if ka.version >= VersionTLS12 {
// handle SignatureAndHashAlgorithm
var sigAndHash []uint8
sigAndHash, sig = sig[:2], sig[2:]
if sigAndHash[1] != ka.sigType {
return errServerKeyExchange
}
tls12HashId = sigAndHash[0]
if len(sig) < 2 {
return errServerKeyExchange
}
}
sigLen := int(sig[0])<<8 | int(sig[1])
if sigLen+2 != len(sig) {
return errServerKeyExchange
}
sig = sig[2:]

md5sha1 := md5SHA1Hash(clientHello.random, serverHello.random, serverECDHParams)
return rsa.VerifyPKCS1v15(cert.PublicKey.(*rsa.PublicKey), crypto.MD5SHA1, md5sha1, sig)
digest, hashFunc, err := hashForServerKeyExchange(ka.sigType, tls12HashId, ka.version, clientHello.random, serverHello.random, serverECDHParams)
if err != nil {
return err
}
switch ka.sigType {
case signatureECDSA:
pubKey, ok := cert.PublicKey.(*ecdsa.PublicKey)
if !ok {
return errors.New("ECDHE ECDSA requires a ECDSA server public key")
}
ecdsaSig := new(ecdsaSignature)
if _, err := asn1.Unmarshal(sig, ecdsaSig); err != nil {
return err
}
if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
return errors.New("ECDSA signature contained zero or negative values")
}
if !ecdsa.Verify(pubKey, digest, ecdsaSig.R, ecdsaSig.S) {
return errors.New("ECDSA verification failure")
}
case signatureRSA:
pubKey, ok := cert.PublicKey.(*rsa.PublicKey)
if !ok {
return errors.New("ECDHE RSA requires a RSA server public key")
}
if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, digest, sig); err != nil {
return err
}
default:
return errors.New("unknown ECDHE signature algorithm")
}

return nil
}

func (ka *ecdheRSAKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {
func (ka *ecdheKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {
if ka.curve == nil {
return nil, nil, errors.New("missing ServerKeyExchange message")
}
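The renamed ecdheKeyAgreement still derives the pre-master secret by plain ECDH: each side multiplies the peer's public point by its own private scalar and keeps the shared X coordinate. A minimal standalone illustration with crypto/elliptic (the curve choice and variable names are just for the demo, not taken from the handshake code):

package main

import (
    "crypto/elliptic"
    "crypto/rand"
    "fmt"
)

func main() {
    curve := elliptic.P256()

    // Server's ephemeral key pair (its public point is sent in ServerKeyExchange).
    sPriv, sX, sY, err := elliptic.GenerateKey(curve, rand.Reader)
    if err != nil {
        panic(err)
    }
    // Client's ephemeral key pair (its public point is sent in ClientKeyExchange).
    cPriv, cX, cY, err := elliptic.GenerateKey(curve, rand.Reader)
    if err != nil {
        panic(err)
    }

    // Each side scalar-multiplies the peer's point by its own private scalar;
    // the shared X coordinate becomes the pre-master secret.
    x1, _ := curve.ScalarMult(cX, cY, sPriv) // server side
    x2, _ := curve.ScalarMult(sX, sY, cPriv) // client side
    fmt.Println("shared secrets match:", x1.Cmp(x2) == 0)
}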
@ -5,9 +5,11 @@
package tls

import (
"crypto"
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"hash"
)

@ -43,8 +45,8 @@ func pHash(result, secret, seed []byte, hash func() hash.Hash) {
}
}

// pRF10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, section 5.
func pRF10(result, secret, label, seed []byte) {
// prf10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, section 5.
func prf10(result, secret, label, seed []byte) {
hashSHA1 := sha1.New
hashMD5 := md5.New

@ -62,9 +64,18 @@ func pRF10(result, secret, label, seed []byte) {
}
}

// pRF30 implements the SSL 3.0 pseudo-random function, as defined in
// prf12 implements the TLS 1.2 pseudo-random function, as defined in RFC 5246, section 5.
func prf12(result, secret, label, seed []byte) {
labelAndSeed := make([]byte, len(label)+len(seed))
copy(labelAndSeed, label)
copy(labelAndSeed[len(label):], seed)

pHash(result, secret, labelAndSeed, sha256.New)
}

// prf30 implements the SSL 3.0 pseudo-random function, as defined in
// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 6.
func pRF30(result, secret, label, seed []byte) {
func prf30(result, secret, label, seed []byte) {
hashSHA1 := sha1.New()
hashMD5 := md5.New()
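prf12 above is just P_hash from RFC 5246, section 5, run with HMAC-SHA256 over label||seed. A self-contained re-implementation of that expansion for illustration (it follows the RFC definition, but it is not the package's internal pHash):

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "fmt"
)

// pHash fills result with P_hash(secret, seed) as defined in RFC 5246, section 5:
// A(0) = seed, A(i) = HMAC(secret, A(i-1)), output = HMAC(secret, A(1)||seed) || HMAC(secret, A(2)||seed) || ...
func pHash(result, secret, seed []byte) {
    h := hmac.New(sha256.New, secret)
    h.Write(seed)
    a := h.Sum(nil)

    for len(result) > 0 {
        h.Reset()
        h.Write(a)
        h.Write(seed)
        b := h.Sum(nil)
        n := copy(result, b)
        result = result[n:]

        h.Reset()
        h.Write(a)
        a = h.Sum(nil)
    }
}

// prf12 is the TLS 1.2 PRF: P_SHA256(secret, label || seed).
func prf12(result, secret, label, seed []byte) {
    labelAndSeed := append(append([]byte{}, label...), seed...)
    pHash(result, secret, labelAndSeed)
}

func main() {
    out := make([]byte, 48) // e.g. the length of a master secret
    prf12(out, []byte("pre-master secret"), []byte("master secret"), []byte("client+server random"))
    fmt.Printf("%x\n", out[:16])
}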
@ -106,19 +117,27 @@ var keyExpansionLabel = []byte("key expansion")
var clientFinishedLabel = []byte("client finished")
var serverFinishedLabel = []byte("server finished")

func prfForVersion(version uint16) func(result, secret, label, seed []byte) {
switch version {
case VersionSSL30:
return prf30
case VersionTLS10, VersionTLS11:
return prf10
case VersionTLS12:
return prf12
default:
panic("unknown version")
}
}

// masterFromPreMasterSecret generates the master secret from the pre-master
// secret. See http://tools.ietf.org/html/rfc5246#section-8.1
func masterFromPreMasterSecret(version uint16, preMasterSecret, clientRandom, serverRandom []byte) []byte {
prf := pRF10
if version == versionSSL30 {
prf = pRF30
}

var seed [tlsRandomLength * 2]byte
copy(seed[0:len(clientRandom)], clientRandom)
copy(seed[len(clientRandom):], serverRandom)
masterSecret := make([]byte, masterSecretLength)
prf(masterSecret, preMasterSecret, masterSecretLabel, seed[0:])
prfForVersion(version)(masterSecret, preMasterSecret, masterSecretLabel, seed[0:])
return masterSecret
}
@ -126,18 +145,13 @@ func masterFromPreMasterSecret(version uint16, preMasterSecret, clientRandom, se
// secret, given the lengths of the MAC key, cipher key and IV, as defined in
// RFC 2246, section 6.3.
func keysFromMasterSecret(version uint16, masterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int) (clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV []byte) {
prf := pRF10
if version == versionSSL30 {
prf = pRF30
}

var seed [tlsRandomLength * 2]byte
copy(seed[0:len(clientRandom)], serverRandom)
copy(seed[len(serverRandom):], clientRandom)

n := 2*macLen + 2*keyLen + 2*ivLen
keyMaterial := make([]byte, n)
prf(keyMaterial, masterSecret, keyExpansionLabel, seed[0:])
prfForVersion(version)(keyMaterial, masterSecret, keyExpansionLabel, seed[0:])
clientMAC = keyMaterial[:macLen]
keyMaterial = keyMaterial[macLen:]
serverMAC = keyMaterial[:macLen]
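keysFromMasterSecret consumes the PRF output as one contiguous key block, sliced in a fixed order: client MAC, server MAC, client key, server key, client IV, server IV. A tiny standalone sketch of that slicing (the lengths are example values, not tied to any particular suite):

package main

import "fmt"

// splitKeyBlock cuts the expanded key material in the TLS-defined order.
func splitKeyBlock(keyMaterial []byte, macLen, keyLen, ivLen int) (clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV []byte) {
    next := func(n int) []byte {
        out := keyMaterial[:n]
        keyMaterial = keyMaterial[n:]
        return out
    }
    // Function calls in a return statement are evaluated left to right,
    // so the slices come out in the wire-defined order.
    return next(macLen), next(macLen), next(keyLen), next(keyLen), next(ivLen), next(ivLen)
}

func main() {
    macLen, keyLen, ivLen := 20, 16, 16 // e.g. HMAC-SHA1 with AES-128-CBC
    block := make([]byte, 2*macLen+2*keyLen+2*ivLen)
    cMAC, sMAC, cKey, sKey, cIV, sIV := splitKeyBlock(block, macLen, keyLen, ivLen)
    fmt.Println(len(cMAC), len(sMAC), len(cKey), len(sKey), len(cIV), len(sIV))
}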
@ -153,37 +167,34 @@ func keysFromMasterSecret(version uint16, masterSecret, clientRandom, serverRand
}

func newFinishedHash(version uint16) finishedHash {
return finishedHash{md5.New(), sha1.New(), md5.New(), sha1.New(), version}
if version >= VersionTLS12 {
return finishedHash{sha256.New(), sha256.New(), nil, nil, version}
}
return finishedHash{sha1.New(), sha1.New(), md5.New(), md5.New(), version}
}

// A finishedHash calculates the hash of a set of handshake messages suitable
// for including in a Finished message.
type finishedHash struct {
clientMD5 hash.Hash
clientSHA1 hash.Hash
serverMD5 hash.Hash
serverSHA1 hash.Hash
version uint16
client hash.Hash
server hash.Hash

// Prior to TLS 1.2, an additional MD5 hash is required.
clientMD5 hash.Hash
serverMD5 hash.Hash

version uint16
}

func (h finishedHash) Write(msg []byte) (n int, err error) {
h.clientMD5.Write(msg)
h.clientSHA1.Write(msg)
h.serverMD5.Write(msg)
h.serverSHA1.Write(msg)
return len(msg), nil
}
h.client.Write(msg)
h.server.Write(msg)

// finishedSum10 calculates the contents of the verify_data member of a TLSv1
// Finished message given the MD5 and SHA1 hashes of a set of handshake
// messages.
func finishedSum10(md5, sha1, label, masterSecret []byte) []byte {
seed := make([]byte, len(md5)+len(sha1))
copy(seed, md5)
copy(seed[len(md5):], sha1)
out := make([]byte, finishedVerifyLength)
pRF10(out, masterSecret, label, seed)
return out
if h.version < VersionTLS12 {
h.clientMD5.Write(msg)
h.serverMD5.Write(msg)
}
return len(msg), nil
}

// finishedSum30 calculates the contents of the verify_data member of a SSLv3
@ -224,23 +235,57 @@ var ssl3ServerFinishedMagic = [4]byte{0x53, 0x52, 0x56, 0x52}
// clientSum returns the contents of the verify_data member of a client's
// Finished message.
func (h finishedHash) clientSum(masterSecret []byte) []byte {
if h.version == versionSSL30 {
return finishedSum30(h.clientMD5, h.clientSHA1, masterSecret, ssl3ClientFinishedMagic)
if h.version == VersionSSL30 {
return finishedSum30(h.clientMD5, h.client, masterSecret, ssl3ClientFinishedMagic)
}

md5 := h.clientMD5.Sum(nil)
sha1 := h.clientSHA1.Sum(nil)
return finishedSum10(md5, sha1, clientFinishedLabel, masterSecret)
out := make([]byte, finishedVerifyLength)
if h.version >= VersionTLS12 {
seed := h.client.Sum(nil)
prf12(out, masterSecret, clientFinishedLabel, seed)
} else {
seed := make([]byte, 0, md5.Size+sha1.Size)
seed = h.clientMD5.Sum(seed)
seed = h.client.Sum(seed)
prf10(out, masterSecret, clientFinishedLabel, seed)
}
return out
}

// serverSum returns the contents of the verify_data member of a server's
// Finished message.
func (h finishedHash) serverSum(masterSecret []byte) []byte {
if h.version == versionSSL30 {
return finishedSum30(h.serverMD5, h.serverSHA1, masterSecret, ssl3ServerFinishedMagic)
if h.version == VersionSSL30 {
return finishedSum30(h.serverMD5, h.server, masterSecret, ssl3ServerFinishedMagic)
}

md5 := h.serverMD5.Sum(nil)
sha1 := h.serverSHA1.Sum(nil)
return finishedSum10(md5, sha1, serverFinishedLabel, masterSecret)
out := make([]byte, finishedVerifyLength)
if h.version >= VersionTLS12 {
seed := h.server.Sum(nil)
prf12(out, masterSecret, serverFinishedLabel, seed)
} else {
seed := make([]byte, 0, md5.Size+sha1.Size)
seed = h.serverMD5.Sum(seed)
seed = h.server.Sum(seed)
prf10(out, masterSecret, serverFinishedLabel, seed)
}
return out
}

// hashForClientCertificate returns a digest, hash function, and TLS 1.2 hash
// id suitable for signing by a TLS client certificate.
func (h finishedHash) hashForClientCertificate(sigType uint8) ([]byte, crypto.Hash, uint8) {
if h.version >= VersionTLS12 {
digest := h.server.Sum(nil)
return digest, crypto.SHA256, hashSHA256
}
if sigType == signatureECDSA {
digest := h.server.Sum(nil)
return digest, crypto.SHA1, hashSHA1
}

digest := make([]byte, 0, 36)
digest = h.serverMD5.Sum(digest)
digest = h.server.Sum(digest)
return digest, crypto.MD5SHA1, 0 /* not specified in TLS 1.2. */
}
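clientSum, serverSum, and hashForClientCertificate all hinge on the same version split: at TLS 1.2 the running transcript hash is a single SHA-256, while earlier versions keep parallel MD5 and SHA-1 hashes and concatenate their sums into a 36-byte digest. A standalone sketch of just that digest selection (the transcript bytes and the helper name are made up for the demo):

package main

import (
    "crypto/md5"
    "crypto/sha1"
    "crypto/sha256"
    "fmt"
)

const versionTLS12 = 0x0303

// transcriptDigest returns the handshake-transcript digest that seeds the
// Finished computation: SHA-256 for TLS 1.2, MD5||SHA-1 (36 bytes) before it.
func transcriptDigest(version uint16, transcript []byte) []byte {
    if version >= versionTLS12 {
        sum := sha256.Sum256(transcript)
        return sum[:]
    }
    out := make([]byte, 0, md5.Size+sha1.Size)
    m := md5.Sum(transcript)
    s := sha1.Sum(transcript)
    out = append(out, m[:]...)
    return append(out, s[:]...)
}

func main() {
    transcript := []byte("all handshake messages so far")
    fmt.Println("TLS 1.2 digest bytes:", len(transcriptDigest(versionTLS12, transcript))) // 32
    fmt.Println("TLS 1.0 digest bytes:", len(transcriptDigest(0x0301, transcript)))       // 36
}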