author | ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4> | 2015-01-15 00:27:56 +0000 |
---|---|---|
committer | ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4> | 2015-01-15 00:27:56 +0000 |
commit | f11c215565ea0c863197b9666581d69c225619c2 (patch) | |
tree | 58a1724fee16d2b03c65678c4dd9b50bb97137a9 /libgo | |
parent | 2bbc72db3287d2bff87b9640e85830337aac0672 (diff) | |
download | gcc-f11c215565ea0c863197b9666581d69c225619c2.tar.gz | |
libgo, compiler: Upgrade libgo to Go 1.4, except for runtime.
This upgrades all of libgo other than the runtime package to
the Go 1.4 release. In Go 1.4 much of the runtime was
rewritten into Go. Merging that code will take more time and
will not change the API, so I'm putting it off for now.
There are a few runtime changes anyhow, to accommodate other
packages that rely on minor modifications to the runtime
support.
The compiler changes slightly to add a one-bit flag to each
type descriptor's kind, marking types whose values are stored
directly in an interface; for gccgo that is currently only
pointer types. Another one-bit flag (gcprog) is reserved
because it is used by the gc compiler, but gccgo does not
currently use it.
The compiler also gains an additional error check, for a case
I ran across during testing.
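To illustrate the direct-interface flag, here is a minimal, hypothetical sketch of a kind byte carrying such flag bits. The constant names and values mirror the gc toolchain's reflect sources from the Go 1.4 era (kindDirectIface, kindGCProg, kindMask); the exact layout gccgo emits in its type descriptors is not reproduced here and may differ.

```go
package main

import (
	"fmt"
	"reflect"
)

// Flag bits packed into the high bits of a type descriptor's kind byte.
// Names and values follow the gc toolchain's reflect package circa Go 1.4;
// gccgo's own descriptor layout may differ in detail.
const (
	kindDirectIface = 1 << 5       // value stored directly in the interface data word
	kindGCProg      = 1 << 6       // GC data is a program, not a bitmap (unused by gccgo)
	kindMask        = (1 << 5) - 1 // low bits hold the plain reflect.Kind
)

// isDirectIface reports whether the direct-interface bit is set in a kind byte.
func isDirectIface(kind uint8) bool {
	return kind&kindDirectIface != 0
}

func main() {
	// For gccgo, only pointer types currently get the bit.
	ptrKind := uint8(reflect.Ptr) | kindDirectIface
	fmt.Println(reflect.Kind(ptrKind&kindMask), isDirectIface(ptrKind)) // prints "ptr true"
}
```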
gotools/:
* Makefile.am (go_cmd_go_files): Sort entries. Add generate.go.
* Makefile.in: Rebuild.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@219627 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libgo')
613 files changed, 29850 insertions, 6871 deletions
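One user-visible library addition pulled in by this merge is archive/zip's new Writer.Flush method (see the diff to libgo/go/archive/zip/writer.go below). A short usage sketch, not part of the patch itself:

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"log"
)

func main() {
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)

	w, err := zw.Create("hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}

	// Flush pushes buffered data to the underlying writer without
	// finishing the archive; Close is still required afterwards to
	// write the central directory.
	if err := zw.Flush(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("bytes written so far:", buf.Len())

	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}
```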
diff --git a/libgo/MERGE b/libgo/MERGE index 99d33679a14..0e12671be4e 100644 --- a/libgo/MERGE +++ b/libgo/MERGE @@ -1,4 +1,4 @@ -f44017549ff9 +14854533dcc7 The first line of this file holds the Mercurial revision number of the last merge done from the master library sources. diff --git a/libgo/Makefile.am b/libgo/Makefile.am index 526b656a309..0ce357690a8 100644 --- a/libgo/Makefile.am +++ b/libgo/Makefile.am @@ -495,6 +495,7 @@ runtime_files = \ runtime/go-unsafe-new.c \ runtime/go-unsafe-newarray.c \ runtime/go-unsafe-pointer.c \ + runtime/go-unsetenv.c \ runtime/go-unwind.c \ runtime/go-varargs.c \ runtime/env_posix.c \ @@ -695,7 +696,7 @@ go_net_sockoptip_file = go/net/sockoptip_linux.go go/net/sockoptip_posix.go else if LIBGO_IS_SOLARIS go_net_cgo_file = go/net/cgo_linux.go -go_net_sock_file = go/net/sock_solaris.go +go_net_sock_file = go/net/sock_stub.go go_net_sockopt_file = go/net/sockopt_solaris.go go_net_sockoptip_file = go/net/sockoptip_stub.go else @@ -761,9 +762,6 @@ else if LIBGO_IS_DARWIN go_net_tcpsockopt_file = go/net/tcpsockopt_darwin.go else -if LIBGO_IS_SOLARIS -go_net_tcpsockopt_file = go/net/tcpsockopt_solaris.go -else if LIBGO_IS_DRAGONFLY go_net_tcpsockopt_file = go/net/tcpsockopt_dragonfly.go else @@ -771,7 +769,6 @@ go_net_tcpsockopt_file = go/net/tcpsockopt_unix.go endif endif endif -endif go_net_files = \ go/net/cgo_unix.go \ @@ -997,7 +994,6 @@ go_runtime_files = \ go/runtime/extern.go \ go/runtime/mem.go \ go/runtime/softfloat64.go \ - go/runtime/type.go \ version.go version.go: s-version; @true @@ -1187,10 +1183,19 @@ go_crypto_md5_files = \ go/crypto/md5/md5.go \ go/crypto/md5/md5block.go \ go/crypto/md5/md5block_generic.go + +if LIBGO_IS_LINUX +crypto_rand_file = go/crypto/rand/rand_linux.go +else +crypto_rand_file = +endif + go_crypto_rand_files = \ go/crypto/rand/rand.go \ go/crypto/rand/rand_unix.go \ + $(crypto_rand_file) \ go/crypto/rand/util.go + go_crypto_rc4_files = \ go/crypto/rc4/rc4.go \ go/crypto/rc4/rc4_ref.go @@ -1289,9 +1294,11 @@ go_encoding_csv_files = \ go_encoding_gob_files = \ go/encoding/gob/decode.go \ go/encoding/gob/decoder.go \ + go/encoding/gob/dec_helpers.go \ go/encoding/gob/doc.go \ go/encoding/gob/encode.go \ go/encoding/gob/encoder.go \ + go/encoding/gob/enc_helpers.go \ go/encoding/gob/error.go \ go/encoding/gob/type.go go_encoding_hex_files = \ @@ -1452,7 +1459,6 @@ go_mime_multipart_files = \ go/mime/multipart/writer.go go_net_http_files = \ - go/net/http/chunked.go \ go/net/http/client.go \ go/net/http/cookie.go \ go/net/http/filetransport.go \ @@ -1496,12 +1502,12 @@ go_net_http_httptest_files = \ go_net_http_pprof_files = \ go/net/http/pprof/pprof.go go_net_http_httputil_files = \ - go/net/http/httputil/chunked.go \ go/net/http/httputil/dump.go \ go/net/http/httputil/httputil.go \ go/net/http/httputil/persist.go \ go/net/http/httputil/reverseproxy.go - +go_net_http_internal_files = \ + go/net/http/internal/chunked.go go_old_regexp_files = \ go/old/regexp/regexp.go @@ -1535,7 +1541,8 @@ go_path_filepath_files = \ go/path/filepath/match.go \ go/path/filepath/path.go \ go/path/filepath/path_unix.go \ - go/path/filepath/symlink.go + go/path/filepath/symlink.go \ + go/path/filepath/symlink_unix.go go_regexp_syntax_files = \ go/regexp/syntax/compile.go \ @@ -1570,7 +1577,8 @@ go_text_template_parse_files = \ go/text/template/parse/parse.go go_sync_atomic_files = \ - go/sync/atomic/doc.go + go/sync/atomic/doc.go \ + go/sync/atomic/value.go go_sync_atomic_c_files = \ go/sync/atomic/atomic.c @@ -1784,10 +1792,21 @@ 
go_syscall_c_files = \ go_syscall_test_files = \ $(syscall_creds_test_file) \ + go/syscall/export_test.go \ go/syscall/mmap_unix_test.go \ go/syscall/syscall_test.go \ go/syscall/syscall_unix_test.go +if LIBGO_IS_LINUX +internal_syscall_getrandom_file = go/internal/syscall/getrandom_linux.go +else +internal_syscall_getrandom_file = +endif + +go_internal_syscall_files = \ + go/internal/syscall/dummy.go \ + $(internal_syscall_getrandom_file) + libcalls.go: s-libcalls; @true s-libcalls: libcalls-list go/syscall/mksyscall.awk $(go_base_syscall_files) rm -f libcalls.go.tmp @@ -1957,6 +1976,7 @@ libgo_go_objs = \ net/http/fcgi.lo \ net/http/httptest.lo \ net/http/httputil.lo \ + net/http/internal.lo \ net/http/pprof.lo \ image/color.lo \ image/color/palette.lo \ @@ -1965,6 +1985,7 @@ libgo_go_objs = \ image/jpeg.lo \ image/png.lo \ index/suffixarray.lo \ + internal/syscall.lo \ io/ioutil.lo \ log/syslog.lo \ log/syslog/syslog_c.lo \ @@ -3160,6 +3181,15 @@ net/http/httputil/check: $(check_deps) @$(CHECK) .PHONY: net/http/httputil/check +@go_include@ net/http/internal.lo.dep +net/http/internal.lo.dep: $(go_net_http_internal_files) + $(BUILDDEPS) +net/http/internal.lo: $(go_net_http_internal_files) + $(BUILDPACKAGE) +net/http/internal/check: $(CHECK_DEPS) + @$(CHECK) +.PHONY: net/http/internal/check + @go_include@ net/http/pprof.lo.dep net/http/pprof.lo.dep: $(go_net_http_pprof_files) $(BUILDDEPS) @@ -3260,7 +3290,8 @@ runtime/pprof/check: $(CHECK_DEPS) .PHONY: runtime/pprof/check # At least for now, we need -static-libgo for this test, because # otherwise we can't get the line numbers. -runtime_pprof_check_GOCFLAGS = -static-libgo +# Also use -fno-inline to get better results from the memory profiler. +runtime_pprof_check_GOCFLAGS = -static-libgo -fno-inline @go_include@ sync/atomic.lo.dep sync/atomic.lo.dep: $(go_sync_atomic_files) @@ -3363,6 +3394,15 @@ syscall/check: $(CHECK_DEPS) @$(CHECK) .PHONY: syscall/check +@go_include@ internal/syscall.lo.dep +internal/syscall.lo.dep: $(go_internal_syscall_files) + $(BUILDDEPS) +internal/syscall.lo: $(go_internal_syscall_files) + $(BUILDPACKAGE) +internal/syscall/check: $(CHECK_DEPS) + @$(CHECK) +.PHONY: internal/syscall/check + # How to build a .gox file from a .lo file. 
BUILDGOX = \ f=`echo $< | sed -e 's/.lo$$/.o/'`; \ @@ -3623,6 +3663,9 @@ net/http/httputil.gox: net/http/httputil.lo net/http/pprof.gox: net/http/pprof.lo $(BUILDGOX) +net/http/internal.gox: net/http/internal.lo + $(BUILDGOX) + net/rpc/jsonrpc.gox: net/rpc/jsonrpc.lo $(BUILDGOX) @@ -3652,6 +3695,9 @@ runtime/pprof.gox: runtime/pprof.lo sync/atomic.gox: sync/atomic.lo $(BUILDGOX) +internal/syscall.gox: internal/syscall.lo + $(BUILDGOX) + text/scanner.gox: text/scanner.lo $(BUILDGOX) text/tabwriter.gox: text/tabwriter.lo @@ -3774,6 +3820,7 @@ TEST_PACKAGES = \ net/http/fcgi/check \ net/http/httptest/check \ net/http/httputil/check \ + net/http/internal/check \ net/mail/check \ net/rpc/check \ net/smtp/check \ diff --git a/libgo/Makefile.in b/libgo/Makefile.in index 2254478aebe..495dc350b70 100644 --- a/libgo/Makefile.in +++ b/libgo/Makefile.in @@ -164,14 +164,15 @@ am__DEPENDENCIES_2 = bufio.lo bytes.lo bytes/index.lo crypto.lo \ go/printer.lo go/scanner.lo go/token.lo hash/adler32.lo \ hash/crc32.lo hash/crc64.lo hash/fnv.lo net/http/cgi.lo \ net/http/cookiejar.lo net/http/fcgi.lo net/http/httptest.lo \ - net/http/httputil.lo net/http/pprof.lo image/color.lo \ - image/color/palette.lo image/draw.lo image/gif.lo \ - image/jpeg.lo image/png.lo index/suffixarray.lo io/ioutil.lo \ - log/syslog.lo log/syslog/syslog_c.lo math/big.lo math/cmplx.lo \ - math/rand.lo mime/multipart.lo net/http.lo net/mail.lo \ - net/rpc.lo net/smtp.lo net/textproto.lo net/url.lo \ - old/regexp.lo old/template.lo os/exec.lo $(am__DEPENDENCIES_1) \ - os/signal.lo os/user.lo path/filepath.lo regexp/syntax.lo \ + net/http/httputil.lo net/http/internal.lo net/http/pprof.lo \ + image/color.lo image/color/palette.lo image/draw.lo \ + image/gif.lo image/jpeg.lo image/png.lo index/suffixarray.lo \ + internal/syscall.lo io/ioutil.lo log/syslog.lo \ + log/syslog/syslog_c.lo math/big.lo math/cmplx.lo math/rand.lo \ + mime/multipart.lo net/http.lo net/mail.lo net/rpc.lo \ + net/smtp.lo net/textproto.lo net/url.lo old/regexp.lo \ + old/template.lo os/exec.lo $(am__DEPENDENCIES_1) os/signal.lo \ + os/user.lo path/filepath.lo regexp/syntax.lo \ net/rpc/jsonrpc.lo runtime/debug.lo runtime/pprof.lo \ sync/atomic.lo sync/atomic_c.lo text/scanner.lo \ text/tabwriter.lo text/template.lo text/template/parse.lo \ @@ -218,15 +219,15 @@ am__objects_6 = go-append.lo go-assert.lo go-assert-interface.lo \ go-type-complex.lo go-type-eface.lo go-type-error.lo \ go-type-float.lo go-type-identity.lo go-type-interface.lo \ go-type-string.lo go-typedesc-equal.lo go-unsafe-new.lo \ - go-unsafe-newarray.lo go-unsafe-pointer.lo go-unwind.lo \ - go-varargs.lo env_posix.lo heapdump.lo $(am__objects_1) \ - mcache.lo mcentral.lo $(am__objects_2) mfixalloc.lo mgc0.lo \ - mheap.lo msize.lo $(am__objects_3) panic.lo parfor.lo print.lo \ - proc.lo runtime.lo signal_unix.lo thread.lo yield.lo \ - $(am__objects_4) chan.lo cpuprof.lo go-iface.lo lfstack.lo \ - malloc.lo map.lo mprof.lo netpoll.lo rdebug.lo reflect.lo \ - runtime1.lo sema.lo sigqueue.lo string.lo time.lo \ - $(am__objects_5) + go-unsafe-newarray.lo go-unsafe-pointer.lo go-unsetenv.lo \ + go-unwind.lo go-varargs.lo env_posix.lo heapdump.lo \ + $(am__objects_1) mcache.lo mcentral.lo $(am__objects_2) \ + mfixalloc.lo mgc0.lo mheap.lo msize.lo $(am__objects_3) \ + panic.lo parfor.lo print.lo proc.lo runtime.lo signal_unix.lo \ + thread.lo yield.lo $(am__objects_4) chan.lo cpuprof.lo \ + go-iface.lo lfstack.lo malloc.lo map.lo mprof.lo netpoll.lo \ + rdebug.lo reflect.lo runtime1.lo sema.lo sigqueue.lo 
string.lo \ + time.lo $(am__objects_5) am_libgo_llgo_la_OBJECTS = $(am__objects_6) libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS) libgo_llgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ @@ -838,6 +839,7 @@ runtime_files = \ runtime/go-unsafe-new.c \ runtime/go-unsafe-newarray.c \ runtime/go-unsafe-pointer.c \ + runtime/go-unsetenv.c \ runtime/go-unwind.c \ runtime/go-varargs.c \ runtime/env_posix.c \ @@ -992,7 +994,7 @@ go_mime_files = \ @LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sock_file = go/net/sock_bsd.go @LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_TRUE@@LIBGO_IS_SOLARIS_FALSE@go_net_sock_file = go/net/sock_bsd.go @LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sock_file = go/net/sock_bsd.go -@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@go_net_sock_file = go/net/sock_solaris.go +@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_SOLARIS_TRUE@go_net_sock_file = go/net/sock_stub.go @LIBGO_IS_IRIX_TRUE@@LIBGO_IS_LINUX_FALSE@go_net_sock_file = go/net/sock_linux.go @LIBGO_IS_LINUX_TRUE@go_net_sock_file = go/net/sock_linux.go @LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_IRIX_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_sockopt_file = go/net/sockopt_bsd.go @@ -1017,9 +1019,8 @@ go_mime_files = \ @LIBGO_IS_LINUX_TRUE@go_net_interface_file = go/net/interface_linux.go @LIBGO_IS_LINUX_FALSE@go_net_cloexec_file = go/net/sys_cloexec.go @LIBGO_IS_LINUX_TRUE@go_net_cloexec_file = go/net/sock_cloexec.go -@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_DRAGONFLY_FALSE@@LIBGO_IS_OPENBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_tcpsockopt_file = go/net/tcpsockopt_unix.go -@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_DRAGONFLY_TRUE@@LIBGO_IS_OPENBSD_FALSE@@LIBGO_IS_SOLARIS_FALSE@go_net_tcpsockopt_file = go/net/tcpsockopt_dragonfly.go -@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_OPENBSD_FALSE@@LIBGO_IS_SOLARIS_TRUE@go_net_tcpsockopt_file = go/net/tcpsockopt_solaris.go +@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_DRAGONFLY_FALSE@@LIBGO_IS_OPENBSD_FALSE@go_net_tcpsockopt_file = go/net/tcpsockopt_unix.go +@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_DRAGONFLY_TRUE@@LIBGO_IS_OPENBSD_FALSE@go_net_tcpsockopt_file = go/net/tcpsockopt_dragonfly.go @LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_OPENBSD_FALSE@go_net_tcpsockopt_file = go/net/tcpsockopt_darwin.go @LIBGO_IS_OPENBSD_TRUE@go_net_tcpsockopt_file = go/net/tcpsockopt_openbsd.go go_net_files = \ @@ -1180,7 +1181,6 @@ go_runtime_files = \ go/runtime/extern.go \ go/runtime/mem.go \ go/runtime/softfloat64.go \ - go/runtime/type.go \ version.go go_sort_files = \ @@ -1348,9 +1348,12 @@ go_crypto_md5_files = \ go/crypto/md5/md5block.go \ go/crypto/md5/md5block_generic.go +@LIBGO_IS_LINUX_FALSE@crypto_rand_file = +@LIBGO_IS_LINUX_TRUE@crypto_rand_file = go/crypto/rand/rand_linux.go go_crypto_rand_files = \ go/crypto/rand/rand.go \ go/crypto/rand/rand_unix.go \ + $(crypto_rand_file) \ go/crypto/rand/util.go go_crypto_rc4_files = \ @@ -1469,9 +1472,11 @@ go_encoding_csv_files = \ go_encoding_gob_files = \ go/encoding/gob/decode.go \ go/encoding/gob/decoder.go \ + go/encoding/gob/dec_helpers.go \ go/encoding/gob/doc.go \ go/encoding/gob/encode.go \ go/encoding/gob/encoder.go \ + go/encoding/gob/enc_helpers.go \ go/encoding/gob/error.go \ go/encoding/gob/type.go @@ -1649,7 +1654,6 @@ go_mime_multipart_files = \ go/mime/multipart/writer.go go_net_http_files = \ - go/net/http/chunked.go \ go/net/http/client.go \ 
go/net/http/cookie.go \ go/net/http/filetransport.go \ @@ -1702,12 +1706,14 @@ go_net_http_pprof_files = \ go/net/http/pprof/pprof.go go_net_http_httputil_files = \ - go/net/http/httputil/chunked.go \ go/net/http/httputil/dump.go \ go/net/http/httputil/httputil.go \ go/net/http/httputil/persist.go \ go/net/http/httputil/reverseproxy.go +go_net_http_internal_files = \ + go/net/http/internal/chunked.go + go_old_regexp_files = \ go/old/regexp/regexp.go @@ -1737,7 +1743,8 @@ go_path_filepath_files = \ go/path/filepath/match.go \ go/path/filepath/path.go \ go/path/filepath/path_unix.go \ - go/path/filepath/symlink.go + go/path/filepath/symlink.go \ + go/path/filepath/symlink_unix.go go_regexp_syntax_files = \ go/regexp/syntax/compile.go \ @@ -1775,7 +1782,8 @@ go_text_template_parse_files = \ go/text/template/parse/parse.go go_sync_atomic_files = \ - go/sync/atomic/doc.go + go/sync/atomic/doc.go \ + go/sync/atomic/value.go go_sync_atomic_c_files = \ go/sync/atomic/atomic.c @@ -1918,10 +1926,17 @@ go_syscall_c_files = \ go_syscall_test_files = \ $(syscall_creds_test_file) \ + go/syscall/export_test.go \ go/syscall/mmap_unix_test.go \ go/syscall/syscall_test.go \ go/syscall/syscall_unix_test.go +@LIBGO_IS_LINUX_FALSE@internal_syscall_getrandom_file = +@LIBGO_IS_LINUX_TRUE@internal_syscall_getrandom_file = go/internal/syscall/getrandom_linux.go +go_internal_syscall_files = \ + go/internal/syscall/dummy.go \ + $(internal_syscall_getrandom_file) + @LIBGO_IS_LINUX_FALSE@os_lib_inotify_lo = # os_lib_inotify_lo = os/inotify.lo @@ -2030,6 +2045,7 @@ libgo_go_objs = \ net/http/fcgi.lo \ net/http/httptest.lo \ net/http/httputil.lo \ + net/http/internal.lo \ net/http/pprof.lo \ image/color.lo \ image/color/palette.lo \ @@ -2038,6 +2054,7 @@ libgo_go_objs = \ image/jpeg.lo \ image/png.lo \ index/suffixarray.lo \ + internal/syscall.lo \ io/ioutil.lo \ log/syslog.lo \ log/syslog/syslog_c.lo \ @@ -2169,7 +2186,8 @@ CHECK_DEPS = $(toolexeclibgo_DATA) $(toolexeclibgoarchive_DATA) \ $(toolexeclibgounicode_DATA) $(am__append_1) $(am__append_2) # At least for now, we need -static-libgo for this test, because # otherwise we can't get the line numbers. -runtime_pprof_check_GOCFLAGS = -static-libgo +# Also use -fno-inline to get better results from the memory profiler. +runtime_pprof_check_GOCFLAGS = -static-libgo -fno-inline # How to build a .gox file from a .lo file. 
BUILDGOX = \ @@ -2279,6 +2297,7 @@ TEST_PACKAGES = \ net/http/fcgi/check \ net/http/httptest/check \ net/http/httputil/check \ + net/http/internal/check \ net/mail/check \ net/rpc/check \ net/smtp/check \ @@ -2515,6 +2534,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-unsafe-new.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-unsafe-newarray.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-unsafe-pointer.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-unsetenv.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-unwind.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-varargs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/heapdump.Plo@am__quote@ @@ -3031,6 +3051,13 @@ go-unsafe-pointer.lo: runtime/go-unsafe-pointer.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-unsafe-pointer.lo `test -f 'runtime/go-unsafe-pointer.c' || echo '$(srcdir)/'`runtime/go-unsafe-pointer.c +go-unsetenv.lo: runtime/go-unsetenv.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-unsetenv.lo -MD -MP -MF $(DEPDIR)/go-unsetenv.Tpo -c -o go-unsetenv.lo `test -f 'runtime/go-unsetenv.c' || echo '$(srcdir)/'`runtime/go-unsetenv.c +@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-unsetenv.Tpo $(DEPDIR)/go-unsetenv.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-unsetenv.c' object='go-unsetenv.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-unsetenv.lo `test -f 'runtime/go-unsetenv.c' || echo '$(srcdir)/'`runtime/go-unsetenv.c + go-unwind.lo: runtime/go-unwind.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-unwind.lo -MD -MP -MF $(DEPDIR)/go-unwind.Tpo -c -o go-unwind.lo `test -f 'runtime/go-unwind.c' || echo '$(srcdir)/'`runtime/go-unwind.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-unwind.Tpo $(DEPDIR)/go-unwind.Plo @@ -5498,6 +5525,15 @@ net/http/httputil/check: $(check_deps) @$(CHECK) .PHONY: net/http/httputil/check +@go_include@ net/http/internal.lo.dep +net/http/internal.lo.dep: $(go_net_http_internal_files) + $(BUILDDEPS) +net/http/internal.lo: $(go_net_http_internal_files) + $(BUILDPACKAGE) +net/http/internal/check: $(CHECK_DEPS) + @$(CHECK) +.PHONY: net/http/internal/check + @go_include@ net/http/pprof.lo.dep net/http/pprof.lo.dep: $(go_net_http_pprof_files) $(BUILDDEPS) @@ -5698,6 +5734,15 @@ syscall/check: $(CHECK_DEPS) @$(CHECK) .PHONY: syscall/check +@go_include@ internal/syscall.lo.dep +internal/syscall.lo.dep: $(go_internal_syscall_files) + $(BUILDDEPS) +internal/syscall.lo: $(go_internal_syscall_files) + $(BUILDPACKAGE) +internal/syscall/check: $(CHECK_DEPS) + @$(CHECK) +.PHONY: internal/syscall/check + bufio.gox: bufio.lo $(BUILDGOX) bytes.gox: bytes.lo @@ -5953,6 +5998,9 
@@ net/http/httputil.gox: net/http/httputil.lo net/http/pprof.gox: net/http/pprof.lo $(BUILDGOX) +net/http/internal.gox: net/http/internal.lo + $(BUILDGOX) + net/rpc/jsonrpc.gox: net/rpc/jsonrpc.lo $(BUILDGOX) @@ -5982,6 +6030,9 @@ runtime/pprof.gox: runtime/pprof.lo sync/atomic.gox: sync/atomic.lo $(BUILDGOX) +internal/syscall.gox: internal/syscall.lo + $(BUILDGOX) + text/scanner.gox: text/scanner.lo $(BUILDGOX) text/tabwriter.gox: text/tabwriter.lo diff --git a/libgo/config.h.in b/libgo/config.h.in index 9e622c64ff3..629c603e380 100644 --- a/libgo/config.h.in +++ b/libgo/config.h.in @@ -319,6 +319,9 @@ /* Define to 1 if you have the `unlinkat' function. */ #undef HAVE_UNLINKAT +/* Define to 1 if you have the `unsetenv' function. */ +#undef HAVE_UNSETENV + /* Define to 1 if you have the `unshare' function. */ #undef HAVE_UNSHARE diff --git a/libgo/configure b/libgo/configure index ae98e3d187d..b8e776f13eb 100755 --- a/libgo/configure +++ b/libgo/configure @@ -14805,7 +14805,7 @@ else fi -for ac_func in strerror_r strsignal wait4 mincore setenv dl_iterate_phdr +for ac_func in strerror_r strsignal wait4 mincore setenv unsetenv dl_iterate_phdr do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" diff --git a/libgo/configure.ac b/libgo/configure.ac index 6f2c6653130..4482dd07f68 100644 --- a/libgo/configure.ac +++ b/libgo/configure.ac @@ -551,7 +551,7 @@ fi AM_CONDITIONAL(HAVE_SYS_MMAN_H, test "$ac_cv_header_sys_mman_h" = yes) -AC_CHECK_FUNCS(strerror_r strsignal wait4 mincore setenv dl_iterate_phdr) +AC_CHECK_FUNCS(strerror_r strsignal wait4 mincore setenv unsetenv dl_iterate_phdr) AM_CONDITIONAL(HAVE_STRERROR_R, test "$ac_cv_func_strerror_r" = yes) AM_CONDITIONAL(HAVE_WAIT4, test "$ac_cv_func_wait4" = yes) diff --git a/libgo/go/archive/tar/reader.go b/libgo/go/archive/tar/reader.go index 920a9b08f90..a27559d0f04 100644 --- a/libgo/go/archive/tar/reader.go +++ b/libgo/go/archive/tar/reader.go @@ -29,10 +29,11 @@ const maxNanoSecondIntSize = 9 // The Next method advances to the next file in the archive (including the first), // and then it can be treated as an io.Reader to access the file's data. 
type Reader struct { - r io.Reader - err error - pad int64 // amount of padding (ignored) after current file entry - curr numBytesReader // reader for current file entry + r io.Reader + err error + pad int64 // amount of padding (ignored) after current file entry + curr numBytesReader // reader for current file entry + hdrBuff [blockSize]byte // buffer to use in readHeader } // A numBytesReader is an io.Reader with a numBytes method, returning the number @@ -426,7 +427,9 @@ func (tr *Reader) verifyChecksum(header []byte) bool { } func (tr *Reader) readHeader() *Header { - header := make([]byte, blockSize) + header := tr.hdrBuff[:] + copy(header, zeroBlock) + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { return nil } diff --git a/libgo/go/archive/tar/writer.go b/libgo/go/archive/tar/writer.go index 6eff6f6f84d..dafb2cabf37 100644 --- a/libgo/go/archive/tar/writer.go +++ b/libgo/go/archive/tar/writer.go @@ -37,8 +37,10 @@ type Writer struct { nb int64 // number of unwritten bytes for current file entry pad int64 // amount of padding to write after current file entry closed bool - usedBinary bool // whether the binary numeric field extension was used - preferPax bool // use pax header instead of binary numeric header + usedBinary bool // whether the binary numeric field extension was used + preferPax bool // use pax header instead of binary numeric header + hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header + paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header } // NewWriter creates a new Writer writing to w. @@ -160,7 +162,18 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { // subsecond time resolution, but for now let's just capture // too long fields or non ascii characters - header := make([]byte, blockSize) + var header []byte + + // We need to select which scratch buffer to use carefully, + // since this method is called recursively to write PAX headers. + // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. + // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is + // already being used by the non-recursive call, so we must use paxHdrBuff. 
+ header = tw.hdrBuff[:] + if !allowPax { + header = tw.paxHdrBuff[:] + } + copy(header, zeroBlock) s := slicer(header) // keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax diff --git a/libgo/go/archive/tar/writer_test.go b/libgo/go/archive/tar/writer_test.go index 512fab1a6f1..5e42e322f9c 100644 --- a/libgo/go/archive/tar/writer_test.go +++ b/libgo/go/archive/tar/writer_test.go @@ -454,3 +454,38 @@ func TestUSTARLongName(t *testing.T) { t.Fatal("Couldn't recover long name") } } + +func TestValidTypeflagWithPAXHeader(t *testing.T) { + var buffer bytes.Buffer + tw := NewWriter(&buffer) + + fileName := strings.Repeat("ab", 100) + + hdr := &Header{ + Name: fileName, + Size: 4, + Typeflag: 0, + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("Failed to write header: %s", err) + } + if _, err := tw.Write([]byte("fooo")); err != nil { + t.Fatalf("Failed to write the file's data: %s", err) + } + tw.Close() + + tr := NewReader(&buffer) + + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("Failed to read header: %s", err) + } + if header.Typeflag != 0 { + t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag) + } + } +} diff --git a/libgo/go/archive/zip/reader.go b/libgo/go/archive/zip/reader.go index 80ee03006f0..8136b840d45 100644 --- a/libgo/go/archive/zip/reader.go +++ b/libgo/go/archive/zip/reader.go @@ -267,8 +267,13 @@ func readDirectoryHeader(f *File, r io.Reader) error { b = b[size:] } // Should have consumed the whole header. - if len(b) != 0 { - return ErrFormat + // But popular zip & JAR creation tools are broken and + // may pad extra zeros at the end, so accept those + // too. See golang.org/issue/8186. + for _, v := range b { + if v != 0 { + return ErrFormat + } } } return nil diff --git a/libgo/go/archive/zip/reader_test.go b/libgo/go/archive/zip/reader_test.go index 5652f3a5007..29d0652dcc1 100644 --- a/libgo/go/archive/zip/reader_test.go +++ b/libgo/go/archive/zip/reader_test.go @@ -13,6 +13,7 @@ import ( "os" "path/filepath" "regexp" + "strings" "testing" "time" ) @@ -508,3 +509,25 @@ func returnRecursiveZip() (r io.ReaderAt, size int64) { b := rZipBytes() return bytes.NewReader(b), int64(len(b)) } + +func TestIssue8186(t *testing.T) { + // Directory headers & data found in the TOC of a JAR file. 
+ dirEnts := []string{ + "PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\xaa\x1b\x06\xf0\x81\x02\x00\x00\x81\x02\x00\x00-\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00res/drawable-xhdpi-v4/ic_actionbar_accept.png\xfe\xca\x00\x00\x00", + "PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\x90K\x89\xc7t\n\x00\x00t\n\x00\x00\x0e\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x02\x00\x00resources.arsc\x00\x00\x00", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xff$\x18\xed3\x03\x00\x00\xb4\b\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00t\r\x00\x00AndroidManifest.xml", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\x14\xc5K\xab\x192\x02\x00\xc8\xcd\x04\x00\v\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\x10\x00\x00classes.dex", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?E\x96\nD\xac\x01\x00\x00P\x03\x00\x00&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:C\x02\x00res/layout/actionbar_set_wallpaper.xml", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?Ļ\x14\xe3\xd8\x01\x00\x00\xd8\x03\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:E\x02\x00res/layout/wallpaper_cropper.xml", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?}\xc1\x15\x9eZ\x01\x00\x00!\x02\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`G\x02\x00META-INF/MANIFEST.MF", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xe6\x98Ьo\x01\x00\x00\x84\x02\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfcH\x02\x00META-INF/CERT.SF", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xbfP\x96b\x86\x04\x00\x00\xb2\x06\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa9J\x02\x00META-INF/CERT.RSA", + } + for i, s := range dirEnts { + var f File + err := readDirectoryHeader(&f, strings.NewReader(s)) + if err != nil { + t.Errorf("error reading #%d: %v", i, err) + } + } +} diff --git a/libgo/go/archive/zip/writer.go b/libgo/go/archive/zip/writer.go index 6c9800a78f7..170beec0eec 100644 --- a/libgo/go/archive/zip/writer.go +++ b/libgo/go/archive/zip/writer.go @@ -34,6 +34,12 @@ func NewWriter(w io.Writer) *Writer { return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}} } +// Flush flushes any buffered data to the underlying writer. +// Calling Flush is not normally necessary; calling Close is sufficient. +func (w *Writer) Flush() error { + return w.cw.w.(*bufio.Writer).Flush() +} + // Close finishes writing the zip file by writing the central directory. // It does not (and can not) close the underlying writer. 
func (w *Writer) Close() error { diff --git a/libgo/go/archive/zip/writer_test.go b/libgo/go/archive/zip/writer_test.go index 4bfa8708090..184a7d96a7f 100644 --- a/libgo/go/archive/zip/writer_test.go +++ b/libgo/go/archive/zip/writer_test.go @@ -6,6 +6,7 @@ package zip import ( "bytes" + "io" "io/ioutil" "math/rand" "os" @@ -86,6 +87,24 @@ func TestWriter(t *testing.T) { } } +func TestWriterFlush(t *testing.T) { + var buf bytes.Buffer + w := NewWriter(struct{ io.Writer }{&buf}) + _, err := w.Create("foo") + if err != nil { + t.Fatal(err) + } + if buf.Len() > 0 { + t.Fatalf("Unexpected %d bytes already in buffer", buf.Len()) + } + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if buf.Len() == 0 { + t.Fatal("No bytes written after Flush") + } +} + func testCreate(t *testing.T, w *Writer, wt *WriteTest) { header := &FileHeader{ Name: wt.Name, diff --git a/libgo/go/bufio/bufio.go b/libgo/go/bufio/bufio.go index 61ef2619100..d3c68fe6fe5 100644 --- a/libgo/go/bufio/bufio.go +++ b/libgo/go/bufio/bufio.go @@ -30,8 +30,8 @@ var ( // Reader implements buffering for an io.Reader object. type Reader struct { buf []byte - rd io.Reader - r, w int + rd io.Reader // reader provided by the client + r, w int // buf read and write positions err error lastByte int lastRuneSize int @@ -131,18 +131,17 @@ func (b *Reader) Peek(n int) ([]byte, error) { for b.w-b.r < n && b.err == nil { b.fill() // b.w-b.r < len(b.buf) => buffer is not full } - m := b.w - b.r - if m > n { - m = n - } + var err error - if m < n { + if avail := b.w - b.r; avail < n { + // not enough data in buffer + n = avail err = b.readErr() if err == nil { err = ErrBufferFull } } - return b.buf[b.r : b.r+m], err + return b.buf[b.r : b.r+n], err } // Read reads data into p. @@ -173,15 +172,13 @@ func (b *Reader) Read(p []byte) (n int, err error) { return n, b.readErr() } b.fill() // buffer is empty - if b.w == b.r { + if b.r == b.w { return 0, b.readErr() } } - if n > b.w-b.r { - n = b.w - b.r - } - copy(p[0:n], b.buf[b.r:]) + // copy as much as we can + n = copy(p, b.buf[b.r:b.w]) b.r += n b.lastByte = int(b.buf[b.r-1]) b.lastRuneSize = -1 @@ -288,7 +285,7 @@ func (b *Reader) ReadSlice(delim byte) (line []byte, err error) { } // Buffer full? - if n := b.Buffered(); n >= len(b.buf) { + if b.Buffered() >= len(b.buf) { b.r = b.w line = b.buf err = ErrBufferFull @@ -301,6 +298,7 @@ func (b *Reader) ReadSlice(delim byte) (line []byte, err error) { // Handle last byte, if any. if i := len(line) - 1; i >= 0 { b.lastByte = int(line[i]) + b.lastRuneSize = -1 } return @@ -458,11 +456,13 @@ func (b *Reader) WriteTo(w io.Writer) (n int64, err error) { return n, b.readErr() } +var errNegativeWrite = errors.New("bufio: writer returned negative count from Write") + // writeBuf writes the Reader's buffer to the writer. 
func (b *Reader) writeBuf(w io.Writer) (int64, error) { n, err := w.Write(b.buf[b.r:b.w]) - if n < b.r-b.w { - panic(errors.New("bufio: writer did not write all data")) + if n < 0 { + panic(errNegativeWrite) } b.r += n return int64(n), err diff --git a/libgo/go/bufio/bufio_test.go b/libgo/go/bufio/bufio_test.go index 76d3c8eade8..550dac9173f 100644 --- a/libgo/go/bufio/bufio_test.go +++ b/libgo/go/bufio/bufio_test.go @@ -31,9 +31,6 @@ func newRot13Reader(r io.Reader) *rot13Reader { func (r13 *rot13Reader) Read(p []byte) (int, error) { n, err := r13.r.Read(p) - if err != nil { - return n, err - } for i := 0; i < n; i++ { c := p[i] | 0x20 // lowercase byte if 'a' <= c && c <= 'm' { @@ -42,7 +39,7 @@ func (r13 *rot13Reader) Read(p []byte) (int, error) { p[i] -= 13 } } - return n, nil + return n, err } // Call ReadByte to accumulate the text of a file @@ -438,7 +435,7 @@ func TestUnreadRuneError(t *testing.T) { if err != nil { t.Error("unexpected error on ReadRune (2):", err) } - for _ = range buf { + for range buf { _, err = r.ReadByte() if err != nil { t.Error("unexpected error on ReadByte (2):", err) @@ -463,6 +460,18 @@ func TestUnreadRuneError(t *testing.T) { if r.UnreadRune() == nil { t.Error("expected error after UnreadByte (3)") } + // Test error after ReadSlice. + _, _, err = r.ReadRune() // reset state + if err != nil { + t.Error("unexpected error on ReadRune (4):", err) + } + _, err = r.ReadSlice(0) + if err != io.EOF { + t.Error("unexpected error on ReadSlice (4):", err) + } + if r.UnreadRune() == nil { + t.Error("expected error after ReadSlice (4)") + } } func TestUnreadRuneAtEOF(t *testing.T) { diff --git a/libgo/go/bufio/scan.go b/libgo/go/bufio/scan.go index 715ce071e3b..364d1596139 100644 --- a/libgo/go/bufio/scan.go +++ b/libgo/go/bufio/scan.go @@ -36,6 +36,7 @@ type Scanner struct { start int // First non-processed byte in buf. end int // End of data in buf. err error // Sticky error. + empties int // Count of successive empty tokens. } // SplitFunc is the signature of the split function used to tokenize the @@ -64,8 +65,9 @@ var ( ) const ( - // Maximum size used to buffer a token. The actual maximum token size - // may be smaller as the buffer may need to include, for instance, a newline. + // MaxScanTokenSize is the maximum size used to buffer a token. + // The actual maximum token size may be smaller as the buffer + // may need to include, for instance, a newline. MaxScanTokenSize = 64 * 1024 ) @@ -107,11 +109,15 @@ func (s *Scanner) Text() string { // After Scan returns false, the Err method will return any error that // occurred during scanning, except that if it was io.EOF, Err // will return nil. +// Split panics if the split function returns 100 empty tokens without +// advancing the input. This is a common error mode for scanners. func (s *Scanner) Scan() bool { // Loop until we have a token. for { // See if we can get a token with what we already have. - if s.end > s.start { + // If we've run out of data but have an error, give the split function + // a chance to recover any remaining, possibly empty token. + if s.end > s.start || s.err != nil { advance, token, err := s.split(s.buf[s.start:s.end], s.err != nil) if err != nil { s.setErr(err) @@ -122,6 +128,15 @@ func (s *Scanner) Scan() bool { } s.token = token if token != nil { + if s.err == nil || advance > 0 { + s.empties = 0 + } else { + // Returning tokens not advancing input at EOF. 
+ s.empties++ + if s.empties > 100 { + panic("bufio.Scan: 100 empty tokens without progressing") + } + } return true } } @@ -169,6 +184,7 @@ func (s *Scanner) Scan() bool { break } if n > 0 { + s.empties = 0 break } loop++ @@ -326,9 +342,6 @@ func ScanWords(data []byte, atEOF bool) (advance int, token []byte, err error) { break } } - if atEOF && len(data) == 0 { - return 0, nil, nil - } // Scan until space, marking end of word. for width, i := 0, start; i < len(data); i += width { var r rune @@ -342,5 +355,5 @@ func ScanWords(data []byte, atEOF bool) (advance int, token []byte, err error) { return len(data), data[start:], nil } // Request more data. - return 0, nil, nil + return start, nil, nil } diff --git a/libgo/go/bufio/scan_test.go b/libgo/go/bufio/scan_test.go index 0db7cad2047..eea87cbf7b3 100644 --- a/libgo/go/bufio/scan_test.go +++ b/libgo/go/bufio/scan_test.go @@ -15,6 +15,8 @@ import ( "unicode/utf8" ) +const smallMaxTokenSize = 256 // Much smaller for more efficient testing. + // Test white space table matches the Unicode definition. func TestSpace(t *testing.T) { for r := rune(0); r <= utf8.MaxRune; r++ { @@ -172,7 +174,6 @@ func genLine(buf *bytes.Buffer, lineNum, n int, addNewline bool) { // Test the line splitter, including some carriage returns but no long lines. func TestScanLongLines(t *testing.T) { - const smallMaxTokenSize = 256 // Much smaller for more efficient testing. // Build a buffer of lots of line lengths up to but not exceeding smallMaxTokenSize. tmp := new(bytes.Buffer) buf := new(bytes.Buffer) @@ -404,3 +405,120 @@ func TestBadReader(t *testing.T) { t.Errorf("unexpected error: %v", err) } } + +func TestScanWordsExcessiveWhiteSpace(t *testing.T) { + const word = "ipsum" + s := strings.Repeat(" ", 4*smallMaxTokenSize) + word + scanner := NewScanner(strings.NewReader(s)) + scanner.MaxTokenSize(smallMaxTokenSize) + scanner.Split(ScanWords) + if !scanner.Scan() { + t.Fatalf("scan failed: %v", scanner.Err()) + } + if token := scanner.Text(); token != word { + t.Fatalf("unexpected token: %v", token) + } +} + +// Test that empty tokens, including at end of line or end of file, are found by the scanner. +// Issue 8672: Could miss final empty token. 
+ +func commaSplit(data []byte, atEOF bool) (advance int, token []byte, err error) { + for i := 0; i < len(data); i++ { + if data[i] == ',' { + return i + 1, data[:i], nil + } + } + if !atEOF { + return 0, nil, nil + } + return 0, data, nil +} + +func TestEmptyTokens(t *testing.T) { + s := NewScanner(strings.NewReader("1,2,3,")) + values := []string{"1", "2", "3", ""} + s.Split(commaSplit) + var i int + for i = 0; i < len(values); i++ { + if !s.Scan() { + break + } + if s.Text() != values[i] { + t.Errorf("%d: expected %q got %q", i, values[i], s.Text()) + } + } + if i != len(values) { + t.Errorf("got %d fields, expected %d", i, len(values)) + } + if err := s.Err(); err != nil { + t.Fatal(err) + } +} + +func loopAtEOFSplit(data []byte, atEOF bool) (advance int, token []byte, err error) { + if len(data) > 0 { + return 1, data[:1], nil + } + return 0, data, nil +} + +func TestDontLoopForever(t *testing.T) { + s := NewScanner(strings.NewReader("abc")) + s.Split(loopAtEOFSplit) + // Expect a panic + defer func() { + err := recover() + if err == nil { + t.Fatal("should have panicked") + } + if msg, ok := err.(string); !ok || !strings.Contains(msg, "empty tokens") { + panic(err) + } + }() + for count := 0; s.Scan(); count++ { + if count > 1000 { + t.Fatal("looping") + } + } + if s.Err() != nil { + t.Fatal("after scan:", s.Err()) + } +} + +func TestBlankLines(t *testing.T) { + s := NewScanner(strings.NewReader(strings.Repeat("\n", 1000))) + for count := 0; s.Scan(); count++ { + if count > 2000 { + t.Fatal("looping") + } + } + if s.Err() != nil { + t.Fatal("after scan:", s.Err()) + } +} + +type countdown int + +func (c *countdown) split(data []byte, atEOF bool) (advance int, token []byte, err error) { + if *c > 0 { + *c-- + return 1, data[:1], nil + } + return 0, nil, nil +} + +// Check that the looping-at-EOF check doesn't trigger for merely empty tokens. +func TestEmptyLinesOK(t *testing.T) { + c := countdown(10000) + s := NewScanner(strings.NewReader(strings.Repeat("\n", 10000))) + s.Split(c.split) + for s.Scan() { + } + if s.Err() != nil { + t.Fatal("after scan:", s.Err()) + } + if c != 0 { + t.Fatalf("stopped with %d left to process", c) + } +} diff --git a/libgo/go/bytes/bytes.go b/libgo/go/bytes/bytes.go index 0c53e4c0b71..7634707b3cb 100644 --- a/libgo/go/bytes/bytes.go +++ b/libgo/go/bytes/bytes.go @@ -267,6 +267,8 @@ func Fields(s []byte) [][]byte { // It splits the slice s at each run of code points c satisfying f(c) and // returns a slice of subslices of s. If all code points in s satisfy f(c), or // len(s) == 0, an empty slice is returned. +// FieldsFunc makes no guarantees about the order in which it calls f(c). +// If f does not return consistent results for a given c, FieldsFunc may crash. func FieldsFunc(s []byte, f func(rune) bool) [][]byte { n := 0 inField := false @@ -377,9 +379,10 @@ func Map(mapping func(r rune) rune, s []byte) []byte { // Repeat returns a new byte slice consisting of count copies of b. func Repeat(b []byte, count int) []byte { nb := make([]byte, len(b)*count) - bp := 0 - for i := 0; i < count; i++ { - bp += copy(nb[bp:], b) + bp := copy(nb, b) + for bp < len(nb) { + copy(nb[bp:], nb[:bp]) + bp *= 2 } return nb } @@ -604,6 +607,9 @@ func Runes(s []byte) []rune { // Replace returns a copy of the slice s with the first n // non-overlapping instances of old replaced by new. +// If old is empty, it matches at the beginning of the slice +// and after each UTF-8 sequence, yielding up to k+1 replacements +// for a k-rune slice. 
// If n < 0, there is no limit on the number of replacements. func Replace(s, old, new []byte, n int) []byte { m := 0 diff --git a/libgo/go/bytes/bytes_test.go b/libgo/go/bytes/bytes_test.go index 394dd7a443d..980c41d754d 100644 --- a/libgo/go/bytes/bytes_test.go +++ b/libgo/go/bytes/bytes_test.go @@ -1232,3 +1232,9 @@ func BenchmarkTrimSpace(b *testing.B) { TrimSpace(s) } } + +func BenchmarkRepeat(b *testing.B) { + for i := 0; i < b.N; i++ { + Repeat([]byte("-"), 80) + } +} diff --git a/libgo/go/cmd/cgo/ast.go b/libgo/go/cmd/cgo/ast.go index 7757efa1bc3..10e2278a1d6 100644 --- a/libgo/go/cmd/cgo/ast.go +++ b/libgo/go/cmd/cgo/ast.go @@ -272,7 +272,7 @@ func (f *File) walk(x interface{}, context string, visit func(*File, interface{} case nil: - // These are ordered and grouped to match ../../pkg/go/ast/ast.go + // These are ordered and grouped to match ../../go/ast/ast.go case *ast.Field: if len(n.Names) == 0 && context == "field" { f.walk(&n.Type, "embed-type", visit) @@ -308,6 +308,9 @@ func (f *File) walk(x interface{}, context string, visit func(*File, interface{} if n.High != nil { f.walk(&n.High, "expr", visit) } + if n.Max != nil { + f.walk(&n.Max, "expr", visit) + } case *ast.TypeAssertExpr: f.walk(&n.X, "expr", visit) f.walk(&n.Type, "type", visit) diff --git a/libgo/go/cmd/cgo/doc.go b/libgo/go/cmd/cgo/doc.go index 69c7ce893c3..6179c7afd19 100644 --- a/libgo/go/cmd/cgo/doc.go +++ b/libgo/go/cmd/cgo/doc.go @@ -152,7 +152,7 @@ In C, a function argument written as a fixed size array actually requires a pointer to the first element of the array. C compilers are aware of this calling convention and adjust the call accordingly, but Go cannot. In Go, you must pass -the pointer to the first element explicitly: C.f(&x[0]). +the pointer to the first element explicitly: C.f(&C.x[0]). A few special functions convert between Go and C types by making copies of the data. In pseudo-Go definitions: diff --git a/libgo/go/cmd/cgo/gcc.go b/libgo/go/cmd/cgo/gcc.go index f55cfbac447..abdd369d713 100644 --- a/libgo/go/cmd/cgo/gcc.go +++ b/libgo/go/cmd/cgo/gcc.go @@ -229,7 +229,8 @@ func (p *Package) guessKinds(f *File) []*Name { // Determine kinds for names we already know about, // like #defines or 'struct foo', before bothering with gcc. var names, needType []*Name - for _, n := range f.Name { + for _, key := range nameKeys(f.Name) { + n := f.Name[key] // If we've already found this name as a #define // and we can translate it as a constant value, do so. 
if n.Define != "" { @@ -331,6 +332,7 @@ func (p *Package) guessKinds(f *File) []*Name { const ( notType = 1 << iota notConst + notDeclared ) for _, line := range strings.Split(stderr, "\n") { if !strings.Contains(line, ": error:") { @@ -365,7 +367,7 @@ func (p *Package) guessKinds(f *File) []*Name { completed = true case "not-declared": - error_(token.NoPos, "%s", strings.TrimSpace(line[c2+1:])) + sniff[i] |= notDeclared case "not-type": sniff[i] |= notType case "not-const": @@ -374,12 +376,12 @@ func (p *Package) guessKinds(f *File) []*Name { } if !completed { - fatalf("%s did not produce error at completed:1\non input:\n%s", p.gccBaseCmd()[0], b.Bytes()) + fatalf("%s did not produce error at completed:1\non input:\n%s\nfull error output:\n%s", p.gccBaseCmd()[0], b.Bytes(), stderr) } for i, n := range names { switch sniff[i] { - case 0: + default: error_(token.NoPos, "could not determine kind of name for C.%s", fixGo(n.Go)) case notType: n.Kind = "const" @@ -390,6 +392,14 @@ func (p *Package) guessKinds(f *File) []*Name { } } if nerrors > 0 { + // Check if compiling the preamble by itself causes any errors, + // because the messages we've printed out so far aren't helpful + // to users debugging preamble mistakes. See issue 8442. + preambleErrors := p.gccErrors([]byte(f.Preamble)) + if len(preambleErrors) > 0 { + error_(token.NoPos, "\n%s errors for preamble:\n%s", p.gccBaseCmd()[0], preambleErrors) + } + fatalf("unresolved names") } @@ -649,7 +659,13 @@ func (p *Package) rewriteRef(f *File) { f.Name[fpName] = name } r.Name = name - expr = ast.NewIdent(name.Mangle) + // Rewrite into call to _Cgo_ptr to prevent assignments. The _Cgo_ptr + // function is defined in out.go and simply returns its argument. See + // issue 7757. + expr = &ast.CallExpr{ + Fun: &ast.Ident{NamePos: (*r.Expr).Pos(), Name: "_Cgo_ptr"}, + Args: []ast.Expr{ast.NewIdent(name.Mangle)}, + } } else if r.Name.Kind == "type" { // Okay - might be new(T) expr = r.Name.Type.Go @@ -928,9 +944,8 @@ type typeConv struct { // Map from types to incomplete pointers to those types. ptrs map[dwarf.Type][]*Type - - // Fields to be processed by godefsField after completing pointers. - todoFlds [][]*ast.Field + // Keys of ptrs in insertion order (deterministic worklist) + ptrKeys []dwarf.Type // Predeclared types. bool ast.Expr @@ -940,9 +955,9 @@ type typeConv struct { float32, float64 ast.Expr complex64, complex128 ast.Expr void ast.Expr - unsafePointer ast.Expr string ast.Expr goVoid ast.Expr // _Ctype_void, denotes C's void + goVoidPtr ast.Expr // unsafe.Pointer or *byte ptrSize int64 intSize int64 @@ -972,10 +987,17 @@ func (c *typeConv) Init(ptrSize, intSize int64) { c.float64 = c.Ident("float64") c.complex64 = c.Ident("complex64") c.complex128 = c.Ident("complex128") - c.unsafePointer = c.Ident("unsafe.Pointer") c.void = c.Ident("void") c.string = c.Ident("string") c.goVoid = c.Ident("_Ctype_void") + + // Normally cgo translates void* to unsafe.Pointer, + // but for historical reasons -cdefs and -godefs use *byte instead. + if *cdefs || *godefs { + c.goVoidPtr = &ast.StarExpr{X: c.byte} + } else { + c.goVoidPtr = c.Ident("unsafe.Pointer") + } } // base strips away qualifiers and typedefs to get the underlying type @@ -1037,29 +1059,22 @@ func (tr *TypeRepr) Set(repr string, fargs ...interface{}) { } // FinishType completes any outstanding type mapping work. -// In particular, it resolves incomplete pointer types and also runs -// godefsFields on any new struct types. +// In particular, it resolves incomplete pointer types. 
func (c *typeConv) FinishType(pos token.Pos) { // Completing one pointer type might produce more to complete. // Keep looping until they're all done. - for len(c.ptrs) > 0 { - for dtype := range c.ptrs { - // Note Type might invalidate c.ptrs[dtype]. - t := c.Type(dtype, pos) - for _, ptr := range c.ptrs[dtype] { - ptr.Go.(*ast.StarExpr).X = t.Go - ptr.C.Set("%s*", t.C) - } - delete(c.ptrs, dtype) - } - } + for len(c.ptrKeys) > 0 { + dtype := c.ptrKeys[0] + c.ptrKeys = c.ptrKeys[1:] - // Now that pointer types are completed, we can invoke godefsFields - // to rewrite struct definitions. - for _, fld := range c.todoFlds { - godefsFields(fld) + // Note Type might invalidate c.ptrs[dtype]. + t := c.Type(dtype, pos) + for _, ptr := range c.ptrs[dtype] { + ptr.Go.(*ast.StarExpr).X = t.Go + ptr.C.Set("%s*", t.C) + } + c.ptrs[dtype] = nil // retain the map key } - c.todoFlds = nil } // Type returns a *Type with the same memory layout as @@ -1072,12 +1087,6 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type { return t } - // clang won't generate DW_AT_byte_size for pointer types, - // so we have to fix it here. - if dt, ok := base(dtype).(*dwarf.PtrType); ok && dt.ByteSize == -1 { - dt.ByteSize = c.ptrSize - } - t := new(Type) t.Size = dtype.Size() // note: wrong for array of pointers, corrected below t.Align = -1 @@ -1101,12 +1110,20 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type { t.Go = c.Opaque(t.Size) break } + count := dt.Count + if count == -1 { + // Indicates flexible array member, which Go doesn't support. + // Translate to zero-length array instead. + count = 0 + } sub := c.Type(dt.Type, pos) t.Align = sub.Align t.Go = &ast.ArrayType{ - Len: c.intExpr(dt.Count), + Len: c.intExpr(count), Elt: sub.Go, } + // Recalculate t.Size now that we know sub.Size. + t.Size = count * sub.Size t.C.Set("__typeof__(%s[%d])", sub.C, dt.Count) case *dwarf.BoolType: @@ -1207,11 +1224,15 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type { } case *dwarf.PtrType: + // Clang doesn't emit DW_AT_byte_size for pointer types. + if t.Size != c.ptrSize && t.Size != -1 { + fatalf("%s: unexpected: %d-byte pointer type - %s", lineno(pos), t.Size, dtype) + } + t.Size = c.ptrSize t.Align = c.ptrSize - // Translate void* as unsafe.Pointer if _, ok := base(dt.Type).(*dwarf.VoidType); ok { - t.Go = c.unsafePointer + t.Go = c.goVoidPtr t.C.Set("void*") break } @@ -1219,6 +1240,9 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type { // Placeholder initialization; completed in FinishType. t.Go = &ast.StarExpr{} t.C.Set("<incomplete>*") + if _, ok := c.ptrs[dt.Type]; !ok { + c.ptrKeys = append(c.ptrKeys, dt.Type) + } c.ptrs[dt.Type] = append(c.ptrs[dt.Type], t) case *dwarf.QualType: @@ -1379,34 +1403,24 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type { } } - if t.Size <= 0 { - // Clang does not record the size of a pointer in its DWARF entry, - // so if dtype is an array, the call to dtype.Size at the top of the function - // computed the size as the array length * 0 = 0. - // The type switch called Type (this function) recursively on the pointer - // entry, and the code near the top of the function updated the size to - // be correct, so calling dtype.Size again will produce the correct value. - t.Size = dtype.Size() - if t.Size < 0 { - // Unsized types are [0]byte, unless they're typedefs of other types - // or structs with tags. - // if so, use the name we've already defined. 
- t.Size = 0 - switch dt := dtype.(type) { - case *dwarf.TypedefType: - // ok - case *dwarf.StructType: - if dt.StructName != "" { - break - } - t.Go = c.Opaque(0) - default: - t.Go = c.Opaque(0) - } - if t.C.Empty() { - t.C.Set("void") + if t.Size < 0 { + // Unsized types are [0]byte, unless they're typedefs of other types + // or structs with tags. + // if so, use the name we've already defined. + t.Size = 0 + switch dt := dtype.(type) { + case *dwarf.TypedefType: + // ok + case *dwarf.StructType: + if dt.StructName != "" { + break } - return t + t.Go = c.Opaque(0) + default: + t.Go = c.Opaque(0) + } + if t.C.Empty() { + t.C.Set("void") } } @@ -1538,6 +1552,9 @@ func (c *typeConv) pad(fld []*ast.Field, size int64) []*ast.Field { // Struct conversion: return Go and (6g) C syntax for type. func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.StructType, csyntax string, align int64) { + // Minimum alignment for a struct is 1 byte. + align = 1 + var buf bytes.Buffer buf.WriteString("struct {") fld := make([]*ast.Field, 0, 2*len(dt.Field)+1) // enough for padding around every field @@ -1579,7 +1596,27 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct fld = c.pad(fld, f.ByteOffset-off) off = f.ByteOffset } - t := c.Type(f.Type, pos) + + name := f.Name + ft := f.Type + + // In godefs or cdefs mode, if this field is a C11 + // anonymous union then treat the first field in the + // union as the field in the struct. This handles + // cases like the glibc <sys/resource.h> file; see + // issue 6677. + if *godefs || *cdefs { + if st, ok := f.Type.(*dwarf.StructType); ok && name == "" && st.Kind == "union" && len(st.Field) > 0 && !used[st.Field[0].Name] { + name = st.Field[0].Name + ident[name] = name + ft = st.Field[0].Type + } + } + + // TODO: Handle fields that are anonymous structs by + // promoting the fields of the inner struct. + + t := c.Type(ft, pos) tgo := t.Go size := t.Size talign := t.Align @@ -1598,17 +1635,18 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct talign = size } - if talign > 0 && f.ByteOffset%talign != 0 { + if talign > 0 && f.ByteOffset%talign != 0 && !*cdefs { // Drop misaligned fields, the same way we drop integer bit fields. // The goal is to make available what can be made available. // Otherwise one bad and unneeded field in an otherwise okay struct // makes the whole program not compile. Much of the time these // structs are in system headers that cannot be corrected. + // Exception: In -cdefs mode, we use #pragma pack, so misaligned + // fields should still work. 
continue } n := len(fld) fld = fld[0 : n+1] - name := f.Name if name == "" { name = fmt.Sprintf("anon%d", anon) anon++ @@ -1635,7 +1673,7 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct csyntax = buf.String() if *godefs || *cdefs { - c.todoFlds = append(c.todoFlds, fld) + godefsFields(fld) } expr = &ast.StructType{Fields: &ast.FieldList{List: fld}} return @@ -1673,19 +1711,6 @@ func godefsFields(fld []*ast.Field) { n.Name = upper(n.Name) } } - p := &f.Type - t := *p - if star, ok := t.(*ast.StarExpr); ok { - star = &ast.StarExpr{X: star.X} - *p = star - p = &star.X - t = *p - } - if id, ok := t.(*ast.Ident); ok { - if id.Name == "unsafe.Pointer" { - *p = ast.NewIdent("*byte") - } - } } } diff --git a/libgo/go/cmd/cgo/out.go b/libgo/go/cmd/cgo/out.go index 76c7247af0d..d92bed9bf01 100644 --- a/libgo/go/cmd/cgo/out.go +++ b/libgo/go/cmd/cgo/out.go @@ -44,6 +44,7 @@ func (p *Package) writeDefs() { fmt.Fprintf(fm, "int main() { return 0; }\n") if *importRuntimeCgo { fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int), void *a, int c) { }\n") + fmt.Fprintf(fm, "char* _cgo_topofstack(void) { return (char*)0; }\n") } else { // If we're not importing runtime/cgo, we *are* runtime/cgo, // which provides crosscall2. We just need a prototype. @@ -58,16 +59,14 @@ func (p *Package) writeDefs() { fmt.Fprintf(fgo2, "// Created by cgo - DO NOT EDIT\n\n") fmt.Fprintf(fgo2, "package %s\n\n", p.PackageName) fmt.Fprintf(fgo2, "import \"unsafe\"\n\n") - if *importSyscall { - fmt.Fprintf(fgo2, "import \"syscall\"\n\n") - } if !*gccgo && *importRuntimeCgo { fmt.Fprintf(fgo2, "import _ \"runtime/cgo\"\n\n") } - fmt.Fprintf(fgo2, "type _ unsafe.Pointer\n\n") if *importSyscall { - fmt.Fprintf(fgo2, "func _Cerrno(dst *error, x int32) { *dst = syscall.Errno(x) }\n") + fmt.Fprintf(fgo2, "import \"syscall\"\n\n") + fmt.Fprintf(fgo2, "var _ syscall.Errno\n") } + fmt.Fprintf(fgo2, "func _Cgo_ptr(ptr unsafe.Pointer) unsafe.Pointer { return ptr }\n\n") typedefNames := make([]string, 0, len(typedef)) for name := range typedef { @@ -87,9 +86,10 @@ func (p *Package) writeDefs() { } if *gccgo { - fmt.Fprintf(fc, p.cPrologGccgo()) + fmt.Fprint(fc, p.cPrologGccgo()) } else { - fmt.Fprintf(fc, cProlog) + fmt.Fprint(fc, cProlog) + fmt.Fprint(fgo2, goProlog) } gccgoSymbolPrefix := p.gccgoSymbolPrefix() @@ -130,6 +130,7 @@ func (p *Package) writeDefs() { fmt.Fprintf(fc, `extern void *%s __asm__("%s.%s");`, n.Mangle, gccgoSymbolPrefix, n.Mangle) fmt.Fprintf(&gccgoInit, "\t%s = %s%s;\n", n.Mangle, amp, n.C) } else { + fmt.Fprintf(fc, "#pragma dataflag NOPTR /* C pointer, not heap pointer */ \n") fmt.Fprintf(fc, "void *·%s = %s%s;\n", n.Mangle, amp, n.C) } fmt.Fprintf(fc, "\n") @@ -296,10 +297,6 @@ func (p *Package) structType(n *Name) (string, int64) { fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad) off += pad } - if n.AddError { - fmt.Fprint(&buf, "\t\tint e[2*sizeof(void *)/sizeof(int)]; /* error */\n") - off += 2 * p.PtrSize - } if off == 0 { fmt.Fprintf(&buf, "\t\tchar unused;\n") // avoid empty struct } @@ -334,19 +331,18 @@ func (p *Package) writeDefsFunc(fc, fgo2 *os.File, n *Name) { } // Builtins defined in the C prolog. 
- inProlog := name == "CString" || name == "GoString" || name == "GoStringN" || name == "GoBytes" || name == "_CMalloc" + inProlog := builtinDefs[name] != "" + cname := fmt.Sprintf("_cgo%s%s", cPrefix, n.Mangle) + paramnames := []string(nil) + for i, param := range d.Type.Params.List { + paramName := fmt.Sprintf("p%d", i) + param.Names = []*ast.Ident{ast.NewIdent(paramName)} + paramnames = append(paramnames, paramName) + } if *gccgo { // Gccgo style hooks. fmt.Fprint(fgo2, "\n") - cname := fmt.Sprintf("_cgo%s%s", cPrefix, n.Mangle) - paramnames := []string(nil) - for i, param := range d.Type.Params.List { - paramName := fmt.Sprintf("p%d", i) - param.Names = []*ast.Ident{ast.NewIdent(paramName)} - paramnames = append(paramnames, paramName) - } - conf.Fprint(fgo2, fset, d) fmt.Fprint(fgo2, " {\n") if !inProlog { @@ -383,7 +379,7 @@ func (p *Package) writeDefsFunc(fc, fgo2 *os.File, n *Name) { fmt.Fprint(fgo2, "}\n") // declare the C function. - fmt.Fprintf(fgo2, "//extern _cgo%s%s\n", cPrefix, n.Mangle) + fmt.Fprintf(fgo2, "//extern %s\n", cname) d.Name = ast.NewIdent(cname) if n.AddError { l := d.Type.Results.List @@ -394,61 +390,50 @@ func (p *Package) writeDefsFunc(fc, fgo2 *os.File, n *Name) { return } - conf.Fprint(fgo2, fset, d) - fmt.Fprint(fgo2, "\n") if inProlog { + fmt.Fprint(fgo2, builtinDefs[name]) return } - var argSize int64 - _, argSize = p.structType(n) - // C wrapper calls into gcc, passing a pointer to the argument frame. - fmt.Fprintf(fc, "#pragma cgo_import_static _cgo%s%s\n", cPrefix, n.Mangle) - fmt.Fprintf(fc, "void _cgo%s%s(void*);\n", cPrefix, n.Mangle) - fmt.Fprintf(fc, "\n") - fmt.Fprintf(fc, "void\n") - if argSize == 0 { - argSize++ + fmt.Fprintf(fc, "#pragma cgo_import_static %s\n", cname) + fmt.Fprintf(fc, "void %s(void*);\n", cname) + fmt.Fprintf(fc, "#pragma dataflag NOPTR\n") + fmt.Fprintf(fc, "void *·%s = %s;\n", cname, cname) + + nret := 0 + if !void { + d.Type.Results.List[0].Names = []*ast.Ident{ast.NewIdent("r1")} + nret = 1 } - // TODO(rsc): The struct here should declare pointers only where - // there are pointers in the actual argument frame. - // This is a workaround for golang.org/issue/6397. - fmt.Fprintf(fc, "·%s(struct{", n.Mangle) - if n := argSize / p.PtrSize; n > 0 { - fmt.Fprintf(fc, "void *y[%d];", n) + if n.AddError { + d.Type.Results.List[nret].Names = []*ast.Ident{ast.NewIdent("r2")} } - if n := argSize % p.PtrSize; n > 0 { - fmt.Fprintf(fc, "uint8 x[%d];", n) + + fmt.Fprint(fgo2, "\n") + fmt.Fprintf(fgo2, "var %s unsafe.Pointer\n", cname) + conf.Fprint(fgo2, fset, d) + fmt.Fprint(fgo2, " {\n") + + // NOTE: Using uintptr to hide from escape analysis. + arg := "0" + if len(paramnames) > 0 { + arg = "uintptr(unsafe.Pointer(&p0))" + } else if !void { + arg = "uintptr(unsafe.Pointer(&r1))" } - fmt.Fprintf(fc, "}p)\n") - fmt.Fprintf(fc, "{\n") - fmt.Fprintf(fc, "\truntime·cgocall(_cgo%s%s, &p);\n", cPrefix, n.Mangle) + + prefix := "" if n.AddError { - // gcc leaves errno in first word of interface at end of p. - // check whether it is zero; if so, turn interface into nil. - // if not, turn interface into errno. - // Go init function initializes ·_Cerrno with an os.Errno - // for us to copy. 
- fmt.Fprintln(fc, ` { - int32 e; - void **v; - v = (void**)(&p+1) - 2; /* v = final two void* of p */ - e = *(int32*)v; - v[0] = (void*)0xdeadbeef; - v[1] = (void*)0xdeadbeef; - if(e == 0) { - /* nil interface */ - v[0] = 0; - v[1] = 0; - } else { - ·_Cerrno(v, e); /* fill in v as error for errno e */ - } - }`) + prefix = "errno := " } - fmt.Fprintf(fc, "}\n") - fmt.Fprintf(fc, "\n") + fmt.Fprintf(fgo2, "\t%s_cgo_runtime_cgocall_errno(%s, %s)\n", prefix, cname, arg) + if n.AddError { + fmt.Fprintf(fgo2, "\tif errno != 0 { r2 = syscall.Errno(errno) }\n") + } + fmt.Fprintf(fgo2, "\treturn\n") + fmt.Fprintf(fgo2, "}\n") } // writeOutput creates stubs for a specific source file to be compiled by 6g @@ -521,7 +506,11 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) { // Gcc wrapper unpacks the C argument struct // and calls the actual C function. - fmt.Fprintf(fgcc, "void\n") + if n.AddError { + fmt.Fprintf(fgcc, "int\n") + } else { + fmt.Fprintf(fgcc, "void\n") + } fmt.Fprintf(fgcc, "_cgo%s%s(void *v)\n", cPrefix, n.Mangle) fmt.Fprintf(fgcc, "{\n") if n.AddError { @@ -531,9 +520,13 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) { // Use packed attribute to force no padding in this struct in case // gcc has different packing requirements. fmt.Fprintf(fgcc, "\t%s %v *a = v;\n", ctype, p.packedAttribute()) + if n.FuncType.Result != nil { + // Save the stack top for use below. + fmt.Fprintf(fgcc, "\tchar *stktop = _cgo_topofstack();\n") + } fmt.Fprintf(fgcc, "\t") if t := n.FuncType.Result; t != nil { - fmt.Fprintf(fgcc, "a->r = ") + fmt.Fprintf(fgcc, "__typeof__(a->r) r = ") if c := t.C.String(); c[len(c)-1] == '*' { fmt.Fprint(fgcc, "(__typeof__(a->r)) ") } @@ -556,8 +549,15 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) { fmt.Fprintf(fgcc, "a->p%d", i) } fmt.Fprintf(fgcc, ");\n") + if n.FuncType.Result != nil { + // The cgo call may have caused a stack copy (via a callback). + // Adjust the return value pointer appropriately. + fmt.Fprintf(fgcc, "\ta = (void*)((char*)a + (_cgo_topofstack() - stktop));\n") + // Save the return value. + fmt.Fprintf(fgcc, "\ta->r = r;\n") + } if n.AddError { - fmt.Fprintf(fgcc, "\t*(int*)(a->e) = errno;\n") + fmt.Fprintf(fgcc, "\treturn errno;\n") } fmt.Fprintf(fgcc, "}\n") fmt.Fprintf(fgcc, "\n") @@ -1016,7 +1016,7 @@ func forFieldList(fl *ast.FieldList, fn func(int, ast.Expr)) { fn(i, r.Type) i++ } else { - for _ = range r.Names { + for range r.Names { fn(i, r.Type) i++ } @@ -1143,21 +1143,17 @@ __cgo_size_assert(__cgo_long_long, 8) __cgo_size_assert(float, 4) __cgo_size_assert(double, 8) +extern char* _cgo_topofstack(void); + #include <errno.h> #include <string.h> ` const builtinProlog = ` -#include <sys/types.h> /* for size_t below */ +#include <stddef.h> /* for ptrdiff_t and size_t below */ /* Define intgo when compiling with GCC. 
*/ -#ifdef __PTRDIFF_TYPE__ -typedef __PTRDIFF_TYPE__ intgo; -#elif defined(_LP64) -typedef long long intgo; -#else -typedef int intgo; -#endif +typedef ptrdiff_t intgo; typedef struct { char *p; intgo n; } _GoString_; typedef struct { char *p; intgo n; intgo c; } _GoBytes_; @@ -1171,47 +1167,86 @@ void *_CMalloc(size_t); const cProlog = ` #include "runtime.h" #include "cgocall.h" +#include "textflag.h" + +#pragma dataflag NOPTR +static void *cgocall_errno = runtime·cgocall_errno; +#pragma dataflag NOPTR +void *·_cgo_runtime_cgocall_errno = &cgocall_errno; + +#pragma dataflag NOPTR +static void *runtime_gostring = runtime·gostring; +#pragma dataflag NOPTR +void *·_cgo_runtime_gostring = &runtime_gostring; + +#pragma dataflag NOPTR +static void *runtime_gostringn = runtime·gostringn; +#pragma dataflag NOPTR +void *·_cgo_runtime_gostringn = &runtime_gostringn; + +#pragma dataflag NOPTR +static void *runtime_gobytes = runtime·gobytes; +#pragma dataflag NOPTR +void *·_cgo_runtime_gobytes = &runtime_gobytes; + +#pragma dataflag NOPTR +static void *runtime_cmalloc = runtime·cmalloc; +#pragma dataflag NOPTR +void *·_cgo_runtime_cmalloc = &runtime_cmalloc; void ·_Cerrno(void*, int32); +` -void -·_Cfunc_GoString(int8 *p, String s) -{ - s = runtime·gostring((byte*)p); - FLUSH(&s); +const goProlog = ` +var _cgo_runtime_cgocall_errno func(unsafe.Pointer, uintptr) int32 +var _cgo_runtime_cmalloc func(uintptr) unsafe.Pointer +` + +const goStringDef = ` +var _cgo_runtime_gostring func(*_Ctype_char) string +func _Cfunc_GoString(p *_Ctype_char) string { + return _cgo_runtime_gostring(p) } +` -void -·_Cfunc_GoStringN(int8 *p, int32 l, String s) -{ - s = runtime·gostringn((byte*)p, l); - FLUSH(&s); +const goStringNDef = ` +var _cgo_runtime_gostringn func(*_Ctype_char, int) string +func _Cfunc_GoStringN(p *_Ctype_char, l _Ctype_int) string { + return _cgo_runtime_gostringn(p, int(l)) } +` -void -·_Cfunc_GoBytes(int8 *p, int32 l, Slice s) -{ - s = runtime·gobytes((byte*)p, l); - FLUSH(&s); +const goBytesDef = ` +var _cgo_runtime_gobytes func(unsafe.Pointer, int) []byte +func _Cfunc_GoBytes(p unsafe.Pointer, l _Ctype_int) []byte { + return _cgo_runtime_gobytes(p, int(l)) } +` -void -·_Cfunc_CString(String s, int8 *p) -{ - p = runtime·cmalloc(s.len+1); - runtime·memmove((byte*)p, s.str, s.len); - p[s.len] = 0; - FLUSH(&p); +const cStringDef = ` +func _Cfunc_CString(s string) *_Ctype_char { + p := _cgo_runtime_cmalloc(uintptr(len(s)+1)) + pp := (*[1<<30]byte)(p) + copy(pp[:], s) + pp[len(s)] = 0 + return (*_Ctype_char)(p) } +` -void -·_Cfunc__CMalloc(uintptr n, int8 *p) -{ - p = runtime·cmalloc(n); - FLUSH(&p); +const cMallocDef = ` +func _Cfunc__CMalloc(n _Ctype_size_t) unsafe.Pointer { + return _cgo_runtime_cmalloc(uintptr(n)) } ` +var builtinDefs = map[string]string{ + "GoString": goStringDef, + "GoStringN": goStringNDef, + "GoBytes": goBytesDef, + "CString": cStringDef, + "_CMalloc": cMallocDef, +} + func (p *Package) cPrologGccgo() string { return strings.Replace(cPrologGccgo, "PREFIX", cPrefix, -1) } diff --git a/libgo/go/cmd/go/build.go b/libgo/go/cmd/go/build.go index cead0faa97d..95b9804d3de 100644 --- a/libgo/go/cmd/go/build.go +++ b/libgo/go/cmd/go/build.go @@ -57,6 +57,7 @@ and test commands: -a force rebuilding of packages that are already up-to-date. + In Go releases, does not apply to the standard library. -n print the commands but do not run them. -p n @@ -64,7 +65,7 @@ and test commands: The default is the number of CPUs available. -race enable data race detection. 
- Supported only on linux/amd64, darwin/amd64 and windows/amd64. + Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64. -v print the names of packages as they are compiled. -work @@ -291,23 +292,26 @@ func runBuild(cmd *Command, args []string) { } } + depMode := modeBuild + if buildI { + depMode = modeInstall + } + if *buildO != "" { if len(pkgs) > 1 { fatalf("go build: cannot use -o with multiple packages") + } else if len(pkgs) == 0 { + fatalf("no packages to build") } p := pkgs[0] p.target = "" // must build - not up to date - a := b.action(modeInstall, modeBuild, p) + a := b.action(modeInstall, depMode, p) a.target = *buildO b.do(a) return } a := &action{} - depMode := modeBuild - if buildI { - depMode = modeInstall - } for _, p := range packages(args) { a.deps = append(a.deps, b.action(modeBuild, depMode, p)) } @@ -438,12 +442,11 @@ const ( ) var ( - goroot = filepath.Clean(runtime.GOROOT()) - gobin = os.Getenv("GOBIN") - gorootBin = filepath.Join(goroot, "bin") - gorootSrcPkg = filepath.Join(goroot, "src/pkg") - gorootPkg = filepath.Join(goroot, "pkg") - gorootSrc = filepath.Join(goroot, "src") + goroot = filepath.Clean(runtime.GOROOT()) + gobin = os.Getenv("GOBIN") + gorootBin = filepath.Join(goroot, "bin") + gorootPkg = filepath.Join(goroot, "pkg") + gorootSrc = filepath.Join(goroot, "src") ) func (b *builder) init() { @@ -510,8 +513,13 @@ func goFilesPackage(gofiles []string) *Package { } ctxt.ReadDir = func(string) ([]os.FileInfo, error) { return dirent, nil } - if !filepath.IsAbs(dir) { - dir = filepath.Join(cwd, dir) + var err error + if dir == "" { + dir = cwd + } + dir, err = filepath.Abs(dir) + if err != nil { + fatalf("%s", err) } bp, err := ctxt.ImportDir(dir, 0) @@ -833,12 +841,17 @@ func (b *builder) build(a *action) (err error) { } } - var gofiles, cfiles, sfiles, objects, cgoObjects []string + var gofiles, cfiles, sfiles, objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string gofiles = append(gofiles, a.p.GoFiles...) cfiles = append(cfiles, a.p.CFiles...) sfiles = append(sfiles, a.p.SFiles...) + if a.p.usesCgo() || a.p.usesSwig() { + if pcCFLAGS, pcLDFLAGS, err = b.getPkgConfigFlags(a.p); err != nil { + return + } + } // Run cgo. if a.p.usesCgo() { // In a package using cgo, cgo compiles the C, C++ and assembly files with gcc. @@ -869,7 +882,7 @@ func (b *builder) build(a *action) (err error) { if a.cgo != nil && a.cgo.target != "" { cgoExe = a.cgo.target } - outGo, outObj, err := b.cgo(a.p, cgoExe, obj, gccfiles, a.p.CXXFiles, a.p.MFiles) + outGo, outObj, err := b.cgo(a.p, cgoExe, obj, pcCFLAGS, pcLDFLAGS, gccfiles, a.p.CXXFiles, a.p.MFiles) if err != nil { return err } @@ -882,9 +895,18 @@ func (b *builder) build(a *action) (err error) { // In a package using SWIG, any .c or .s files are // compiled with gcc. gccfiles := append(cfiles, sfiles...) + cxxfiles, mfiles := a.p.CXXFiles, a.p.MFiles cfiles = nil sfiles = nil - outGo, outObj, err := b.swig(a.p, obj, gccfiles, a.p.CXXFiles, a.p.MFiles) + + // Don't build c/c++ files twice if cgo is enabled (mainly for pkg-config). 
+ if a.p.usesCgo() { + cxxfiles = nil + gccfiles = nil + mfiles = nil + } + + outGo, outObj, err := b.swig(a.p, obj, pcCFLAGS, gccfiles, cxxfiles, mfiles) if err != nil { return err } @@ -893,7 +915,7 @@ func (b *builder) build(a *action) (err error) { } if len(gofiles) == 0 { - return &build.NoGoError{a.p.Dir} + return &build.NoGoError{Dir: a.p.Dir} } // If we're doing coverage, preprocess the .go files and put them in the work directory @@ -1028,6 +1050,34 @@ func (b *builder) build(a *action) (err error) { return nil } +// Calls pkg-config if needed and returns the cflags/ldflags needed to build the package. +func (b *builder) getPkgConfigFlags(p *Package) (cflags, ldflags []string, err error) { + if pkgs := p.CgoPkgConfig; len(pkgs) > 0 { + var out []byte + out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--cflags", pkgs) + if err != nil { + b.showOutput(p.Dir, "pkg-config --cflags "+strings.Join(pkgs, " "), string(out)) + b.print(err.Error() + "\n") + err = errPrintedOutput + return + } + if len(out) > 0 { + cflags = strings.Fields(string(out)) + } + out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--libs", pkgs) + if err != nil { + b.showOutput(p.Dir, "pkg-config --libs "+strings.Join(pkgs, " "), string(out)) + b.print(err.Error() + "\n") + err = errPrintedOutput + return + } + if len(out) > 0 { + ldflags = strings.Fields(string(out)) + } + } + return +} + // install is the action for installing a single package or executable. func (b *builder) install(a *action) (err error) { defer func() { @@ -1263,7 +1313,7 @@ func (b *builder) showcmd(dir string, format string, args ...interface{}) { // the source directory for the package that has failed to build. // showOutput rewrites mentions of dir with a relative path to dir // when the relative path is shorter. This is usually more pleasant. -// For example, if fmt doesn't compile and we are in src/pkg/html, +// For example, if fmt doesn't compile and we are in src/html, // the output is // // $ go build @@ -1275,7 +1325,7 @@ func (b *builder) showcmd(dir string, format string, args ...interface{}) { // // $ go build // # fmt -// /usr/gopher/go/src/pkg/fmt/print.go:1090: undefined: asdf +// /usr/gopher/go/src/fmt/print.go:1090: undefined: asdf // $ // // showOutput also replaces references to the work directory with $WORK. @@ -1435,6 +1485,14 @@ func (b *builder) runOut(dir string, desc string, env []string, cmdargs ...inter continue } + // err can be something like 'exit status 1'. + // Add information about what program was running. + // Note that if buf.Bytes() is non-empty, the caller usually + // shows buf.Bytes() and does not print err at all, so the + // prefix here does not make most output any more verbose. + if err != nil { + err = errors.New(cmdline[0] + ": " + err.Error()) + } return buf.Bytes(), err } } @@ -1597,7 +1655,7 @@ func (gcToolchain) gc(b *builder, p *Package, archive, obj string, importArgs [] extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles) if p.Standard { switch p.ImportPath { - case "os", "runtime/pprof", "sync", "time": + case "bytes", "net", "os", "runtime/pprof", "sync", "time": extFiles++ } } @@ -1621,8 +1679,10 @@ func (gcToolchain) gc(b *builder, p *Package, archive, obj string, importArgs [] } func (gcToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error { + // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files. 
+ inc := filepath.Join(goroot, "pkg", fmt.Sprintf("%s_%s", goos, goarch)) sfile = mkAbs(p.Dir, sfile) - return b.run(p.Dir, p.ImportPath, nil, tool(archChar+"a"), "-trimpath", b.work, "-I", obj, "-o", ofile, "-D", "GOOS_"+goos, "-D", "GOARCH_"+goarch, sfile) + return b.run(p.Dir, p.ImportPath, nil, tool(archChar+"a"), "-trimpath", b.work, "-I", obj, "-I", inc, "-o", ofile, "-D", "GOOS_"+goos, "-D", "GOARCH_"+goarch, sfile) } func (gcToolchain) pkgpath(basedir string, p *Package) string { @@ -1716,7 +1776,7 @@ func packInternal(b *builder, afile string, ofiles []string) error { func (gcToolchain) ld(b *builder, p *Package, out string, allactions []*action, mainpkg string, ofiles []string) error { importArgs := b.includeArgs("-L", allactions) - cxx := false + cxx := len(p.CXXFiles) > 0 for _, a := range allactions { if a.p != nil && len(a.p.CXXFiles) > 0 { cxx = true @@ -1776,7 +1836,15 @@ func (gcToolchain) ld(b *builder, p *Package, out string, allactions []*action, func (gcToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error { inc := filepath.Join(goroot, "pkg", fmt.Sprintf("%s_%s", goos, goarch)) cfile = mkAbs(p.Dir, cfile) - args := stringList(tool(archChar+"c"), "-F", "-V", "-w", "-trimpath", b.work, "-I", objdir, "-I", inc, "-o", ofile, buildCcflags, "-D", "GOOS_"+goos, "-D", "GOARCH_"+goarch, cfile) + warn := []string{"-w"} + if p.usesSwig() { + // When using SWIG, this compiler is only used to + // compile the C files generated by SWIG. + // We don't want warnings. + // See issue 9065 for details. + warn = nil + } + args := stringList(tool(archChar+"c"), "-F", "-V", warn, "-trimpath", b.work, "-I", objdir, "-I", inc, "-o", ofile, buildCcflags, "-D", "GOOS_"+goos, "-D", "GOARCH_"+goarch, cfile) return b.run(p.Dir, p.ImportPath, nil, args) } @@ -1802,7 +1870,7 @@ func (gccgoToolchain) linker() string { } func (tools gccgoToolchain) gc(b *builder, p *Package, archive, obj string, importArgs []string, gofiles []string) (ofile string, output []byte, err error) { - out := p.Name + ".o" + out := "_go_.o" ofile = obj + out gcargs := []string{"-g"} gcargs = append(gcargs, b.gccArchArgs()...) @@ -1828,6 +1896,7 @@ func (tools gccgoToolchain) asm(b *builder, p *Package, obj, ofile, sfile string defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`) } defs = append(defs, b.gccArchArgs()...) + return b.run(p.Dir, p.ImportPath, nil, tools.compiler(), "-c", "-I", obj, "-o", ofile, defs, sfile) } @@ -1854,8 +1923,8 @@ func (tools gccgoToolchain) ld(b *builder, p *Package, out string, allactions [] ldflags := b.gccArchArgs() cgoldflags := []string{} usesCgo := false - cxx := false - objc := false + cxx := len(p.CXXFiles) > 0 + objc := len(p.MFiles) > 0 // Prefer the output of an install action to the output of a build action, // because the install action will delete the output of the build action. @@ -1917,8 +1986,7 @@ func (gccgoToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) er if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" { defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`) } - // TODO: Support using clang here (during gccgo build)? 
- return b.run(p.Dir, p.ImportPath, nil, "gcc", "-Wall", "-g", + return b.run(p.Dir, p.ImportPath, nil, envList("CC", defaultCC), "-Wall", "-g", "-I", objdir, "-I", inc, "-o", ofile, defs, "-c", cfile) } @@ -1969,9 +2037,9 @@ func (b *builder) libgcc(p *Package) (string, error) { return "$LIBGCC", nil } - // clang might not be able to find libgcc, and in that case, + // The compiler might not be able to find libgcc, and in that case, // it will simply return "libgcc.a", which is of no use to us. - if strings.Contains(gccCmd[0], "clang") && !filepath.IsAbs(string(f)) { + if !filepath.IsAbs(string(f)) { return "", nil } @@ -2109,36 +2177,16 @@ var ( cgoLibGccFileOnce sync.Once ) -func (b *builder) cgo(p *Package, cgoExe, obj string, gccfiles, gxxfiles, mfiles []string) (outGo, outObj []string, err error) { +func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, gccfiles, gxxfiles, mfiles []string) (outGo, outObj []string, err error) { cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoLDFLAGS := b.cflags(p, true) _, cgoexeCFLAGS, _, _ := b.cflags(p, false) - + cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) + cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...) // If we are compiling Objective-C code, then we need to link against libobjc if len(mfiles) > 0 { cgoLDFLAGS = append(cgoLDFLAGS, "-lobjc") } - if pkgs := p.CgoPkgConfig; len(pkgs) > 0 { - out, err := b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--cflags", pkgs) - if err != nil { - b.showOutput(p.Dir, "pkg-config --cflags "+strings.Join(pkgs, " "), string(out)) - b.print(err.Error() + "\n") - return nil, nil, errPrintedOutput - } - if len(out) > 0 { - cgoCPPFLAGS = append(cgoCPPFLAGS, strings.Fields(string(out))...) - } - out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--libs", pkgs) - if err != nil { - b.showOutput(p.Dir, "pkg-config --libs "+strings.Join(pkgs, " "), string(out)) - b.print(err.Error() + "\n") - return nil, nil, errPrintedOutput - } - if len(out) > 0 { - cgoLDFLAGS = append(cgoLDFLAGS, strings.Fields(string(out))...) - } - } - // Allows including _cgo_export.h from .[ch] files in the package. cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", obj) @@ -2215,6 +2263,14 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, gccfiles, gxxfiles, mfiles strings.HasSuffix(f, ".so"), strings.HasSuffix(f, ".dll"): continue + // Remove any -fsanitize=foo flags. + // Otherwise the compiler driver thinks that we are doing final link + // and links sanitizer runtime into the object file. But we are not doing + // the final link, we will link the resulting object file again. And + // so the program ends up with two copies of sanitizer runtime. + // See issue 8788 for details. + case strings.HasPrefix(f, "-fsanitize="): + continue default: bareLDFLAGS = append(bareLDFLAGS, f) } @@ -2281,13 +2337,14 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, gccfiles, gxxfiles, mfiles linkobj = append(linkobj, p.SysoFiles...) 
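The pkg-config plumbing above is driven by a package's CgoPkgConfig directives. A minimal, hypothetical cgo package that exercises it (libpng is only an example module name; any installed pkg-config module behaves the same way):

    package png

    // #cgo pkg-config: libpng
    // #include <png.h>
    import "C"

    // Version reports the libpng header version the package was built against.
    func Version() string {
        return C.GoString(C.png_get_header_ver(nil))
    }

With this change the flags returned by pkg-config are gathered once in getPkgConfigFlags, the CFLAGS are also passed to SWIG compilations, and a package that mixes cgo and SWIG no longer compiles its C/C++ files twice.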
dynobj := obj + "_cgo_.o" - if goarch == "arm" && goos == "linux" { // we need to use -pie for Linux/ARM to get accurate imported sym + pie := goarch == "arm" && (goos == "linux" || goos == "android") + if pie { // we need to use -pie for Linux/ARM to get accurate imported sym cgoLDFLAGS = append(cgoLDFLAGS, "-pie") } if err := b.gccld(p, dynobj, cgoLDFLAGS, linkobj); err != nil { return nil, nil, err } - if goarch == "arm" && goos == "linux" { // but we don't need -pie for normal cgo programs + if pie { // but we don't need -pie for normal cgo programs cgoLDFLAGS = cgoLDFLAGS[0 : len(cgoLDFLAGS)-1] } @@ -2321,7 +2378,23 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, gccfiles, gxxfiles, mfiles nonGccObjs = append(nonGccObjs, f) } } - if err := b.gccld(p, ofile, stringList(bareLDFLAGS, "-Wl,-r", "-nostdlib", staticLibs), gccObjs); err != nil { + ldflags := stringList(bareLDFLAGS, "-Wl,-r", "-nostdlib", staticLibs) + + // Some systems, such as Ubuntu, always add --build-id to + // every link, but we don't want a build ID since we are + // producing an object file. On some of those system a plain + // -r (not -Wl,-r) will turn off --build-id, but clang 3.0 + // doesn't support a plain -r. I don't know how to turn off + // --build-id when using clang other than passing a trailing + // --build-id=none. So that is what we do, but only on + // systems likely to support it, which is to say, systems that + // normally use gold or the GNU linker. + switch goos { + case "android", "dragonfly", "linux", "netbsd": + ldflags = append(ldflags, "-Wl,--build-id=none") + } + + if err := b.gccld(p, ofile, ldflags, gccObjs); err != nil { return nil, nil, err } @@ -2336,7 +2409,7 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, gccfiles, gxxfiles, mfiles // Run SWIG on all SWIG input files. // TODO: Don't build a shared library, once SWIG emits the necessary // pragmas for external linking. -func (b *builder) swig(p *Package, obj string, gccfiles, gxxfiles, mfiles []string) (outGo, outObj []string, err error) { +func (b *builder) swig(p *Package, obj string, pcCFLAGS, gccfiles, gxxfiles, mfiles []string) (outGo, outObj []string, err error) { cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _ := b.cflags(p, true) cflags := stringList(cgoCPPFLAGS, cgoCFLAGS) cxxflags := stringList(cgoCPPFLAGS, cgoCXXFLAGS) @@ -2377,7 +2450,7 @@ func (b *builder) swig(p *Package, obj string, gccfiles, gxxfiles, mfiles []stri } for _, f := range p.SwigFiles { - goFile, objFile, gccObjFile, err := b.swigOne(p, f, obj, false, intgosize) + goFile, objFile, gccObjFile, err := b.swigOne(p, f, obj, pcCFLAGS, false, intgosize) if err != nil { return nil, nil, err } @@ -2392,7 +2465,7 @@ func (b *builder) swig(p *Package, obj string, gccfiles, gxxfiles, mfiles []stri } } for _, f := range p.SwigCXXFiles { - goFile, objFile, gccObjFile, err := b.swigOne(p, f, obj, true, intgosize) + goFile, objFile, gccObjFile, err := b.swigOne(p, f, obj, pcCFLAGS, true, intgosize) if err != nil { return nil, nil, err } @@ -2471,13 +2544,13 @@ func (b *builder) swigIntSize(obj string) (intsize string, err error) { } // Run SWIG on one SWIG input file. 
-func (b *builder) swigOne(p *Package, file, obj string, cxx bool, intgosize string) (outGo, outObj, objGccObj string, err error) { +func (b *builder) swigOne(p *Package, file, obj string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outObj, objGccObj string, err error) { cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _ := b.cflags(p, true) var cflags []string if cxx { - cflags = stringList(cgoCPPFLAGS, cgoCXXFLAGS) + cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS) } else { - cflags = stringList(cgoCPPFLAGS, cgoCFLAGS) + cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCFLAGS) } n := 5 // length of ".swig" @@ -2503,6 +2576,13 @@ func (b *builder) swigOne(p *Package, file, obj string, cxx bool, intgosize stri "-o", obj + gccBase + gccExt, "-outdir", obj, } + + for _, f := range cflags { + if len(f) > 3 && f[:2] == "-I" { + args = append(args, f) + } + } + if gccgo { args = append(args, "-gccgo") if pkgpath := gccgoPkgpath(p); pkgpath != "" { @@ -2575,8 +2655,8 @@ func raceInit() { if !buildRace { return } - if goarch != "amd64" || goos != "linux" && goos != "darwin" && goos != "windows" { - fmt.Fprintf(os.Stderr, "go %s: -race is only supported on linux/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0]) + if goarch != "amd64" || goos != "linux" && goos != "freebsd" && goos != "darwin" && goos != "windows" { + fmt.Fprintf(os.Stderr, "go %s: -race is only supported on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0]) os.Exit(2) } buildGcflags = append(buildGcflags, "-race") diff --git a/libgo/go/cmd/go/doc.go b/libgo/go/cmd/go/doc.go index 9840804ce74..d0d8a8a5b2e 100644 --- a/libgo/go/cmd/go/doc.go +++ b/libgo/go/cmd/go/doc.go @@ -19,6 +19,7 @@ The commands are: env print Go environment information fix run go tool fix on packages fmt run gofmt on package sources + generate generate Go files by processing source get download and install packages and dependencies install compile and install packages and dependencies list list packages @@ -75,6 +76,7 @@ and test commands: -a force rebuilding of packages that are already up-to-date. + In Go releases, does not apply to the standard library. -n print the commands but do not run them. -p n @@ -82,7 +84,7 @@ and test commands: The default is the number of CPUs available. -race enable data race detection. - Supported only on linux/amd64, darwin/amd64 and windows/amd64. + Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64. -v print the names of packages as they are compiled. -work @@ -219,11 +221,110 @@ To run gofmt with specific options, run gofmt itself. See also: go fix, go vet. +Generate Go files by processing source + +Usage: + + go generate [-run regexp] [file.go... | packages] + +Generate runs commands described by directives within existing +files. Those commands can run any process but the intent is to +create or update Go source files, for instance by running yacc. + +Go generate is never run automatically by go build, go get, go test, +and so on. It must be run explicitly. + +Go generate scans the file for directives, which are lines of +the form, + + //go:generate command argument... + +(note: no leading spaces and no space in "//go") where command +is the generator to be run, corresponding to an executable file +that can be run locally. It must either be in the shell path +(gofmt), a fully qualified path (/usr/you/bin/mytool), or a +command alias, described below. 
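A typical use, assuming the golang.org/x/tools stringer tool is installed and on the shell path (the package and type here are only an example):

    package painkiller

    //go:generate stringer -type=Pill

    type Pill int

    const (
        Placebo Pill = iota
        Aspirin
        Ibuprofen
    )

Running "go generate" in this package runs "stringer -type=Pill", which writes a pill_string.go file defining the String method for Pill; "go build" on its own never runs the directive.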
+ +Note that go generate does not parse the file, so lines that look +like directives in comments or multiline strings will be treated +as directives. + +The arguments to the directive are space-separated tokens or +double-quoted strings passed to the generator as individual +arguments when it is run. + +Quoted strings use Go syntax and are evaluated before execution; a +quoted string appears as a single argument to the generator. + +Go generate sets several variables when it runs the generator: + + $GOARCH + The execution architecture (arm, amd64, etc.) + $GOOS + The execution operating system (linux, windows, etc.) + $GOFILE + The base name of the file. + $GOPACKAGE + The name of the package of the file containing the directive. + +Other than variable substitution and quoted-string evaluation, no +special processing such as "globbing" is performed on the command +line. + +As a last step before running the command, any invocations of any +environment variables with alphanumeric names, such as $GOFILE or +$HOME, are expanded throughout the command line. The syntax for +variable expansion is $NAME on all operating systems. Due to the +order of evaluation, variables are expanded even inside quoted +strings. If the variable NAME is not set, $NAME expands to the +empty string. + +A directive of the form, + + //go:generate -command xxx args... + +specifies, for the remainder of this source file only, that the +string xxx represents the command identified by the arguments. This +can be used to create aliases or to handle multiword generators. +For example, + + //go:generate -command yacc go tool yacc + +specifies that the command "yacc" represents the generator +"go tool yacc". + +Generate processes packages in the order given on the command line, +one at a time. If the command line lists .go files, they are treated +as a single package. Within a package, generate processes the +source files in a package in file name order, one at a time. Within +a source file, generate runs generators in the order they appear +in the file, one at a time. + +If any generator returns an error exit status, "go generate" skips +all further processing for that package. + +The generator is run in the package's source directory. + +Go generate accepts one specific flag: + + -run="" + if non-empty, specifies a regular expression to + select directives whose command matches the expression. + +It also accepts the standard build flags -v, -n, and -x. +The -v flag prints the names of packages and files as they are +processed. +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. + +For more about specifying packages, see 'go help packages'. + + Download and install packages and dependencies Usage: - go get [-d] [-fix] [-t] [-u] [build flags] [packages] + go get [-d] [-f] [-fix] [-t] [-u] [build flags] [packages] Get downloads and installs the packages named by the import paths, along with their dependencies. @@ -231,6 +332,11 @@ along with their dependencies. The -d flag instructs get to stop after downloading the packages; that is, it instructs get not to install the packages. +The -f flag, valid only when -u is set, forces get -u not to verify that +each package has been checked out from the source control repository +implied by its import path. This can be useful if the source is a local fork +of the original. + The -fix flag instructs get to run the fix tool on the downloaded packages before resolving dependencies or building the code. 
@@ -291,28 +397,29 @@ syntax of package template. The default output is equivalent to -f '{{.ImportPath}}'. The struct being passed to the template is: type Package struct { - Dir string // directory containing package sources - ImportPath string // import path of package in dir - Name string // package name - Doc string // package documentation string - Target string // install path - Goroot bool // is this package in the Go root? - Standard bool // is this package part of the standard Go library? - Stale bool // would 'go install' do anything for this package? - Root string // Go root or Go path dir containing this package + Dir string // directory containing package sources + ImportPath string // import path of package in dir + ImportComment string // path in import comment on package statement + Name string // package name + Doc string // package documentation string + Target string // install path + Goroot bool // is this package in the Go root? + Standard bool // is this package part of the standard Go library? + Stale bool // would 'go install' do anything for this package? + Root string // Go root or Go path dir containing this package // Source files - GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) - CgoFiles []string // .go sources files that import "C" + GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) + CgoFiles []string // .go sources files that import "C" IgnoredGoFiles []string // .go sources ignored due to build constraints - CFiles []string // .c source files - CXXFiles []string // .cc, .cxx and .cpp source files - MFiles []string // .m source files - HFiles []string // .h, .hh, .hpp and .hxx source files - SFiles []string // .s source files - SwigFiles []string // .swig files - SwigCXXFiles []string // .swigcxx files - SysoFiles []string // .syso object files to add to archive + CFiles []string // .c source files + CXXFiles []string // .cc, .cxx and .cpp source files + MFiles []string // .m source files + HFiles []string // .h, .hh, .hpp and .hxx source files + SFiles []string // .s source files + SwigFiles []string // .swig files + SwigCXXFiles []string // .swigcxx files + SysoFiles []string // .syso object files to add to archive // Cgo directives CgoCFLAGS []string // cgo: flags for C compiler @@ -431,16 +538,23 @@ non-test installation. In addition to the build flags, the flags handled by 'go test' itself are: - -c Compile the test binary to pkg.test but do not run it. - (Where pkg is the last element of the package's import path.) + -c + Compile the test binary to pkg.test but do not run it + (where pkg is the last element of the package's import path). + The file name can be changed with the -o flag. + + -exec xprog + Run the test binary using xprog. The behavior is the same as + in 'go run'. See 'go help run' for details. -i Install packages that are dependencies of the test. Do not run the test. - -exec xprog - Run the test binary using xprog. The behavior is the same as - in 'go run'. See 'go help run' for details. + -o file + Compile the test binary to the named file. + The test still runs (unless -c or -i is specified). + The test binary also accepts flags that control execution of the test; these flags are also accessible by 'go test'. See 'go help testflag' for details. @@ -488,7 +602,7 @@ Usage: Vet runs the Go vet command on the packages named by the import paths. -For more about vet, see 'godoc code.google.com/p/go.tools/cmd/vet'. 
+For more about vet, see 'godoc golang.org/x/tools/cmd/vet'. For more about specifying packages, see 'go help packages'. To run the vet tool with specific options, run 'go tool vet'. @@ -681,6 +795,11 @@ A few common code hosting sites have special syntax: import "launchpad.net/~user/project/branch" import "launchpad.net/~user/project/branch/sub/directory" + IBM DevOps Services (Git) + + import "hub.jazz.net/git/user/project" + import "hub.jazz.net/git/user/project/sub/directory" + For code hosted on other servers, import paths may either be qualified with the version control type, or the go tool can dynamically fetch the import path over https/http and discover where the code resides @@ -756,7 +875,26 @@ listed in the GOPATH environment variable (see 'go help gopath'). The go command attempts to download the version of the package appropriate for the Go release being used. -Run 'go help install' for more. +Run 'go help get' for more. + +Import path checking + +When the custom import path feature described above redirects to a +known code hosting site, each of the resulting packages has two possible +import paths, using the custom domain or the known hosting site. + +A package statement is said to have an "import comment" if it is immediately +followed (before the next newline) by a comment of one of these two forms: + + package math // import "path" + package math /* import "path" * / + +The go command will refuse to install a package with an import comment +unless it is being referred to by that import path. In this way, import comments +let package authors make sure the custom import path is used and not a +direct path to the underlying code hosting site. + +See https://golang.org/s/go14customimport for details. Description of package lists @@ -812,7 +950,8 @@ single directory, the command is applied to a single synthesized package made up of exactly those files, ignoring any build constraints in those files and ignoring any other files in the directory. -File names that begin with "." or "_" are ignored by the go tool. +Directory and file names that begin with "." or "_" are ignored +by the go tool, as are directories named "testdata". Description of testing flags @@ -844,6 +983,7 @@ control the execution of any test: -blockprofile block.out Write a goroutine blocking profile to the specified file when all tests are complete. + Writes test binary as -c would. -blockprofilerate n Control the detail provided in goroutine blocking profiles by @@ -875,8 +1015,7 @@ control the execution of any test: Sets -cover. -coverprofile cover.out - Write a coverage profile to the specified file after all tests - have passed. + Write a coverage profile to the file after all tests have passed. Sets -cover. -cpu 1,2,4 @@ -886,10 +1025,11 @@ control the execution of any test: -cpuprofile cpu.out Write a CPU profile to the specified file before exiting. + Writes test binary as -c would. -memprofile mem.out - Write a memory profile to the specified file after all tests - have passed. + Write a memory profile to the file after all tests have passed. + Writes test binary as -c would. -memprofilerate n Enable more precise (and expensive) memory profiles by setting diff --git a/libgo/go/cmd/go/generate.go b/libgo/go/cmd/go/generate.go new file mode 100644 index 00000000000..baf4d2b55c1 --- /dev/null +++ b/libgo/go/cmd/go/generate.go @@ -0,0 +1,398 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +var cmdGenerate = &Command{ + Run: runGenerate, + UsageLine: "generate [-run regexp] [file.go... | packages]", + Short: "generate Go files by processing source", + Long: ` +Generate runs commands described by directives within existing +files. Those commands can run any process but the intent is to +create or update Go source files, for instance by running yacc. + +Go generate is never run automatically by go build, go get, go test, +and so on. It must be run explicitly. + +Go generate scans the file for directives, which are lines of +the form, + + //go:generate command argument... + +(note: no leading spaces and no space in "//go") where command +is the generator to be run, corresponding to an executable file +that can be run locally. It must either be in the shell path +(gofmt), a fully qualified path (/usr/you/bin/mytool), or a +command alias, described below. + +Note that go generate does not parse the file, so lines that look +like directives in comments or multiline strings will be treated +as directives. + +The arguments to the directive are space-separated tokens or +double-quoted strings passed to the generator as individual +arguments when it is run. + +Quoted strings use Go syntax and are evaluated before execution; a +quoted string appears as a single argument to the generator. + +Go generate sets several variables when it runs the generator: + + $GOARCH + The execution architecture (arm, amd64, etc.) + $GOOS + The execution operating system (linux, windows, etc.) + $GOFILE + The base name of the file. + $GOPACKAGE + The name of the package of the file containing the directive. + +Other than variable substitution and quoted-string evaluation, no +special processing such as "globbing" is performed on the command +line. + +As a last step before running the command, any invocations of any +environment variables with alphanumeric names, such as $GOFILE or +$HOME, are expanded throughout the command line. The syntax for +variable expansion is $NAME on all operating systems. Due to the +order of evaluation, variables are expanded even inside quoted +strings. If the variable NAME is not set, $NAME expands to the +empty string. + +A directive of the form, + + //go:generate -command xxx args... + +specifies, for the remainder of this source file only, that the +string xxx represents the command identified by the arguments. This +can be used to create aliases or to handle multiword generators. +For example, + + //go:generate -command yacc go tool yacc + +specifies that the command "yacc" represents the generator +"go tool yacc". + +Generate processes packages in the order given on the command line, +one at a time. If the command line lists .go files, they are treated +as a single package. Within a package, generate processes the +source files in a package in file name order, one at a time. Within +a source file, generate runs generators in the order they appear +in the file, one at a time. + +If any generator returns an error exit status, "go generate" skips +all further processing for that package. + +The generator is run in the package's source directory. + +Go generate accepts one specific flag: + + -run="" + if non-empty, specifies a regular expression to + select directives whose command matches the expression. + +It also accepts the standard build flags -v, -n, and -x. 
+The -v flag prints the names of packages and files as they are +processed. +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. + +For more about specifying packages, see 'go help packages'. + `, +} + +var generateRunFlag string // generate -run flag + +func init() { + addBuildFlags(cmdGenerate) + cmdGenerate.Flag.StringVar(&generateRunFlag, "run", "", "") +} + +func runGenerate(cmd *Command, args []string) { + // Even if the arguments are .go files, this loop suffices. + for _, pkg := range packages(args) { + for _, file := range pkg.gofiles { + if !generate(pkg.Name, file) { + break + } + } + } +} + +// generate runs the generation directives for a single file. +func generate(pkg, absFile string) bool { + fd, err := os.Open(absFile) + if err != nil { + log.Fatalf("generate: %s", err) + } + defer fd.Close() + g := &Generator{ + r: fd, + path: absFile, + pkg: pkg, + commands: make(map[string][]string), + } + return g.run() +} + +// A Generator represents the state of a single Go source file +// being scanned for generator commands. +type Generator struct { + r io.Reader + path string // full rooted path name. + dir string // full rooted directory of file. + file string // base name of file. + pkg string + commands map[string][]string + lineNum int +} + +// run runs the generators in the current file. +func (g *Generator) run() (ok bool) { + // Processing below here calls g.errorf on failure, which does panic(stop). + // If we encounter an error, we abort the package. + defer func() { + e := recover() + if e != nil { + ok = false + if e != stop { + panic(e) + } + setExitStatus(1) + } + }() + g.dir, g.file = filepath.Split(g.path) + g.dir = filepath.Clean(g.dir) // No final separator please. + if buildV { + fmt.Fprintf(os.Stderr, "%s\n", shortPath(g.path)) + } + + // Scan for lines that start "//go:generate". + // Can't use bufio.Scanner because it can't handle long lines, + // which are likely to appear when using generate. + input := bufio.NewReader(g.r) + var err error + // One line per loop. + for { + g.lineNum++ // 1-indexed. + var buf []byte + buf, err = input.ReadSlice('\n') + if err == bufio.ErrBufferFull { + // Line too long - consume and ignore. + if isGoGenerate(buf) { + g.errorf("directive too long") + } + for err == bufio.ErrBufferFull { + _, err = input.ReadSlice('\n') + } + if err != nil { + break + } + continue + } + + if err != nil { + // Check for marker at EOF without final \n. + if err == io.EOF && isGoGenerate(buf) { + err = io.ErrUnexpectedEOF + } + break + } + + if !isGoGenerate(buf) { + continue + } + + words := g.split(string(buf)) + if len(words) == 0 { + g.errorf("no arguments to directive") + } + if words[0] == "-command" { + g.setShorthand(words) + continue + } + // Run the command line. + if buildN || buildX { + fmt.Fprintf(os.Stderr, "%s\n", strings.Join(words, " ")) + } + if buildN { + continue + } + g.exec(words) + } + if err != nil && err != io.EOF { + g.errorf("error reading %s: %s", shortPath(g.path), err) + } + return true +} + +func isGoGenerate(buf []byte) bool { + return bytes.HasPrefix(buf, []byte("//go:generate ")) || bytes.HasPrefix(buf, []byte("//go:generate\t")) +} + +// split breaks the line into words, evaluating quoted +// strings and evaluating environment variables. +// The initial //go:generate element is present in line. +func (g *Generator) split(line string) []string { + // Parse line, obeying quoted strings. 
+ var words []string + line = line[len("//go:generate ") : len(line)-1] // Drop preamble and final newline. + // One (possibly quoted) word per iteration. +Words: + for { + line = strings.TrimLeft(line, " \t") + if len(line) == 0 { + break + } + if line[0] == '"' { + for i := 1; i < len(line); i++ { + c := line[i] // Only looking for ASCII so this is OK. + switch c { + case '\\': + if i+1 == len(line) { + g.errorf("bad backslash") + } + i++ // Absorb next byte (If it's a multibyte we'll get an error in Unquote). + case '"': + word, err := strconv.Unquote(line[0 : i+1]) + if err != nil { + g.errorf("bad quoted string") + } + words = append(words, word) + line = line[i+1:] + // Check the next character is space or end of line. + if len(line) > 0 && line[0] != ' ' && line[0] != '\t' { + g.errorf("expect space after quoted argument") + } + continue Words + } + } + g.errorf("mismatched quoted string") + } + i := strings.IndexAny(line, " \t") + if i < 0 { + i = len(line) + } + words = append(words, line[0:i]) + line = line[i:] + } + // Substitute command if required. + if len(words) > 0 && g.commands[words[0]] != nil { + // Replace 0th word by command substitution. + words = append(g.commands[words[0]], words[1:]...) + } + // Substitute environment variables. + for i, word := range words { + words[i] = g.expandEnv(word) + } + return words +} + +var stop = fmt.Errorf("error in generation") + +// errorf logs an error message prefixed with the file and line number. +// It then exits the program (with exit status 1) because generation stops +// at the first error. +func (g *Generator) errorf(format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, "%s:%d: %s\n", shortPath(g.path), g.lineNum, + fmt.Sprintf(format, args...)) + panic(stop) +} + +// expandEnv expands any $XXX invocations in word. +func (g *Generator) expandEnv(word string) string { + if !strings.ContainsRune(word, '$') { + return word + } + var buf bytes.Buffer + var w int + var r rune + for i := 0; i < len(word); i += w { + r, w = utf8.DecodeRuneInString(word[i:]) + if r != '$' { + buf.WriteRune(r) + continue + } + w += g.identLength(word[i+w:]) + envVar := word[i+1 : i+w] + var sub string + switch envVar { + case "GOARCH": + sub = runtime.GOARCH + case "GOOS": + sub = runtime.GOOS + case "GOFILE": + sub = g.file + case "GOPACKAGE": + sub = g.pkg + default: + sub = os.Getenv(envVar) + } + buf.WriteString(sub) + } + return buf.String() +} + +// identLength returns the length of the identifier beginning the string. +func (g *Generator) identLength(word string) int { + for i, r := range word { + if r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) { + continue + } + return i + } + return len(word) +} + +// setShorthand installs a new shorthand as defined by a -command directive. +func (g *Generator) setShorthand(words []string) { + // Create command shorthand. + if len(words) == 1 { + g.errorf("no command specified for -command") + } + command := words[1] + if g.commands[command] != nil { + g.errorf("command %q defined multiply defined", command) + } + g.commands[command] = words[2:len(words):len(words)] // force later append to make copy +} + +// exec runs the command specified by the argument. The first word is +// the command name itself. +func (g *Generator) exec(words []string) { + cmd := exec.Command(words[0], words[1:]...) + // Standard in and out of generator should be the usual. + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + // Run the command in the package directory. 
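As a concrete illustration of the splitting and expansion implemented above (the makedata.go generator and its flags are invented for the example):

    //go:generate go run makedata.go -pkg=$GOPACKAGE -out=data_$GOARCH.go

In a package named sys built on amd64, split turns this line into the words go, run, makedata.go, -pkg=sys, -out=data_amd64.go: the line is first broken into words, then each word has its $NAME references expanded. The splitTests table in generate_test.go below exercises the same behavior, including quoted arguments and undefined variables.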
+ cmd.Dir = g.dir + env := []string{ + "GOARCH=" + runtime.GOARCH, + "GOOS=" + runtime.GOOS, + "GOFILE=" + g.file, + "GOPACKAGE=" + g.pkg, + } + cmd.Env = mergeEnvLists(env, os.Environ()) + err := cmd.Run() + if err != nil { + g.errorf("running %q: %s", words[0], err) + } +} diff --git a/libgo/go/cmd/go/generate_test.go b/libgo/go/cmd/go/generate_test.go new file mode 100644 index 00000000000..660ebabbe84 --- /dev/null +++ b/libgo/go/cmd/go/generate_test.go @@ -0,0 +1,48 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "reflect" + "runtime" + "testing" +) + +type splitTest struct { + in string + out []string +} + +var splitTests = []splitTest{ + {"", nil}, + {"x", []string{"x"}}, + {" a b\tc ", []string{"a", "b", "c"}}, + {` " a " `, []string{" a "}}, + {"$GOARCH", []string{runtime.GOARCH}}, + {"$GOOS", []string{runtime.GOOS}}, + {"$GOFILE", []string{"proc.go"}}, + {"$GOPACKAGE", []string{"sys"}}, + {"a $XXNOTDEFINEDXX b", []string{"a", "", "b"}}, + {"/$XXNOTDEFINED/", []string{"//"}}, + {"yacc -o $GOARCH/yacc_$GOFILE", []string{"go", "tool", "yacc", "-o", runtime.GOARCH + "/yacc_proc.go"}}, +} + +func TestGenerateCommandParse(t *testing.T) { + g := &Generator{ + r: nil, // Unused here. + path: "/usr/ken/sys/proc.go", + dir: "/usr/ken/sys", + file: "proc.go", + pkg: "sys", + commands: make(map[string][]string), + } + g.setShorthand([]string{"-command", "yacc", "go", "tool", "yacc"}) + for _, test := range splitTests { + got := g.split("//go:generate " + test.in + "\n") + if !reflect.DeepEqual(got, test.out) { + t.Errorf("split(%q): got %q expected %q", test.in, got, test.out) + } + } +} diff --git a/libgo/go/cmd/go/get.go b/libgo/go/cmd/go/get.go index e708fcf779f..86e1697618a 100644 --- a/libgo/go/cmd/go/get.go +++ b/libgo/go/cmd/go/get.go @@ -16,7 +16,7 @@ import ( ) var cmdGet = &Command{ - UsageLine: "get [-d] [-fix] [-t] [-u] [build flags] [packages]", + UsageLine: "get [-d] [-f] [-fix] [-t] [-u] [build flags] [packages]", Short: "download and install packages and dependencies", Long: ` Get downloads and installs the packages named by the import paths, @@ -25,6 +25,11 @@ along with their dependencies. The -d flag instructs get to stop after downloading the packages; that is, it instructs get not to install the packages. +The -f flag, valid only when -u is set, forces get -u not to verify that +each package has been checked out from the source control repository +implied by its import path. This can be useful if the source is a local fork +of the original. + The -fix flag instructs get to run the fix tool on the downloaded packages before resolving dependencies or building the code. @@ -53,6 +58,7 @@ See also: go build, go install, go clean. } var getD = cmdGet.Flag.Bool("d", false, "") +var getF = cmdGet.Flag.Bool("f", false, "") var getT = cmdGet.Flag.Bool("t", false, "") var getU = cmdGet.Flag.Bool("u", false, "") var getFix = cmdGet.Flag.Bool("fix", false, "") @@ -63,6 +69,10 @@ func init() { } func runGet(cmd *Command, args []string) { + if *getF && !*getU { + fatalf("go get: cannot use -f flag without -u") + } + // Phase 1. Download/update. var stk importStack for _, arg := range downloadPaths(args) { @@ -151,7 +161,9 @@ func download(arg string, stk *importStack, getTestDeps bool) { } // Only process each package once. 
- if downloadCache[arg] { + // (Unless we're fetching test dependencies for this package, + // in which case we want to process it again.) + if downloadCache[arg] && !getTestDeps { return } downloadCache[arg] = true @@ -264,6 +276,25 @@ func downloadPackage(p *Package) error { return err } repo = "<local>" // should be unused; make distinctive + + // Double-check where it came from. + if *getU && vcs.remoteRepo != nil && !*getF { + dir := filepath.Join(p.build.SrcRoot, rootPath) + if remote, err := vcs.remoteRepo(vcs, dir); err == nil { + if rr, err := repoRootForImportPath(p.ImportPath); err == nil { + repo := rr.repo + if rr.vcs.resolveRepo != nil { + resolved, err := rr.vcs.resolveRepo(rr.vcs, dir, repo) + if err == nil { + repo = resolved + } + } + if remote != repo { + return fmt.Errorf("%s is from %s, should be from %s", dir, remote, repo) + } + } + } + } } else { // Analyze the import path to determine the version control system, // repository, and the import path for the root of the repository. diff --git a/libgo/go/cmd/go/go_windows_test.go b/libgo/go/cmd/go/go_windows_test.go new file mode 100644 index 00000000000..53d695cccc8 --- /dev/null +++ b/libgo/go/cmd/go/go_windows_test.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" +) + +func TestAbsolutePath(t *testing.T) { + tmp, err := ioutil.TempDir("", "TestAbsolutePath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + file := filepath.Join(tmp, "a.go") + err = ioutil.WriteFile(file, []byte{}, 0644) + if err != nil { + t.Fatal(err) + } + dir := filepath.Join(tmp, "dir") + err = os.Mkdir(dir, 0777) + if err != nil { + t.Fatal(err) + } + + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + defer os.Chdir(wd) + + // Chdir so current directory and a.go reside on the same drive. + err = os.Chdir(dir) + if err != nil { + t.Fatal(err) + } + + noVolume := file[len(filepath.VolumeName(file)):] + wrongPath := filepath.Join(dir, noVolume) + output, err := exec.Command("go", "build", noVolume).CombinedOutput() + if err == nil { + t.Fatal("build should fail") + } + if strings.Contains(string(output), wrongPath) { + t.Fatalf("wrong output found: %v %v", err, string(output)) + } +} diff --git a/libgo/go/cmd/go/help.go b/libgo/go/cmd/go/help.go index 40da7e1f5ee..c590fdb37fe 100644 --- a/libgo/go/cmd/go/help.go +++ b/libgo/go/cmd/go/help.go @@ -81,7 +81,8 @@ single directory, the command is applied to a single synthesized package made up of exactly those files, ignoring any build constraints in those files and ignoring any other files in the directory. -File names that begin with "." or "_" are ignored by the go tool. +Directory and file names that begin with "." or "_" are ignored +by the go tool, as are directories named "testdata". 
`, } @@ -154,6 +155,11 @@ A few common code hosting sites have special syntax: import "launchpad.net/~user/project/branch" import "launchpad.net/~user/project/branch/sub/directory" + IBM DevOps Services (Git) + + import "hub.jazz.net/git/user/project" + import "hub.jazz.net/git/user/project/sub/directory" + For code hosted on other servers, import paths may either be qualified with the version control type, or the go tool can dynamically fetch the import path over https/http and discover where the code resides @@ -229,7 +235,26 @@ listed in the GOPATH environment variable (see 'go help gopath'). The go command attempts to download the version of the package appropriate for the Go release being used. -Run 'go help install' for more. +Run 'go help get' for more. + +Import path checking + +When the custom import path feature described above redirects to a +known code hosting site, each of the resulting packages has two possible +import paths, using the custom domain or the known hosting site. + +A package statement is said to have an "import comment" if it is immediately +followed (before the next newline) by a comment of one of these two forms: + + package math // import "path" + package math /* import "path" */ + +The go command will refuse to install a package with an import comment +unless it is being referred to by that import path. In this way, import comments +let package authors make sure the custom import path is used and not a +direct path to the underlying code hosting site. + +See https://golang.org/s/go14customimport for details. `, } diff --git a/libgo/go/cmd/go/list.go b/libgo/go/cmd/go/list.go index 0ead4350238..fbf96167feb 100644 --- a/libgo/go/cmd/go/list.go +++ b/libgo/go/cmd/go/list.go @@ -30,28 +30,29 @@ syntax of package template. The default output is equivalent to -f '{{.ImportPath}}'. The struct being passed to the template is: type Package struct { - Dir string // directory containing package sources - ImportPath string // import path of package in dir - Name string // package name - Doc string // package documentation string - Target string // install path - Goroot bool // is this package in the Go root? - Standard bool // is this package part of the standard Go library? - Stale bool // would 'go install' do anything for this package? - Root string // Go root or Go path dir containing this package + Dir string // directory containing package sources + ImportPath string // import path of package in dir + ImportComment string // path in import comment on package statement + Name string // package name + Doc string // package documentation string + Target string // install path + Goroot bool // is this package in the Go root? + Standard bool // is this package part of the standard Go library? + Stale bool // would 'go install' do anything for this package? 
+ Root string // Go root or Go path dir containing this package // Source files - GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) - CgoFiles []string // .go sources files that import "C" + GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) + CgoFiles []string // .go sources files that import "C" IgnoredGoFiles []string // .go sources ignored due to build constraints - CFiles []string // .c source files - CXXFiles []string // .cc, .cxx and .cpp source files - MFiles []string // .m source files - HFiles []string // .h, .hh, .hpp and .hxx source files - SFiles []string // .s source files - SwigFiles []string // .swig files - SwigCXXFiles []string // .swigcxx files - SysoFiles []string // .syso object files to add to archive + CFiles []string // .c source files + CXXFiles []string // .cc, .cxx and .cpp source files + MFiles []string // .m source files + HFiles []string // .h, .hh, .hpp and .hxx source files + SFiles []string // .s source files + SwigFiles []string // .swig files + SwigCXXFiles []string // .swigcxx files + SysoFiles []string // .syso object files to add to archive // Cgo directives CgoCFLAGS []string // cgo: flags for C compiler diff --git a/libgo/go/cmd/go/main.go b/libgo/go/cmd/go/main.go index 5b1194aaa34..9691f39c763 100644 --- a/libgo/go/cmd/go/main.go +++ b/libgo/go/cmd/go/main.go @@ -79,6 +79,7 @@ var commands = []*Command{ cmdEnv, cmdFix, cmdFmt, + cmdGenerate, cmdGet, cmdInstall, cmdList, @@ -536,7 +537,7 @@ func matchPackages(pattern string) []string { }) for _, src := range buildContext.SrcDirs() { - if pattern == "std" && src != gorootSrcPkg { + if pattern == "std" && src != gorootSrc { continue } src = filepath.Clean(src) + string(filepath.Separator) @@ -618,7 +619,7 @@ func matchPackagesInFS(pattern string) []string { // The initial case is not Cleaned, though, so we do this explicitly. // // This converts a path like "./io/" to "io". Without this step, running - // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io + // "cd $GOROOT/src; go list ./io/..." would incorrectly skip the io // package, because prepending the prefix "./" to the unclean path would // result in "././io", and match("././io") returns false. path = filepath.Clean(path) diff --git a/libgo/go/cmd/go/pkg.go b/libgo/go/cmd/go/pkg.go index b700ad5c9a2..621cb4b6083 100644 --- a/libgo/go/cmd/go/pkg.go +++ b/libgo/go/cmd/go/pkg.go @@ -14,6 +14,7 @@ import ( "os" pathpkg "path" "path/filepath" + "runtime" "sort" "strings" "time" @@ -25,16 +26,17 @@ type Package struct { // Note: These fields are part of the go command's public API. // See list.go. It is okay to add fields, but not to change or // remove existing ones. Keep in sync with list.go - Dir string `json:",omitempty"` // directory containing package sources - ImportPath string `json:",omitempty"` // import path of package in dir - Name string `json:",omitempty"` // package name - Doc string `json:",omitempty"` // package documentation string - Target string `json:",omitempty"` // install path - Goroot bool `json:",omitempty"` // is this package found in the Go root? - Standard bool `json:",omitempty"` // is this package part of the standard Go library? - Stale bool `json:",omitempty"` // would 'go install' do anything for this package? 
- Root string `json:",omitempty"` // Go root or Go path dir containing this package - ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory + Dir string `json:",omitempty"` // directory containing package sources + ImportPath string `json:",omitempty"` // import path of package in dir + ImportComment string `json:",omitempty"` // path in import comment on package statement + Name string `json:",omitempty"` // package name + Doc string `json:",omitempty"` // package documentation string + Target string `json:",omitempty"` // install path + Goroot bool `json:",omitempty"` // is this package found in the Go root? + Standard bool `json:",omitempty"` // is this package part of the standard Go library? + Stale bool `json:",omitempty"` // would 'go install' do anything for this package? + Root string `json:",omitempty"` // Go root or Go path dir containing this package + ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory // Source files GoFiles []string `json:",omitempty"` // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) @@ -103,6 +105,7 @@ func (p *Package) copyBuild(pp *build.Package) { p.Dir = pp.Dir p.ImportPath = pp.ImportPath + p.ImportComment = pp.ImportComment p.Name = pp.Name p.Doc = pp.Doc p.Root = pp.Root @@ -218,7 +221,7 @@ func dirToImportPath(dir string) string { } func makeImportValid(r rune) rune { - // Should match Go spec, compilers, and ../../pkg/go/parser/parser.go:/isValidImport. + // Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport. const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { return '_' @@ -244,6 +247,9 @@ func loadImport(path string, srcDir string, stk *importStack, importPos []token. importPath = dirToImportPath(filepath.Join(srcDir, path)) } if p := packageCache[importPath]; p != nil { + if perr := disallowInternal(srcDir, p, stk); perr != p { + return perr + } return reusePackage(p, stk) } @@ -258,11 +264,14 @@ func loadImport(path string, srcDir string, stk *importStack, importPos []token. // // TODO: After Go 1, decide when to pass build.AllowBinary here. // See issue 3268 for mistakes to avoid. - bp, err := buildContext.Import(path, srcDir, 0) + bp, err := buildContext.Import(path, srcDir, build.ImportComment) bp.ImportPath = importPath if gobin != "" { bp.BinDir = gobin } + if err == nil && !isLocal && bp.ImportComment != "" && bp.ImportComment != path { + err = fmt.Errorf("code in directory %s expects import %q", bp.Dir, bp.ImportComment) + } p.load(stk, bp, err) if p.Error != nil && len(importPos) > 0 { pos := importPos[0] @@ -270,6 +279,10 @@ func loadImport(path string, srcDir string, stk *importStack, importPos []token. p.Error.Pos = pos.String() } + if perr := disallowInternal(srcDir, p, stk); perr != p { + return perr + } + return p } @@ -298,12 +311,82 @@ func reusePackage(p *Package, stk *importStack) *Package { return p } +// disallowInternal checks that srcDir is allowed to import p. +// If the import is allowed, disallowInternal returns the original package p. +// If not, it returns a new package containing just an appropriate error. +func disallowInternal(srcDir string, p *Package, stk *importStack) *Package { + // golang.org/s/go14internal: + // An import of a path containing the element “internal” + // is disallowed if the importing code is outside the tree + // rooted at the parent of the “internal” directory. + // + // ... 
For Go 1.4, we will implement the rule first for $GOROOT, but not $GOPATH. + + // Only applies to $GOROOT. + if !p.Standard { + return p + } + + // The stack includes p.ImportPath. + // If that's the only thing on the stack, we started + // with a name given on the command line, not an + // import. Anything listed on the command line is fine. + if len(*stk) == 1 { + return p + } + + // Check for "internal" element: four cases depending on begin of string and/or end of string. + i, ok := findInternal(p.ImportPath) + if !ok { + return p + } + + // Internal is present. + // Map import path back to directory corresponding to parent of internal. + if i > 0 { + i-- // rewind over slash in ".../internal" + } + parent := p.Dir[:i+len(p.Dir)-len(p.ImportPath)] + if hasPathPrefix(filepath.ToSlash(srcDir), filepath.ToSlash(parent)) { + return p + } + + // Internal is present, and srcDir is outside parent's tree. Not allowed. + perr := *p + perr.Error = &PackageError{ + ImportStack: stk.copy(), + Err: "use of internal package not allowed", + } + perr.Incomplete = true + return &perr +} + +// findInternal looks for the final "internal" path element in the given import path. +// If there isn't one, findInternal returns ok=false. +// Otherwise, findInternal returns ok=true and the index of the "internal". +func findInternal(path string) (index int, ok bool) { + // Four cases, depending on internal at start/end of string or not. + // The order matters: we must return the index of the final element, + // because the final one produces the most restrictive requirement + // on the importer. + switch { + case strings.HasSuffix(path, "/internal"): + return len(path) - len("internal"), true + case strings.Contains(path, "/internal/"): + return strings.LastIndex(path, "/internal/") + 1, true + case path == "internal", strings.HasPrefix(path, "internal/"): + return 0, true + } + return 0, false +} + type targetDir int const ( - toRoot targetDir = iota // to bin dir inside package root (default) - toTool // GOROOT/pkg/tool - toBin // GOROOT/bin + toRoot targetDir = iota // to bin dir inside package root (default) + toTool // GOROOT/pkg/tool + toBin // GOROOT/bin + stalePath // the old import path; fail to build ) // goTools is a map of Go program import path to install target directory. @@ -316,10 +399,14 @@ var goTools = map[string]targetDir{ "cmd/nm": toTool, "cmd/objdump": toTool, "cmd/pack": toTool, + "cmd/pprof": toTool, "cmd/yacc": toTool, - "code.google.com/p/go.tools/cmd/cover": toTool, - "code.google.com/p/go.tools/cmd/godoc": toBin, - "code.google.com/p/go.tools/cmd/vet": toTool, + "golang.org/x/tools/cmd/cover": toTool, + "golang.org/x/tools/cmd/godoc": toBin, + "golang.org/x/tools/cmd/vet": toTool, + "code.google.com/p/go.tools/cmd/cover": stalePath, + "code.google.com/p/go.tools/cmd/godoc": stalePath, + "code.google.com/p/go.tools/cmd/vet": stalePath, } // expandScanner expands a scanner.List error into all the errors in the list. @@ -380,6 +467,13 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package } if p.Name == "main" { + // Report an error when the old code.google.com/p/go.tools paths are used. 
+ if goTools[p.ImportPath] == stalePath { + newPath := strings.Replace(p.ImportPath, "code.google.com/p/go.", "golang.org/x/", 1) + e := fmt.Sprintf("the %v command has moved; use %v instead.", p.ImportPath, newPath) + p.Error = &PackageError{Err: e} + return p + } _, elem := filepath.Split(p.Dir) full := buildContext.GOOS + "_" + buildContext.GOARCH + "/" + elem if buildContext.GOOS != toolGOOS || buildContext.GOARCH != toolGOARCH { @@ -482,7 +576,7 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package // Build list of imported packages and full dependency list. imports := make([]*Package, 0, len(p.Imports)) - deps := make(map[string]bool) + deps := make(map[string]*Package) for i, path := range importPaths { if path == "C" { continue @@ -505,10 +599,10 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package path = p1.ImportPath importPaths[i] = path } - deps[path] = true + deps[path] = p1 imports = append(imports, p1) - for _, dep := range p1.Deps { - deps[dep] = true + for _, dep := range p1.deps { + deps[dep.ImportPath] = dep } if p1.Incomplete { p.Incomplete = true @@ -522,7 +616,7 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package } sort.Strings(p.Deps) for _, dep := range p.Deps { - p1 := packageCache[dep] + p1 := deps[dep] if p1 == nil { panic("impossible: missing entry in package cache for " + dep + " imported by " + p.ImportPath) } @@ -538,6 +632,16 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package } p.Target = p.target + // Check for C code compiled with Plan 9 C compiler. + // No longer allowed except in runtime and runtime/cgo, for now. + if len(p.CFiles) > 0 && !p.usesCgo() && (!p.Standard || p.ImportPath != "runtime") { + p.Error = &PackageError{ + ImportStack: stk.copy(), + Err: fmt.Sprintf("C source files not allowed when not using cgo: %s", strings.Join(p.CFiles, " ")), + } + return p + } + // In the absence of errors lower in the dependency tree, // check for case-insensitive collisions of import paths. if len(p.DepsErrors) == 0 { @@ -599,6 +703,12 @@ func computeStale(pkgs ...*Package) { } } +// The runtime version string takes one of two forms: +// "go1.X[.Y]" for Go releases, and "devel +hash" at tip. +// Determine whether we are in a released copy by +// inspecting the version. +var isGoRelease = strings.HasPrefix(runtime.Version(), "go1") + // isStale reports whether package p needs to be rebuilt. func isStale(p *Package, topRoot map[string]bool) bool { if p.Standard && (p.ImportPath == "unsafe" || buildContext.Compiler == "gccgo") { @@ -619,7 +729,16 @@ func isStale(p *Package, topRoot map[string]bool) bool { return false } - if buildA || p.target == "" || p.Stale { + // If we are running a release copy of Go, do not rebuild the standard packages. + // They may not be writable anyway, but they are certainly not changing. + // This makes 'go build -a' skip the standard packages when using an official release. + // See issue 4106 and issue 8290. 
+ pkgBuildA := buildA + if p.Standard && isGoRelease { + pkgBuildA = false + } + + if pkgBuildA || p.target == "" || p.Stale { return true } @@ -707,24 +826,13 @@ func loadPackage(arg string, stk *importStack) *Package { arg = sub } } - if strings.HasPrefix(arg, "cmd/") { + if strings.HasPrefix(arg, "cmd/") && !strings.Contains(arg[4:], "/") { if p := cmdCache[arg]; p != nil { return p } stk.push(arg) defer stk.pop() - if strings.Contains(arg[4:], "/") { - p := &Package{ - Error: &PackageError{ - ImportStack: stk.copy(), - Err: fmt.Sprintf("invalid import path: cmd/... is reserved for Go commands"), - hard: true, - }, - } - return p - } - bp, err := buildContext.ImportDir(filepath.Join(gorootSrc, arg), 0) bp.ImportPath = arg bp.Goroot = true diff --git a/libgo/go/cmd/go/test.go b/libgo/go/cmd/go/test.go index f7ae9c01fba..28b46ff52bf 100644 --- a/libgo/go/cmd/go/test.go +++ b/libgo/go/cmd/go/test.go @@ -6,6 +6,7 @@ package main import ( "bytes" + "errors" "fmt" "go/ast" "go/build" @@ -48,7 +49,7 @@ It prints a summary of the test results in the format: followed by detailed output for each failed package. 'Go test' recompiles each package along with any files with names matching -the file pattern "*_test.go". +the file pattern "*_test.go". Files whose names begin with "_" (including "_test.go") or "." are ignored. These additional files can contain test functions, benchmark functions, and example functions. See 'go help testfunc' for more. @@ -65,16 +66,23 @@ non-test installation. In addition to the build flags, the flags handled by 'go test' itself are: - -c Compile the test binary to pkg.test but do not run it. - (Where pkg is the last element of the package's import path.) + -c + Compile the test binary to pkg.test but do not run it + (where pkg is the last element of the package's import path). + The file name can be changed with the -o flag. + + -exec xprog + Run the test binary using xprog. The behavior is the same as + in 'go run'. See 'go help run' for details. -i Install packages that are dependencies of the test. Do not run the test. - -exec xprog - Run the test binary using xprog. The behavior is the same as - in 'go run'. See 'go help run' for details. + -o file + Compile the test binary to the named file. + The test still runs (unless -c or -i is specified). + The test binary also accepts flags that control execution of the test; these flags are also accessible by 'go test'. See 'go help testflag' for details. @@ -122,6 +130,7 @@ control the execution of any test: -blockprofile block.out Write a goroutine blocking profile to the specified file when all tests are complete. + Writes test binary as -c would. -blockprofilerate n Control the detail provided in goroutine blocking profiles by @@ -153,8 +162,7 @@ control the execution of any test: Sets -cover. -coverprofile cover.out - Write a coverage profile to the specified file after all tests - have passed. + Write a coverage profile to the file after all tests have passed. Sets -cover. -cpu 1,2,4 @@ -164,10 +172,11 @@ control the execution of any test: -cpuprofile cpu.out Write a CPU profile to the specified file before exiting. + Writes test binary as -c would. -memprofile mem.out - Write a memory profile to the specified file after all tests - have passed. + Write a memory profile to the file after all tests have passed. + Writes test binary as -c would. 
-memprofilerate n Enable more precise (and expensive) memory profiles by setting @@ -274,10 +283,10 @@ var ( testCoverMode string // -covermode flag testCoverPaths []string // -coverpkg flag testCoverPkgs []*Package // -coverpkg flag + testO string // -o flag testProfile bool // some profiling flag testNeedBinary bool // profile needs to keep binary around testV bool // -v flag - testFiles []string // -file flag(s) TODO: not respected testTimeout string // -timeout flag testArgs []string testBench bool @@ -291,6 +300,7 @@ var testMainDeps = map[string]bool{ // Dependencies for testmain. "testing": true, "regexp": true, + "os": true, } func runTest(cmd *Command, args []string) { @@ -308,6 +318,9 @@ func runTest(cmd *Command, args []string) { if testC && len(pkgs) != 1 { fatalf("cannot use -c flag with multiple packages") } + if testO != "" && len(pkgs) != 1 { + fatalf("cannot use -o flag with multiple packages") + } if testProfile && len(pkgs) != 1 { fatalf("cannot use test profile flag with multiple packages") } @@ -524,6 +537,13 @@ func contains(x []string, s string) bool { return false } +var windowsBadWords = []string{ + "install", + "patch", + "setup", + "update", +} + func (b *builder) test(p *Package) (buildAction, runAction, printAction *action, err error) { if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 { build := b.action(modeBuild, modeBuild, p) @@ -695,7 +715,7 @@ func (b *builder) test(p *Package) (buildAction, runAction, printAction *action, omitDWARF: !testC && !testNeedBinary, } - // The generated main also imports testing and regexp. + // The generated main also imports testing, regexp, and os. stk.push("testmain") for dep := range testMainDeps { if dep == ptest.ImportPath { @@ -734,11 +754,13 @@ func (b *builder) test(p *Package) (buildAction, runAction, printAction *action, if err != nil { return nil, nil, nil, err } - if t.NeedTest || ptest.coverMode != "" { + if len(ptest.GoFiles) > 0 { pmain.imports = append(pmain.imports, ptest) + t.ImportTest = true } - if t.NeedXtest { + if pxtest != nil { pmain.imports = append(pmain.imports, pxtest) + t.ImportXtest = true } if ptest != p && localCover { @@ -790,17 +812,54 @@ func (b *builder) test(p *Package) (buildAction, runAction, printAction *action, a.objdir = testDir + string(filepath.Separator) a.objpkg = filepath.Join(testDir, "main.a") a.target = filepath.Join(testDir, testBinary) + exeSuffix - pmainAction := a + if goos == "windows" { + // There are many reserved words on Windows that, + // if used in the name of an executable, cause Windows + // to try to ask for extra permissions. + // The word list includes setup, install, update, and patch, + // but it does not appear to be defined anywhere. + // We have run into this trying to run the + // go.codereview/patch tests. + // For package names containing those words, use test.test.exe + // instead of pkgname.test.exe. + // Note that this file name is only used in the Go command's + // temporary directory. If the -c or other flags are + // given, the code below will still use pkgname.test.exe. + // There are two user-visible effects of this change. + // First, you can actually run 'go test' in directories that + // have names that Windows thinks are installer-like, + // without getting a dialog box asking for more permissions. + // Second, in the Windows process listing during go test, + // the test shows up as test.test.exe, not pkgname.test.exe. + // That second one is a drawback, but it seems a small + // price to pay for the test running at all. 
+ // If maintaining the list of bad words is too onerous, + // we could just do this always on Windows. + for _, bad := range windowsBadWords { + if strings.Contains(testBinary, bad) { + a.target = filepath.Join(testDir, "test.test") + exeSuffix + break + } + } + } + buildAction = a if testC || testNeedBinary { // -c or profiling flag: create action to copy binary to ./test.out. - runAction = &action{ + target := filepath.Join(cwd, testBinary+exeSuffix) + if testO != "" { + target = testO + if !filepath.IsAbs(target) { + target = filepath.Join(cwd, target) + } + } + buildAction = &action{ f: (*builder).install, - deps: []*action{pmainAction}, + deps: []*action{buildAction}, p: pmain, - target: filepath.Join(cwd, testBinary+exeSuffix), + target: target, } - pmainAction = runAction // in case we are running the test + runAction = buildAction // make sure runAction != nil even if not running test } if testC { printAction = &action{p: p, deps: []*action{runAction}} // nop @@ -808,7 +867,7 @@ func (b *builder) test(p *Package) (buildAction, runAction, printAction *action, // run test runAction = &action{ f: (*builder).runTest, - deps: []*action{pmainAction}, + deps: []*action{buildAction}, p: p, ignoreFail: true, } @@ -824,7 +883,7 @@ func (b *builder) test(p *Package) (buildAction, runAction, printAction *action, } } - return pmainAction, runAction, printAction, nil + return buildAction, runAction, printAction, nil } func testImportStack(top string, p *Package, target string) []string { @@ -1068,6 +1127,31 @@ func (b *builder) notest(a *action) error { return nil } +// isTestMain tells whether fn is a TestMain(m *testing.M) function. +func isTestMain(fn *ast.FuncDecl) bool { + if fn.Name.String() != "TestMain" || + fn.Type.Results != nil && len(fn.Type.Results.List) > 0 || + fn.Type.Params == nil || + len(fn.Type.Params.List) != 1 || + len(fn.Type.Params.List[0].Names) > 1 { + return false + } + ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr) + if !ok { + return false + } + // We can't easily check that the type is *testing.M + // because we don't know how testing has been imported, + // but at least check that it's *M or *something.M. + if name, ok := ptr.X.(*ast.Ident); ok && name.Name == "M" { + return true + } + if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == "M" { + return true + } + return false +} + // isTest tells whether name looks like a test (or benchmark, according to prefix). // It is a Test (say) if there is a character after Test that is not a lower-case letter. // We don't want TesticularCancer. 
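[Illustration, not part of the patch: the isTestMain check above and the testmain template changes later in this file let a Go 1.4 test package supply its own entry point. A minimal sketch of such a test file, using a hypothetical package name, might look like this.]

package mypkg // hypothetical package; this would live in mypkg_test.go

import (
	"os"
	"testing"
)

// TestMain, when present, is called by the generated test main instead
// of running the tests directly, so setup and teardown can wrap the
// whole run. m.Run executes the Test*/Benchmark* functions and returns
// an exit code, which the function must pass to os.Exit itself.
func TestMain(m *testing.M) {
	// per-run setup would go here
	code := m.Run()
	// per-run teardown would go here
	os.Exit(code)
}

[End of illustration; the diff resumes below.]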
@@ -1093,12 +1177,12 @@ func loadTestFuncs(ptest *Package) (*testFuncs, error) { Package: ptest, } for _, file := range ptest.TestGoFiles { - if err := t.load(filepath.Join(ptest.Dir, file), "_test", &t.NeedTest); err != nil { + if err := t.load(filepath.Join(ptest.Dir, file), "_test", &t.ImportTest, &t.NeedTest); err != nil { return nil, err } } for _, file := range ptest.XTestGoFiles { - if err := t.load(filepath.Join(ptest.Dir, file), "_xtest", &t.NeedXtest); err != nil { + if err := t.load(filepath.Join(ptest.Dir, file), "_xtest", &t.ImportXtest, &t.NeedXtest); err != nil { return nil, err } } @@ -1121,13 +1205,16 @@ func writeTestmain(out string, t *testFuncs) error { } type testFuncs struct { - Tests []testFunc - Benchmarks []testFunc - Examples []testFunc - Package *Package - NeedTest bool - NeedXtest bool - Cover []coverInfo + Tests []testFunc + Benchmarks []testFunc + Examples []testFunc + TestMain *testFunc + Package *Package + ImportTest bool + NeedTest bool + ImportXtest bool + NeedXtest bool + Cover []coverInfo } func (t *testFuncs) CoverMode() string { @@ -1162,7 +1249,7 @@ type testFunc struct { var testFileSet = token.NewFileSet() -func (t *testFuncs) load(filename, pkg string, seen *bool) error { +func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error { f, err := parser.ParseFile(testFileSet, filename, nil, parser.ParseComments) if err != nil { return expandScanner(err) @@ -1177,17 +1264,24 @@ func (t *testFuncs) load(filename, pkg string, seen *bool) error { } name := n.Name.String() switch { + case isTestMain(n): + if t.TestMain != nil { + return errors.New("multiple definitions of TestMain") + } + t.TestMain = &testFunc{pkg, name, ""} + *doImport, *seen = true, true case isTest(name, "Test"): t.Tests = append(t.Tests, testFunc{pkg, name, ""}) - *seen = true + *doImport, *seen = true, true case isTest(name, "Benchmark"): t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, ""}) - *seen = true + *doImport, *seen = true, true } } ex := doc.Examples(f) sort.Sort(byOrder(ex)) for _, e := range ex { + *doImport = true // import test file whether executed or not if e.Output == "" && !e.EmptyOutput { // Don't run examples with no output. continue @@ -1208,14 +1302,17 @@ var testmainTmpl = template.Must(template.New("main").Parse(` package main import ( +{{if not .TestMain}} + "os" +{{end}} "regexp" "testing" -{{if .NeedTest}} - _test {{.Package.ImportPath | printf "%q"}} +{{if .ImportTest}} + {{if .NeedTest}}_test{{else}}_{{end}} {{.Package.ImportPath | printf "%q"}} {{end}} -{{if .NeedXtest}} - _xtest {{.Package.ImportPath | printf "%s_test" | printf "%q"}} +{{if .ImportXtest}} + {{if .NeedXtest}}_xtest{{else}}_{{end}} {{.Package.ImportPath | printf "%s_test" | printf "%q"}} {{end}} {{range $i, $p := .Cover}} _cover{{$i}} {{$p.Package.ImportPath | printf "%q"}} @@ -1302,7 +1399,12 @@ func main() { CoveredPackages: {{printf "%q" .Covered}}, }) {{end}} - testing.Main(matchString, tests, benchmarks, examples) + m := testing.MainStart(matchString, tests, benchmarks, examples) +{{with .TestMain}} + {{.Package}}.{{.Name}}(m) +{{else}} + os.Exit(m.Run()) +{{end}} } `)) diff --git a/libgo/go/cmd/go/testdata/generate/test1.go b/libgo/go/cmd/go/testdata/generate/test1.go new file mode 100644 index 00000000000..1f05734f04f --- /dev/null +++ b/libgo/go/cmd/go/testdata/generate/test1.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Simple test for go generate. + +// We include a build tag that go generate should ignore. + +// +build ignore + +//go:generate echo Success + +package p diff --git a/libgo/go/cmd/go/testdata/generate/test2.go b/libgo/go/cmd/go/testdata/generate/test2.go new file mode 100644 index 00000000000..ef1a3d95159 --- /dev/null +++ b/libgo/go/cmd/go/testdata/generate/test2.go @@ -0,0 +1,10 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that go generate handles command aliases. + +//go:generate -command run echo Now is the time +//go:generate run for all good men + +package p diff --git a/libgo/go/cmd/go/testdata/generate/test3.go b/libgo/go/cmd/go/testdata/generate/test3.go new file mode 100644 index 00000000000..41ffb7ea87f --- /dev/null +++ b/libgo/go/cmd/go/testdata/generate/test3.go @@ -0,0 +1,9 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test go generate variable substitution. + +//go:generate echo $GOARCH $GOFILE $GOPACKAGE xyz$GOPACKAGE/$GOFILE/123 + +package p diff --git a/libgo/go/cmd/go/testdata/importcom/bad.go b/libgo/go/cmd/go/testdata/importcom/bad.go new file mode 100644 index 00000000000..e104c2e992b --- /dev/null +++ b/libgo/go/cmd/go/testdata/importcom/bad.go @@ -0,0 +1,3 @@ +package p + +import "bad" diff --git a/libgo/go/cmd/go/testdata/importcom/conflict.go b/libgo/go/cmd/go/testdata/importcom/conflict.go new file mode 100644 index 00000000000..995556c5114 --- /dev/null +++ b/libgo/go/cmd/go/testdata/importcom/conflict.go @@ -0,0 +1,3 @@ +package p + +import "conflict" diff --git a/libgo/go/cmd/go/testdata/importcom/src/bad/bad.go b/libgo/go/cmd/go/testdata/importcom/src/bad/bad.go new file mode 100644 index 00000000000..bc51fd3fdee --- /dev/null +++ b/libgo/go/cmd/go/testdata/importcom/src/bad/bad.go @@ -0,0 +1 @@ +package bad // import diff --git a/libgo/go/cmd/go/testdata/importcom/src/conflict/a.go b/libgo/go/cmd/go/testdata/importcom/src/conflict/a.go new file mode 100644 index 00000000000..2d677035119 --- /dev/null +++ b/libgo/go/cmd/go/testdata/importcom/src/conflict/a.go @@ -0,0 +1 @@ +package conflict // import "a" diff --git a/libgo/go/cmd/go/testdata/importcom/src/conflict/b.go b/libgo/go/cmd/go/testdata/importcom/src/conflict/b.go new file mode 100644 index 00000000000..8fcfb3c8bd3 --- /dev/null +++ b/libgo/go/cmd/go/testdata/importcom/src/conflict/b.go @@ -0,0 +1 @@ +package conflict /* import "b" */ diff --git a/libgo/go/cmd/go/testdata/importcom/src/works/x/x.go b/libgo/go/cmd/go/testdata/importcom/src/works/x/x.go new file mode 100644 index 00000000000..044c6eca803 --- /dev/null +++ b/libgo/go/cmd/go/testdata/importcom/src/works/x/x.go @@ -0,0 +1 @@ +package x // import "works/x" diff --git a/libgo/go/cmd/go/testdata/importcom/src/works/x/x1.go b/libgo/go/cmd/go/testdata/importcom/src/works/x/x1.go new file mode 100644 index 00000000000..2449b29df51 --- /dev/null +++ b/libgo/go/cmd/go/testdata/importcom/src/works/x/x1.go @@ -0,0 +1 @@ +package x // important! 
not an import comment diff --git a/libgo/go/cmd/go/testdata/importcom/src/wrongplace/x.go b/libgo/go/cmd/go/testdata/importcom/src/wrongplace/x.go new file mode 100644 index 00000000000..b89849da785 --- /dev/null +++ b/libgo/go/cmd/go/testdata/importcom/src/wrongplace/x.go @@ -0,0 +1 @@ +package x // import "my/x" diff --git a/libgo/go/cmd/go/testdata/importcom/works.go b/libgo/go/cmd/go/testdata/importcom/works.go new file mode 100644 index 00000000000..31b55d08a37 --- /dev/null +++ b/libgo/go/cmd/go/testdata/importcom/works.go @@ -0,0 +1,3 @@ +package p + +import _ "works/x" diff --git a/libgo/go/cmd/go/testdata/importcom/wrongplace.go b/libgo/go/cmd/go/testdata/importcom/wrongplace.go new file mode 100644 index 00000000000..e2535e01ae0 --- /dev/null +++ b/libgo/go/cmd/go/testdata/importcom/wrongplace.go @@ -0,0 +1,3 @@ +package p + +import "wrongplace" diff --git a/libgo/go/cmd/go/testdata/norunexample/example_test.go b/libgo/go/cmd/go/testdata/norunexample/example_test.go new file mode 100644 index 00000000000..e158305a6c8 --- /dev/null +++ b/libgo/go/cmd/go/testdata/norunexample/example_test.go @@ -0,0 +1,11 @@ +package pkg_test + +import "os" + +func init() { + os.Stdout.Write([]byte("File with non-runnable example was built.\n")) +} + +func Example_test() { + // This test will not be run, it has no "Output:" comment. +} diff --git a/libgo/go/cmd/go/testdata/norunexample/test_test.go b/libgo/go/cmd/go/testdata/norunexample/test_test.go new file mode 100644 index 00000000000..d2e919838fb --- /dev/null +++ b/libgo/go/cmd/go/testdata/norunexample/test_test.go @@ -0,0 +1,10 @@ +package pkg + +import ( + "os" + "testing" +) + +func TestBuilt(t *testing.T) { + os.Stdout.Write([]byte("A normal test was executed.\n")) +} diff --git a/libgo/go/cmd/go/testdata/src/badc/x.go b/libgo/go/cmd/go/testdata/src/badc/x.go new file mode 100644 index 00000000000..bfa1de28bde --- /dev/null +++ b/libgo/go/cmd/go/testdata/src/badc/x.go @@ -0,0 +1 @@ +package badc diff --git a/libgo/go/cmd/go/testdata/src/badtest/badexec/x_test.go b/libgo/go/cmd/go/testdata/src/badtest/badexec/x_test.go new file mode 100644 index 00000000000..12f50517125 --- /dev/null +++ b/libgo/go/cmd/go/testdata/src/badtest/badexec/x_test.go @@ -0,0 +1,5 @@ +package badexec + +func init() { + panic("badexec") +} diff --git a/libgo/go/cmd/go/testdata/src/badtest/badsyntax/x.go b/libgo/go/cmd/go/testdata/src/badtest/badsyntax/x.go new file mode 100644 index 00000000000..c8a5407a5ac --- /dev/null +++ b/libgo/go/cmd/go/testdata/src/badtest/badsyntax/x.go @@ -0,0 +1 @@ +package badsyntax diff --git a/libgo/go/cmd/go/testdata/src/badtest/badsyntax/x_test.go b/libgo/go/cmd/go/testdata/src/badtest/badsyntax/x_test.go new file mode 100644 index 00000000000..5be10745d9b --- /dev/null +++ b/libgo/go/cmd/go/testdata/src/badtest/badsyntax/x_test.go @@ -0,0 +1,3 @@ +package badsyntax + +func func func func func! 
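[Illustration, not part of the patch: the importcom test data above exercises the import-comment check added in pkg.go and documented in help.go — a package clause may be immediately followed by a comment naming its canonical import path, in either the // or /* */ form, and the go command refuses to build the package when it is reached through a different path. The file paths and import paths below are invented for the sketch, not taken from this commit.]

// Hypothetical file $GOPATH/src/github.com/someone/hello/hello.go,
// whose author wants it imported only as "example.com/hello":
package hello // import "example.com/hello"

// A hypothetical main package elsewhere that imports it by the
// hosting-site path instead:
package main

import _ "github.com/someone/hello"

// Building the second file would fail with an error of the form used
// by loadImport in pkg.go:
//
//	code in directory .../src/github.com/someone/hello expects import "example.com/hello"

[End of illustration; the diff resumes below.]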
diff --git a/libgo/go/cmd/go/testdata/src/badtest/badvar/x.go b/libgo/go/cmd/go/testdata/src/badtest/badvar/x.go new file mode 100644 index 00000000000..fdd46c4c721 --- /dev/null +++ b/libgo/go/cmd/go/testdata/src/badtest/badvar/x.go @@ -0,0 +1 @@ +package badvar diff --git a/libgo/go/cmd/go/testdata/src/badtest/badvar/x_test.go b/libgo/go/cmd/go/testdata/src/badtest/badvar/x_test.go new file mode 100644 index 00000000000..c67df01c5ca --- /dev/null +++ b/libgo/go/cmd/go/testdata/src/badtest/badvar/x_test.go @@ -0,0 +1,5 @@ +package badvar_test + +func f() { + _ = notdefined +} diff --git a/libgo/go/cmd/go/testdata/src/vetpkg/a_test.go b/libgo/go/cmd/go/testdata/src/vetpkg/a_test.go new file mode 100644 index 00000000000..9b64e8e1a26 --- /dev/null +++ b/libgo/go/cmd/go/testdata/src/vetpkg/a_test.go @@ -0,0 +1 @@ +package p_test diff --git a/libgo/go/cmd/go/testdata/src/vetpkg/b.go b/libgo/go/cmd/go/testdata/src/vetpkg/b.go new file mode 100644 index 00000000000..99e18f63dc6 --- /dev/null +++ b/libgo/go/cmd/go/testdata/src/vetpkg/b.go @@ -0,0 +1,7 @@ +package p + +import "fmt" + +func f() { + fmt.Printf("%d") +} diff --git a/libgo/go/cmd/go/testdata/testinternal/p.go b/libgo/go/cmd/go/testdata/testinternal/p.go new file mode 100644 index 00000000000..e3558a53b24 --- /dev/null +++ b/libgo/go/cmd/go/testdata/testinternal/p.go @@ -0,0 +1,3 @@ +package p + +import _ "net/http/internal" diff --git a/libgo/go/cmd/go/testdata/testinternal2/p.go b/libgo/go/cmd/go/testdata/testinternal2/p.go new file mode 100644 index 00000000000..c594f5c5e9e --- /dev/null +++ b/libgo/go/cmd/go/testdata/testinternal2/p.go @@ -0,0 +1,3 @@ +package p + +import _ "./x/y/z/internal/w" diff --git a/libgo/go/cmd/go/testdata/testinternal2/x/y/z/internal/w/w.go b/libgo/go/cmd/go/testdata/testinternal2/x/y/z/internal/w/w.go new file mode 100644 index 00000000000..a796c0b5f4b --- /dev/null +++ b/libgo/go/cmd/go/testdata/testinternal2/x/y/z/internal/w/w.go @@ -0,0 +1 @@ +package w diff --git a/libgo/go/cmd/go/testflag.go b/libgo/go/cmd/go/testflag.go index 73f311e5f69..6da74b99677 100644 --- a/libgo/go/cmd/go/testflag.go +++ b/libgo/go/cmd/go/testflag.go @@ -65,9 +65,9 @@ type testFlagSpec struct { var testFlagDefn = []*testFlagSpec{ // local. {name: "c", boolVar: &testC}, - {name: "file", multiOK: true}, {name: "cover", boolVar: &testCover}, {name: "coverpkg"}, + {name: "o"}, // build flags. {name: "a", boolVar: &buildA}, @@ -153,6 +153,9 @@ func testFlags(args []string) (packageNames, passToTest []string) { // bool flags. case "a", "c", "i", "n", "x", "v", "race", "cover", "work": setBoolFlag(f.boolVar, value) + case "o": + testO = value + testNeedBinary = true case "p": setIntFlag(&buildP, value) case "exec": @@ -184,8 +187,6 @@ func testFlags(args []string) (packageNames, passToTest []string) { buildContext.BuildTags = strings.Fields(value) case "compiler": buildCompiler{}.Set(value) - case "file": - testFiles = append(testFiles, value) case "bench": // record that we saw the flag; don't care about the value testBench = true diff --git a/libgo/go/cmd/go/testgo.go b/libgo/go/cmd/go/testgo.go new file mode 100644 index 00000000000..01923f74bdf --- /dev/null +++ b/libgo/go/cmd/go/testgo.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains extra hooks for testing the go command. 
+// It is compiled into the Go binary only when building the +// test copy; it does not get compiled into the standard go +// command, so these testing hooks are not present in the +// go command that everyone uses. + +// +build testgo + +package main + +import "os" + +func init() { + if v := os.Getenv("TESTGO_IS_GO_RELEASE"); v != "" { + isGoRelease = v == "1" + } +} diff --git a/libgo/go/cmd/go/tool.go b/libgo/go/cmd/go/tool.go index 6d26f7a4b4a..3f11c3e3d44 100644 --- a/libgo/go/cmd/go/tool.go +++ b/libgo/go/cmd/go/tool.go @@ -47,13 +47,13 @@ const toolWindowsExtension = ".exe" func tool(toolName string) string { toolPath := filepath.Join(toolDir, toolName) - if toolIsWindows && toolName != "pprof" { + if toolIsWindows { toolPath += toolWindowsExtension } // Give a nice message if there is no tool with that name. if _, err := os.Stat(toolPath); err != nil { if isInGoToolsRepo(toolName) { - fmt.Fprintf(os.Stderr, "go tool: no such tool %q; to install:\n\tgo get code.google.com/p/go.tools/cmd/%s\n", toolName, toolName) + fmt.Fprintf(os.Stderr, "go tool: no such tool %q; to install:\n\tgo get golang.org/x/tools/cmd/%s\n", toolName, toolName) } else { fmt.Fprintf(os.Stderr, "go tool: no such tool %q\n", toolName) } @@ -91,16 +91,6 @@ func runTool(cmd *Command, args []string) { if toolPath == "" { return } - if toolIsWindows && toolName == "pprof" { - args = append([]string{"perl", toolPath}, args[1:]...) - var err error - toolPath, err = exec.LookPath("perl") - if err != nil { - fmt.Fprintf(os.Stderr, "go tool: perl not found\n") - setExitStatus(3) - return - } - } if toolN { fmt.Printf("%s %s\n", toolPath, strings.Join(args[1:], " ")) return diff --git a/libgo/go/cmd/go/vcs.go b/libgo/go/cmd/go/vcs.go index 8f0bae0b755..1cac6133889 100644 --- a/libgo/go/cmd/go/vcs.go +++ b/libgo/go/cmd/go/vcs.go @@ -33,6 +33,9 @@ type vcsCmd struct { scheme []string pingCmd string + + remoteRepo func(v *vcsCmd, rootDir string) (remoteRepo string, err error) + resolveRepo func(v *vcsCmd, rootDir, remoteRepo string) (realRepo string, err error) } // A tagCmd describes a command to list available tags @@ -81,8 +84,17 @@ var vcsHg = &vcsCmd{ tagSyncCmd: "update -r {tag}", tagSyncDefault: "update default", - scheme: []string{"https", "http", "ssh"}, - pingCmd: "identify {scheme}://{repo}", + scheme: []string{"https", "http", "ssh"}, + pingCmd: "identify {scheme}://{repo}", + remoteRepo: hgRemoteRepo, +} + +func hgRemoteRepo(vcsHg *vcsCmd, rootDir string) (remoteRepo string, err error) { + out, err := vcsHg.runOutput(rootDir, "paths default") + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil } // vcsGit describes how to use Git. @@ -104,8 +116,38 @@ var vcsGit = &vcsCmd{ tagSyncCmd: "checkout {tag}", tagSyncDefault: "checkout master", - scheme: []string{"git", "https", "http", "git+ssh"}, - pingCmd: "ls-remote {scheme}://{repo}", + scheme: []string{"git", "https", "http", "git+ssh"}, + pingCmd: "ls-remote {scheme}://{repo}", + remoteRepo: gitRemoteRepo, +} + +func gitRemoteRepo(vcsGit *vcsCmd, rootDir string) (remoteRepo string, err error) { + outb, err := vcsGit.runOutput(rootDir, "remote -v") + if err != nil { + return "", err + } + out := string(outb) + + // Expect: + // origin https://github.com/rsc/pdf (fetch) + // origin https://github.com/rsc/pdf (push) + // use first line only. 
+ + if !strings.HasPrefix(out, "origin\t") { + return "", fmt.Errorf("unable to parse output of git remote -v") + } + out = strings.TrimPrefix(out, "origin\t") + i := strings.Index(out, "\n") + if i < 0 { + return "", fmt.Errorf("unable to parse output of git remote -v") + } + out = out[:i] + i = strings.LastIndex(out, " ") + if i < 0 { + return "", fmt.Errorf("unable to parse output of git remote -v") + } + out = out[:i] + return strings.TrimSpace(string(out)), nil } // vcsBzr describes how to use Bazaar. @@ -123,8 +165,51 @@ var vcsBzr = &vcsCmd{ tagSyncCmd: "update -r {tag}", tagSyncDefault: "update -r revno:-1", - scheme: []string{"https", "http", "bzr", "bzr+ssh"}, - pingCmd: "info {scheme}://{repo}", + scheme: []string{"https", "http", "bzr", "bzr+ssh"}, + pingCmd: "info {scheme}://{repo}", + remoteRepo: bzrRemoteRepo, + resolveRepo: bzrResolveRepo, +} + +func bzrRemoteRepo(vcsBzr *vcsCmd, rootDir string) (remoteRepo string, err error) { + outb, err := vcsBzr.runOutput(rootDir, "config parent_location") + if err != nil { + return "", err + } + return strings.TrimSpace(string(outb)), nil +} + +func bzrResolveRepo(vcsBzr *vcsCmd, rootDir, remoteRepo string) (realRepo string, err error) { + outb, err := vcsBzr.runOutput(rootDir, "info "+remoteRepo) + if err != nil { + return "", err + } + out := string(outb) + + // Expect: + // ... + // (branch root|repository branch): <URL> + // ... + + found := false + for _, prefix := range []string{"\n branch root: ", "\n repository branch: "} { + i := strings.Index(out, prefix) + if i >= 0 { + out = out[i+len(prefix):] + found = true + break + } + } + if !found { + return "", fmt.Errorf("unable to parse output of bzr info") + } + + i := strings.Index(out, "\n") + if i < 0 { + return "", fmt.Errorf("unable to parse output of bzr info") + } + out = out[:i] + return strings.TrimSpace(string(out)), nil } // vcsSvn describes how to use Subversion. @@ -138,8 +223,34 @@ var vcsSvn = &vcsCmd{ // There is no tag command in subversion. // The branch information is all in the path names. - scheme: []string{"https", "http", "svn", "svn+ssh"}, - pingCmd: "info {scheme}://{repo}", + scheme: []string{"https", "http", "svn", "svn+ssh"}, + pingCmd: "info {scheme}://{repo}", + remoteRepo: svnRemoteRepo, +} + +func svnRemoteRepo(vcsSvn *vcsCmd, rootDir string) (remoteRepo string, err error) { + outb, err := vcsSvn.runOutput(rootDir, "info") + if err != nil { + return "", err + } + out := string(outb) + + // Expect: + // ... + // Repository Root: <URL> + // ... + + i := strings.Index(out, "\nRepository Root: ") + if i < 0 { + return "", fmt.Errorf("unable to parse output of svn info") + } + out = out[i+len("\nRepository Root: "):] + i = strings.Index(out, "\n") + if i < 0 { + return "", fmt.Errorf("unable to parse output of svn info") + } + out = out[:i] + return strings.TrimSpace(string(out)), nil } func (v *vcsCmd) String() string { @@ -361,7 +472,14 @@ var httpPrefixRE = regexp.MustCompile(`^https?:`) func repoRootForImportPath(importPath string) (*repoRoot, error) { rr, err := repoRootForImportPathStatic(importPath, "") if err == errUnknownSite { - rr, err = repoRootForImportDynamic(importPath) + // If there are wildcards, look up the thing before the wildcard, + // hoping it applies to the wildcarded parts too. + // This makes 'go get rsc.io/pdf/...' work in a fresh GOPATH. 
+ lookup := strings.TrimSuffix(importPath, "/...") + if i := strings.Index(lookup, "/.../"); i >= 0 { + lookup = lookup[:i] + } + rr, err = repoRootForImportDynamic(lookup) // repoRootForImportDynamic returns error detail // that is irrelevant if the user didn't intend to use a @@ -465,11 +583,11 @@ func repoRootForImportPathStatic(importPath, scheme string) (*repoRoot, error) { func repoRootForImportDynamic(importPath string) (*repoRoot, error) { slash := strings.Index(importPath, "/") if slash < 0 { - return nil, errors.New("import path doesn't contain a slash") + return nil, errors.New("import path does not contain a slash") } host := importPath[:slash] if !strings.Contains(host, ".") { - return nil, errors.New("import path doesn't contain a hostname") + return nil, errors.New("import path does not begin with hostname") } urlStr, body, err := httpsOrHTTP(importPath) if err != nil { @@ -613,6 +731,15 @@ var vcsPaths = []*vcsPath{ check: launchpadVCS, }, + // IBM DevOps Services (JazzHub) + { + prefix: "hub.jazz.net/git", + re: `^(?P<root>hub.jazz.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`, + vcs: "git", + repo: "https://{root}", + check: noVCSSuffix, + }, + // General syntax for any server. { re: `^(?P<root>(?P<repo>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?/[A-Za-z0-9_.\-/]*?)\.(?P<vcs>bzr|git|hg|svn))(/[A-Za-z0-9_.\-]+)*$`, diff --git a/libgo/go/cmd/go/vcs_test.go b/libgo/go/cmd/go/vcs_test.go new file mode 100644 index 00000000000..14d681ba6af --- /dev/null +++ b/libgo/go/cmd/go/vcs_test.go @@ -0,0 +1,124 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "runtime" + "testing" +) + +// Test that RepoRootForImportPath creates the correct RepoRoot for a given importPath. +// TODO(cmang): Add tests for SVN and BZR. 
+func TestRepoRootForImportPath(t *testing.T) { + if testing.Short() { + t.Skip("skipping test to avoid external network") + } + switch runtime.GOOS { + case "nacl", "android": + t.Skipf("no networking available on %s", runtime.GOOS) + } + tests := []struct { + path string + want *repoRoot + }{ + { + "code.google.com/p/go", + &repoRoot{ + vcs: vcsHg, + repo: "https://code.google.com/p/go", + }, + }, + /*{ + "code.google.com/r/go", + &repoRoot{ + vcs: vcsHg, + repo: "https://code.google.com/r/go", + }, + },*/ + { + "github.com/golang/groupcache", + &repoRoot{ + vcs: vcsGit, + repo: "https://github.com/golang/groupcache", + }, + }, + // IBM DevOps Services tests + { + "hub.jazz.net/git/user1/pkgname", + &repoRoot{ + vcs: vcsGit, + repo: "https://hub.jazz.net/git/user1/pkgname", + }, + }, + { + "hub.jazz.net/git/user1/pkgname/submodule/submodule/submodule", + &repoRoot{ + vcs: vcsGit, + repo: "https://hub.jazz.net/git/user1/pkgname", + }, + }, + { + "hub.jazz.net", + nil, + }, + { + "hub2.jazz.net", + nil, + }, + { + "hub.jazz.net/someotherprefix", + nil, + }, + { + "hub.jazz.net/someotherprefix/user1/pkgname", + nil, + }, + // Spaces are not valid in user names or package names + { + "hub.jazz.net/git/User 1/pkgname", + nil, + }, + { + "hub.jazz.net/git/user1/pkg name", + nil, + }, + // Dots are not valid in user names + { + "hub.jazz.net/git/user.1/pkgname", + nil, + }, + { + "hub.jazz.net/git/user/pkg.name", + &repoRoot{ + vcs: vcsGit, + repo: "https://hub.jazz.net/git/user/pkg.name", + }, + }, + // User names cannot have uppercase letters + { + "hub.jazz.net/git/USER/pkgname", + nil, + }, + } + + for _, test := range tests { + got, err := repoRootForImportPath(test.path) + want := test.want + + if want == nil { + if err == nil { + t.Errorf("RepoRootForImport(%q): Error expected but not received", test.path) + } + continue + } + if err != nil { + t.Errorf("RepoRootForImport(%q): %v", test.path, err) + continue + } + if got.vcs.name != want.vcs.name || got.repo != want.repo { + t.Errorf("RepoRootForImport(%q) = VCS(%s) Repo(%s), want VCS(%s) Repo(%s)", test.path, got.vcs, got.repo, want.vcs, want.repo) + } + } +} diff --git a/libgo/go/cmd/go/vet.go b/libgo/go/cmd/go/vet.go index ffb4318373b..02ff54b2ac8 100644 --- a/libgo/go/cmd/go/vet.go +++ b/libgo/go/cmd/go/vet.go @@ -4,6 +4,8 @@ package main +import "path/filepath" + func init() { addBuildFlagsNX(cmdVet) } @@ -15,7 +17,7 @@ var cmdVet = &Command{ Long: ` Vet runs the Go vet command on the packages named by the import paths. -For more about vet, see 'godoc code.google.com/p/go.tools/cmd/vet'. +For more about vet, see 'godoc golang.org/x/tools/cmd/vet'. For more about specifying packages, see 'go help packages'. To run the vet tool with specific options, run 'go tool vet'. @@ -28,10 +30,21 @@ See also: go fmt, go fix. } func runVet(cmd *Command, args []string) { - for _, pkg := range packages(args) { - // Use pkg.gofiles instead of pkg.Dir so that - // the command only applies to this package, - // not to packages in subdirectories. - run(tool("vet"), relPaths(stringList(pkg.gofiles, pkg.sfiles))) + for _, p := range packages(args) { + // Vet expects to be given a set of files all from the same package. + // Run once for package p and once for package p_test. 
+ if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles) > 0 { + runVetFiles(p, stringList(p.GoFiles, p.CgoFiles, p.TestGoFiles, p.SFiles)) + } + if len(p.XTestGoFiles) > 0 { + runVetFiles(p, stringList(p.XTestGoFiles)) + } + } +} + +func runVetFiles(p *Package, files []string) { + for i := range files { + files[i] = filepath.Join(p.Dir, files[i]) } + run(tool("vet"), relPaths(files)) } diff --git a/libgo/go/cmd/gofmt/doc.go b/libgo/go/cmd/gofmt/doc.go index 8f73ef5b9dd..3fc0439548f 100644 --- a/libgo/go/cmd/gofmt/doc.go +++ b/libgo/go/cmd/gofmt/doc.go @@ -67,7 +67,7 @@ To remove the parentheses: To convert the package tree from explicit slice upper bounds to implicit ones: - gofmt -r 'α[β:len(α)] -> α[β:]' -w $GOROOT/src/pkg + gofmt -r 'α[β:len(α)] -> α[β:]' -w $GOROOT/src The simplify command diff --git a/libgo/go/cmd/gofmt/gofmt.go b/libgo/go/cmd/gofmt/gofmt.go index 576cae5228e..81da21ff109 100644 --- a/libgo/go/cmd/gofmt/gofmt.go +++ b/libgo/go/cmd/gofmt/gofmt.go @@ -87,13 +87,13 @@ func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error return err } - file, adjust, err := parse(fileSet, filename, src, stdin) + file, sourceAdj, indentAdj, err := parse(fileSet, filename, src, stdin) if err != nil { return err } if rewrite != nil { - if adjust == nil { + if sourceAdj == nil { file = rewrite(file) } else { fmt.Fprintf(os.Stderr, "warning: rewrite ignored for incomplete programs\n") @@ -106,15 +106,10 @@ func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error simplify(file) } - var buf bytes.Buffer - err = (&printer.Config{Mode: printerMode, Tabwidth: tabWidth}).Fprint(&buf, fileSet, file) + res, err := format(fileSet, file, sourceAdj, indentAdj, src, printer.Config{Mode: printerMode, Tabwidth: tabWidth}) if err != nil { return err } - res := buf.Bytes() - if adjust != nil { - res = adjust(src, res) - } if !bytes.Equal(src, res) { // formatting has changed @@ -122,7 +117,7 @@ func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error fmt.Fprintln(out, filename) } if *write { - err = ioutil.WriteFile(filename, res, 0) + err = ioutil.WriteFile(filename, res, 0644) if err != nil { return err } @@ -186,6 +181,11 @@ func gofmtMain() { initRewrite() if flag.NArg() == 0 { + if *write { + fmt.Fprintln(os.Stderr, "error: cannot use -w with standard input") + exitCode = 2 + return + } if err := processFile("<standard input>", os.Stdin, os.Stdout, true); err != nil { report(err) } @@ -235,19 +235,29 @@ func diff(b1, b2 []byte) (data []byte, err error) { } -// parse parses src, which was read from filename, -// as a Go source file or statement list. -func parse(fset *token.FileSet, filename string, src []byte, stdin bool) (*ast.File, func(orig, src []byte) []byte, error) { +// ---------------------------------------------------------------------------- +// Support functions +// +// The functions parse, format, and isSpace below are identical to the +// respective functions in src/go/format/format.go - keep them in sync! +// +// TODO(gri) Factor out this functionality, eventually. + +// parse parses src, which was read from the named file, +// as a Go source file, declaration, or statement list. +func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) ( + file *ast.File, + sourceAdj func(src []byte, indent int) []byte, + indentAdj int, + err error, +) { // Try as whole source file. 
- file, err := parser.ParseFile(fset, filename, src, parserMode) - if err == nil { - return file, nil, nil - } - // If the error is that the source file didn't begin with a - // package line and this is standard input, fall through to + file, err = parser.ParseFile(fset, filename, src, parserMode) + // If there's no error, return. If the error is that the source file didn't begin with a + // package line and source fragments are ok, fall through to // try as a source fragment. Stop and return on any other error. - if !stdin || !strings.Contains(err.Error(), "expected 'package'") { - return nil, nil, err + if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") { + return } // If this is a declaration list, make it a source file @@ -257,19 +267,19 @@ func parse(fset *token.FileSet, filename string, src []byte, stdin bool) (*ast.F psrc := append([]byte("package p;"), src...) file, err = parser.ParseFile(fset, filename, psrc, parserMode) if err == nil { - adjust := func(orig, src []byte) []byte { + sourceAdj = func(src []byte, indent int) []byte { // Remove the package clause. // Gofmt has turned the ; into a \n. - src = src[len("package p\n"):] - return matchSpace(orig, src) + src = src[indent+len("package p\n"):] + return bytes.TrimSpace(src) } - return file, adjust, nil + return } // If the error is that the source file didn't begin with a // declaration, fall through to try as a statement list. // Stop and return on any other error. if !strings.Contains(err.Error(), "expected declaration") { - return nil, nil, err + return } // If this is a statement list, make it a source file @@ -277,68 +287,101 @@ func parse(fset *token.FileSet, filename string, src []byte, stdin bool) (*ast.F // into a function body. This handles expressions too. // Insert using a ;, not a newline, so that the line numbers // in fsrc match the ones in src. - fsrc := append(append([]byte("package p; func _() {"), src...), '}') + fsrc := append(append([]byte("package p; func _() {"), src...), '\n', '}') file, err = parser.ParseFile(fset, filename, fsrc, parserMode) if err == nil { - adjust := func(orig, src []byte) []byte { + sourceAdj = func(src []byte, indent int) []byte { + // Cap adjusted indent to zero. + if indent < 0 { + indent = 0 + } // Remove the wrapping. // Gofmt has turned the ; into a \n\n. - src = src[len("package p\n\nfunc _() {"):] - src = src[:len(src)-len("}\n")] - // Gofmt has also indented the function body one level. - // Remove that indent. - src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1) - return matchSpace(orig, src) + // There will be two non-blank lines with indent, hence 2*indent. + src = src[2*indent+len("package p\n\nfunc _() {"):] + src = src[:len(src)-(indent+len("\n}\n"))] + return bytes.TrimSpace(src) } - return file, adjust, nil + // Gofmt has also indented the function body one level. + // Adjust that with indentAdj. + indentAdj = -1 } - // Failed, and out of options. - return nil, nil, err + // Succeeded, or out of options. + return } -func cutSpace(b []byte) (before, middle, after []byte) { - i := 0 - for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') { - i++ - } - j := len(b) - for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') { - j-- - } - if i <= j { - return b[:i], b[i:j], b[j:] +// format formats the given package file originally obtained from src +// and adjusts the result based on the original source via sourceAdj +// and indentAdj. 
+func format( + fset *token.FileSet, + file *ast.File, + sourceAdj func(src []byte, indent int) []byte, + indentAdj int, + src []byte, + cfg printer.Config, +) ([]byte, error) { + if sourceAdj == nil { + // Complete source file. + var buf bytes.Buffer + err := cfg.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + return buf.Bytes(), nil } - return nil, nil, b[j:] -} -// matchSpace reformats src to use the same space context as orig. -// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src. -// 2) matchSpace copies the indentation of the first non-blank line in orig -// to every non-blank line in src. -// 3) matchSpace copies the trailing space from orig and uses it in place -// of src's trailing space. -func matchSpace(orig []byte, src []byte) []byte { - before, _, after := cutSpace(orig) - i := bytes.LastIndex(before, []byte{'\n'}) - before, indent := before[:i+1], before[i+1:] - - _, src, _ = cutSpace(src) - - var b bytes.Buffer - b.Write(before) - for len(src) > 0 { - line := src - if i := bytes.IndexByte(line, '\n'); i >= 0 { - line, src = line[:i+1], line[i+1:] - } else { - src = nil + // Partial source file. + // Determine and prepend leading space. + i, j := 0, 0 + for j < len(src) && isSpace(src[j]) { + if src[j] == '\n' { + i = j + 1 // byte offset of last line in leading space } - if len(line) > 0 && line[0] != '\n' { // not blank - b.Write(indent) + j++ + } + var res []byte + res = append(res, src[:i]...) + + // Determine and prepend indentation of first code line. + // Spaces are ignored unless there are no tabs, + // in which case spaces count as one tab. + indent := 0 + hasSpace := false + for _, b := range src[i:j] { + switch b { + case ' ': + hasSpace = true + case '\t': + indent++ } - b.Write(line) } - b.Write(after) - return b.Bytes() + if indent == 0 && hasSpace { + indent = 1 + } + for i := 0; i < indent; i++ { + res = append(res, '\t') + } + + // Format the source. + // Write it without any leading and trailing space. + cfg.Indent = indent + indentAdj + var buf bytes.Buffer + err := cfg.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + res = append(res, sourceAdj(buf.Bytes(), cfg.Indent)...) + + // Determine and append trailing space. + i = len(src) + for i > 0 && isSpace(src[i-1]) { + i-- + } + return append(res, src[i:]...), nil +} + +func isSpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' } diff --git a/libgo/go/cmd/gofmt/gofmt_test.go b/libgo/go/cmd/gofmt/gofmt_test.go index b9335b8f3db..d1edb7bcc16 100644 --- a/libgo/go/cmd/gofmt/gofmt_test.go +++ b/libgo/go/cmd/gofmt/gofmt_test.go @@ -6,18 +6,60 @@ package main import ( "bytes" + "flag" "io/ioutil" + "os" "path/filepath" "strings" "testing" + "text/scanner" ) -func runTest(t *testing.T, in, out, flags string) { +var update = flag.Bool("update", false, "update .golden files") + +// gofmtFlags looks for a comment of the form +// +// //gofmt flags +// +// within the first maxLines lines of the given file, +// and returns the flags string, if any. Otherwise it +// returns the empty string. 
+func gofmtFlags(filename string, maxLines int) string { + f, err := os.Open(filename) + if err != nil { + return "" // ignore errors - they will be found later + } + defer f.Close() + + // initialize scanner + var s scanner.Scanner + s.Init(f) + s.Error = func(*scanner.Scanner, string) {} // ignore errors + s.Mode = scanner.GoTokens &^ scanner.SkipComments // want comments + + // look for //gofmt comment + for s.Line <= maxLines { + switch s.Scan() { + case scanner.Comment: + const prefix = "//gofmt " + if t := s.TokenText(); strings.HasPrefix(t, prefix) { + return strings.TrimSpace(t[len(prefix):]) + } + case scanner.EOF: + return "" + } + + } + + return "" +} + +func runTest(t *testing.T, in, out string) { // process flags *simplifyAST = false *rewriteRule = "" stdin := false - for _, flag := range strings.Split(flags, " ") { + for _, flag := range strings.Split(gofmtFlags(in, 20), " ") { elts := strings.SplitN(flag, "=", 2) name := elts[0] value := "" @@ -56,6 +98,17 @@ func runTest(t *testing.T, in, out, flags string) { } if got := buf.Bytes(); !bytes.Equal(got, expected) { + if *update { + if in != out { + if err := ioutil.WriteFile(out, got, 0666); err != nil { + t.Error(err) + } + return + } + // in == out: don't accidentally destroy input + t.Errorf("WARNING: -update did not rewrite input file %s", in) + } + t.Errorf("(gofmt %s) != %s (see %s.gofmt)", in, out, in) d, err := diff(expected, got) if err == nil { @@ -67,51 +120,37 @@ func runTest(t *testing.T, in, out, flags string) { } } -var tests = []struct { - in, flags string -}{ - {"gofmt.go", ""}, - {"gofmt_test.go", ""}, - {"testdata/composites.input", "-s"}, - {"testdata/slices1.input", "-s"}, - {"testdata/slices2.input", "-s"}, - {"testdata/old.input", ""}, - {"testdata/rewrite1.input", "-r=Foo->Bar"}, - {"testdata/rewrite2.input", "-r=int->bool"}, - {"testdata/rewrite3.input", "-r=x->x"}, - {"testdata/rewrite4.input", "-r=(x)->x"}, - {"testdata/rewrite5.input", "-r=x+x->2*x"}, - {"testdata/rewrite6.input", "-r=fun(x)->Fun(x)"}, - {"testdata/rewrite7.input", "-r=fun(x...)->Fun(x)"}, - {"testdata/rewrite8.input", "-r=interface{}->int"}, - {"testdata/stdin*.input", "-stdin"}, - {"testdata/comments.input", ""}, - {"testdata/import.input", ""}, - {"testdata/crlf.input", ""}, // test case for issue 3961; see also TestCRLF - {"testdata/typeswitch.input", ""}, // test case for issue 4470 -} - +// TestRewrite processes testdata/*.input files and compares them to the +// corresponding testdata/*.golden files. The gofmt flags used to process +// a file must be provided via a comment of the form +// +// //gofmt flags +// +// in the processed file within the first 20 lines, if any. func TestRewrite(t *testing.T) { - for _, test := range tests { - match, err := filepath.Glob(test.in) - if err != nil { - t.Error(err) - continue + // determine input files + match, err := filepath.Glob("testdata/*.input") + if err != nil { + t.Fatal(err) + } + + // add larger examples + match = append(match, "gofmt.go", "gofmt_test.go") + + for _, in := range match { + out := in // for files where input and output are identical + if strings.HasSuffix(in, ".input") { + out = in[:len(in)-len(".input")] + ".golden" } - for _, in := range match { - out := in - if strings.HasSuffix(in, ".input") { - out = in[:len(in)-len(".input")] + ".golden" - } - runTest(t, in, out, test.flags) - if in != out { - // Check idempotence. - runTest(t, out, out, test.flags) - } + runTest(t, in, out) + if in != out { + // Check idempotence. 
+ runTest(t, out, out) } } } +// Test case for issue 3961. func TestCRLF(t *testing.T) { const input = "testdata/crlf.input" // must contain CR/LF's const golden = "testdata/crlf.golden" // must not contain any CR's diff --git a/libgo/go/cmd/gofmt/long_test.go b/libgo/go/cmd/gofmt/long_test.go index 108278b3369..237b86021bf 100644 --- a/libgo/go/cmd/gofmt/long_test.go +++ b/libgo/go/cmd/gofmt/long_test.go @@ -32,7 +32,7 @@ var ( ) func gofmt(fset *token.FileSet, filename string, src *bytes.Buffer) error { - f, _, err := parse(fset, filename, src.Bytes(), false) + f, _, _, err := parse(fset, filename, src.Bytes(), false) if err != nil { return err } @@ -60,7 +60,7 @@ func testFile(t *testing.T, b1, b2 *bytes.Buffer, filename string) { // exclude files w/ syntax errors (typically test cases) fset := token.NewFileSet() - if _, _, err = parse(fset, filename, b1.Bytes(), false); err != nil { + if _, _, _, err = parse(fset, filename, b1.Bytes(), false); err != nil { if *verbose { fmt.Fprintf(os.Stderr, "ignoring %s\n", err) } diff --git a/libgo/go/cmd/gofmt/rewrite.go b/libgo/go/cmd/gofmt/rewrite.go index fb6c6fc811a..d267cfcc1dc 100644 --- a/libgo/go/cmd/gofmt/rewrite.go +++ b/libgo/go/cmd/gofmt/rewrite.go @@ -226,9 +226,6 @@ func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { return true case reflect.Struct: - if p.NumField() != v.NumField() { - return false - } for i := 0; i < p.NumField(); i++ { if !match(m, p.Field(i), v.Field(i)) { return false diff --git a/libgo/go/cmd/gofmt/simplify.go b/libgo/go/cmd/gofmt/simplify.go index 45d000d675e..69f7bf23c0b 100644 --- a/libgo/go/cmd/gofmt/simplify.go +++ b/libgo/go/cmd/gofmt/simplify.go @@ -68,9 +68,10 @@ func (s *simplifier) Visit(node ast.Node) ast.Visitor { // a slice expression of the form: s[a:len(s)] // can be simplified to: s[a:] // if s is "simple enough" (for now we only accept identifiers) - if s.hasDotImport { - // if dot imports are present, we cannot be certain that an - // unresolved "len" identifier refers to the predefined len() + if n.Max != nil || s.hasDotImport { + // - 3-index slices always require the 2nd and 3rd index + // - if dot imports are present, we cannot be certain that an + // unresolved "len" identifier refers to the predefined len() break } if s, _ := n.X.(*ast.Ident); s != nil && s.Obj != nil { @@ -96,16 +97,26 @@ func (s *simplifier) Visit(node ast.Node) ast.Visitor { // x, y := b[:n], b[n:] case *ast.RangeStmt: - // a range of the form: for x, _ = range v {...} + // - a range of the form: for x, _ = range v {...} // can be simplified to: for x = range v {...} - if ident, _ := n.Value.(*ast.Ident); ident != nil && ident.Name == "_" { + // - a range of the form: for _ = range v {...} + // can be simplified to: for range v {...} + if isBlank(n.Value) { n.Value = nil } + if isBlank(n.Key) && n.Value == nil { + n.Key = nil + } } return s } +func isBlank(x ast.Expr) bool { + ident, ok := x.(*ast.Ident) + return ok && ident.Name == "_" +} + func simplify(f *ast.File) { var s simplifier @@ -117,5 +128,34 @@ func simplify(f *ast.File) { } } + // remove empty declarations such as "const ()", etc + removeEmptyDeclGroups(f) + ast.Walk(&s, f) } + +func removeEmptyDeclGroups(f *ast.File) { + i := 0 + for _, d := range f.Decls { + if g, ok := d.(*ast.GenDecl); !ok || !isEmpty(f, g) { + f.Decls[i] = d + i++ + } + } + f.Decls = f.Decls[:i] +} + +func isEmpty(f *ast.File, g *ast.GenDecl) bool { + if g.Doc != nil || g.Specs != nil { + return false + } + + for _, c := range f.Comments { + // if there is a 
comment in the declaration, it is not considered empty + if g.Pos() <= c.Pos() && c.End() <= g.End() { + return false + } + } + + return true +} diff --git a/libgo/go/cmd/gofmt/testdata/composites.golden b/libgo/go/cmd/gofmt/testdata/composites.golden index b2825e732aa..fc9c98e625b 100644 --- a/libgo/go/cmd/gofmt/testdata/composites.golden +++ b/libgo/go/cmd/gofmt/testdata/composites.golden @@ -1,3 +1,5 @@ +//gofmt -s + package P type T struct { diff --git a/libgo/go/cmd/gofmt/testdata/composites.input b/libgo/go/cmd/gofmt/testdata/composites.input index 7210dafc96c..fc7598af99e 100644 --- a/libgo/go/cmd/gofmt/testdata/composites.input +++ b/libgo/go/cmd/gofmt/testdata/composites.input @@ -1,3 +1,5 @@ +//gofmt -s + package P type T struct { diff --git a/libgo/go/cmd/gofmt/testdata/crlf.golden b/libgo/go/cmd/gofmt/testdata/crlf.golden index 57679f770fe..193dbacc727 100644 --- a/libgo/go/cmd/gofmt/testdata/crlf.golden +++ b/libgo/go/cmd/gofmt/testdata/crlf.golden @@ -2,6 +2,7 @@ Source containing CR/LF line endings. The gofmt'ed output must only have LF line endings. + Test case for issue 3961. */ package main diff --git a/libgo/go/cmd/gofmt/testdata/crlf.input b/libgo/go/cmd/gofmt/testdata/crlf.input index 61a1aa0b4ee..ae7e14dbf13 100644 --- a/libgo/go/cmd/gofmt/testdata/crlf.input +++ b/libgo/go/cmd/gofmt/testdata/crlf.input @@ -2,6 +2,7 @@ Source containing CR/LF line endings.
The gofmt'ed output must only have LF
line endings.
+ Test case for issue 3961.
*/
package main
diff --git a/libgo/go/cmd/gofmt/testdata/rewrite1.golden b/libgo/go/cmd/gofmt/testdata/rewrite1.golden index d9beb370582..3ee5373a790 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite1.golden +++ b/libgo/go/cmd/gofmt/testdata/rewrite1.golden @@ -1,3 +1,5 @@ +//gofmt -r=Foo->Bar + // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite1.input b/libgo/go/cmd/gofmt/testdata/rewrite1.input index bdb894320d3..a84c8f78165 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite1.input +++ b/libgo/go/cmd/gofmt/testdata/rewrite1.input @@ -1,3 +1,5 @@ +//gofmt -r=Foo->Bar + // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite2.golden b/libgo/go/cmd/gofmt/testdata/rewrite2.golden index 64c67ffa67b..f980e035309 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite2.golden +++ b/libgo/go/cmd/gofmt/testdata/rewrite2.golden @@ -1,3 +1,5 @@ +//gofmt -r=int->bool + // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite2.input b/libgo/go/cmd/gofmt/testdata/rewrite2.input index 21171447a10..489be4e07dc 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite2.input +++ b/libgo/go/cmd/gofmt/testdata/rewrite2.input @@ -1,3 +1,5 @@ +//gofmt -r=int->bool + // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite3.golden b/libgo/go/cmd/gofmt/testdata/rewrite3.golden index 0d16d16011b..261a220c65d 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite3.golden +++ b/libgo/go/cmd/gofmt/testdata/rewrite3.golden @@ -1,3 +1,5 @@ +//gofmt -r=x->x + // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite3.input b/libgo/go/cmd/gofmt/testdata/rewrite3.input index 0d16d16011b..261a220c65d 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite3.input +++ b/libgo/go/cmd/gofmt/testdata/rewrite3.input @@ -1,3 +1,5 @@ +//gofmt -r=x->x + // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite4.golden b/libgo/go/cmd/gofmt/testdata/rewrite4.golden index 8dfc81a0746..b05547b4bf0 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite4.golden +++ b/libgo/go/cmd/gofmt/testdata/rewrite4.golden @@ -1,3 +1,5 @@ +//gofmt -r=(x)->x + // Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite4.input b/libgo/go/cmd/gofmt/testdata/rewrite4.input index 164cc0451f3..0817099209c 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite4.input +++ b/libgo/go/cmd/gofmt/testdata/rewrite4.input @@ -1,3 +1,5 @@ +//gofmt -r=(x)->x + // Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/libgo/go/cmd/gofmt/testdata/rewrite5.golden b/libgo/go/cmd/gofmt/testdata/rewrite5.golden index 5a448a63d37..9beb34aee76 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite5.golden +++ b/libgo/go/cmd/gofmt/testdata/rewrite5.golden @@ -1,3 +1,5 @@ +//gofmt -r=x+x->2*x + // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite5.input b/libgo/go/cmd/gofmt/testdata/rewrite5.input index 0d759e69b6d..d7a6122d07a 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite5.input +++ b/libgo/go/cmd/gofmt/testdata/rewrite5.input @@ -1,3 +1,5 @@ +//gofmt -r=x+x->2*x + // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite6.golden b/libgo/go/cmd/gofmt/testdata/rewrite6.golden index e565dbdd97b..48ec9aa0df7 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite6.golden +++ b/libgo/go/cmd/gofmt/testdata/rewrite6.golden @@ -1,3 +1,5 @@ +//gofmt -r=fun(x)->Fun(x) + // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite6.input b/libgo/go/cmd/gofmt/testdata/rewrite6.input index 8c088b3e878..b085a84fef4 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite6.input +++ b/libgo/go/cmd/gofmt/testdata/rewrite6.input @@ -1,3 +1,5 @@ +//gofmt -r=fun(x)->Fun(x) + // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite7.golden b/libgo/go/cmd/gofmt/testdata/rewrite7.golden index 29babad9f94..8386a0b2a3e 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite7.golden +++ b/libgo/go/cmd/gofmt/testdata/rewrite7.golden @@ -1,3 +1,5 @@ +//gofmt -r=fun(x...)->Fun(x) + // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite7.input b/libgo/go/cmd/gofmt/testdata/rewrite7.input index 073e2a3e6f8..c1984708e71 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite7.input +++ b/libgo/go/cmd/gofmt/testdata/rewrite7.input @@ -1,3 +1,5 @@ +//gofmt -r=fun(x...)->Fun(x) + // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite8.golden b/libgo/go/cmd/gofmt/testdata/rewrite8.golden index cfc452b0310..62f0419dfb4 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite8.golden +++ b/libgo/go/cmd/gofmt/testdata/rewrite8.golden @@ -1,3 +1,5 @@ +//gofmt -r=interface{}->int + // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/libgo/go/cmd/gofmt/testdata/rewrite8.input b/libgo/go/cmd/gofmt/testdata/rewrite8.input index 235efa91cc6..7964c5c75c7 100644 --- a/libgo/go/cmd/gofmt/testdata/rewrite8.input +++ b/libgo/go/cmd/gofmt/testdata/rewrite8.input @@ -1,3 +1,5 @@ +//gofmt -r=interface{}->int + // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
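Editor's note, not part of the patch: every testdata file in this patch now carries its flags in a leading //gofmt comment, which the new gofmtFlags helper in gofmt_test.go reads instead of the old hard-coded table. The slices1 files that follow exercise the -s simplifier together with the three-index-slice exception added to simplify.go. A hypothetical minimal .input file in the new style (illustrative only, not one of the files in the patch) would look like:

//gofmt -s

package p

var s []int

var (
	_ = s[0:len(s)]        // -s rewrites this to s[0:] in the .golden file
	_ = s[:len(s)-1]       // left alone: the upper bound is not len(s)
	_ = s[2:len(s):len(s)] // left alone: 3-index slices always need both bounds
)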
diff --git a/libgo/go/cmd/gofmt/testdata/slices1.golden b/libgo/go/cmd/gofmt/testdata/slices1.golden index 61e074f68a8..04bc16f2160 100644 --- a/libgo/go/cmd/gofmt/testdata/slices1.golden +++ b/libgo/go/cmd/gofmt/testdata/slices1.golden @@ -1,3 +1,5 @@ +//gofmt -s + // Test cases for slice expression simplification. package p @@ -15,6 +17,7 @@ var ( _ = a[3:(len(a))] _ = a[len(a) : len(a)-1] _ = a[0:len(b)] + _ = a[2:len(a):len(a)] _ = a[:] _ = a[:10] @@ -22,6 +25,7 @@ var ( _ = a[:(len(a))] _ = a[:len(a)-1] _ = a[:len(b)] + _ = a[:len(a):len(a)] _ = s[0:] _ = s[1:10] @@ -29,6 +33,7 @@ var ( _ = s[3:(len(s))] _ = s[len(a) : len(s)-1] _ = s[0:len(b)] + _ = s[2:len(s):len(s)] _ = s[:] _ = s[:10] @@ -36,6 +41,7 @@ var ( _ = s[:(len(s))] _ = s[:len(s)-1] _ = s[:len(b)] + _ = s[:len(s):len(s)] _ = t.s[0:] _ = t.s[1:10] @@ -43,6 +49,7 @@ var ( _ = t.s[3:(len(t.s))] _ = t.s[len(a) : len(t.s)-1] _ = t.s[0:len(b)] + _ = t.s[2:len(t.s):len(t.s)] _ = t.s[:] _ = t.s[:10] @@ -50,6 +57,7 @@ var ( _ = t.s[:(len(t.s))] _ = t.s[:len(t.s)-1] _ = t.s[:len(b)] + _ = t.s[:len(t.s):len(t.s)] ) func _() { diff --git a/libgo/go/cmd/gofmt/testdata/slices1.input b/libgo/go/cmd/gofmt/testdata/slices1.input index 4d2cbfff400..1f25c43ccbc 100644 --- a/libgo/go/cmd/gofmt/testdata/slices1.input +++ b/libgo/go/cmd/gofmt/testdata/slices1.input @@ -1,3 +1,5 @@ +//gofmt -s + // Test cases for slice expression simplification. package p @@ -15,6 +17,7 @@ var ( _ = a[3:(len(a))] _ = a[len(a) : len(a)-1] _ = a[0:len(b)] + _ = a[2:len(a):len(a)] _ = a[:] _ = a[:10] @@ -22,6 +25,7 @@ var ( _ = a[:(len(a))] _ = a[:len(a)-1] _ = a[:len(b)] + _ = a[:len(a):len(a)] _ = s[0:] _ = s[1:10] @@ -29,6 +33,7 @@ var ( _ = s[3:(len(s))] _ = s[len(a) : len(s)-1] _ = s[0:len(b)] + _ = s[2:len(s):len(s)] _ = s[:] _ = s[:10] @@ -36,6 +41,7 @@ var ( _ = s[:(len(s))] _ = s[:len(s)-1] _ = s[:len(b)] + _ = s[:len(s):len(s)] _ = t.s[0:] _ = t.s[1:10] @@ -43,6 +49,7 @@ var ( _ = t.s[3:(len(t.s))] _ = t.s[len(a) : len(t.s)-1] _ = t.s[0:len(b)] + _ = t.s[2:len(t.s):len(t.s)] _ = t.s[:] _ = t.s[:10] @@ -50,6 +57,7 @@ var ( _ = t.s[:(len(t.s))] _ = t.s[:len(t.s)-1] _ = t.s[:len(b)] + _ = t.s[:len(t.s):len(t.s)] ) func _() { diff --git a/libgo/go/cmd/gofmt/testdata/slices2.golden b/libgo/go/cmd/gofmt/testdata/slices2.golden index 433788e1ee6..ab657004e64 100644 --- a/libgo/go/cmd/gofmt/testdata/slices2.golden +++ b/libgo/go/cmd/gofmt/testdata/slices2.golden @@ -1,3 +1,5 @@ +//gofmt -s + // Test cases for slice expression simplification. // Because of a dot import, these slices must remain untouched. package p diff --git a/libgo/go/cmd/gofmt/testdata/slices2.input b/libgo/go/cmd/gofmt/testdata/slices2.input index 433788e1ee6..ab657004e64 100644 --- a/libgo/go/cmd/gofmt/testdata/slices2.input +++ b/libgo/go/cmd/gofmt/testdata/slices2.input @@ -1,3 +1,5 @@ +//gofmt -s + // Test cases for slice expression simplification. // Because of a dot import, these slices must remain untouched. 
package p diff --git a/libgo/go/cmd/gofmt/testdata/stdin1.golden b/libgo/go/cmd/gofmt/testdata/stdin1.golden index ff8b0b7ab48..9e4dcd20fe0 100644 --- a/libgo/go/cmd/gofmt/testdata/stdin1.golden +++ b/libgo/go/cmd/gofmt/testdata/stdin1.golden @@ -1,3 +1,5 @@ + //gofmt -stdin + if x { y } diff --git a/libgo/go/cmd/gofmt/testdata/stdin1.golden.gofmt b/libgo/go/cmd/gofmt/testdata/stdin1.golden.gofmt deleted file mode 100644 index 1f888877d01..00000000000 --- a/libgo/go/cmd/gofmt/testdata/stdin1.golden.gofmt +++ /dev/null @@ -1,3 +0,0 @@ - if x { - y -} diff --git a/libgo/go/cmd/gofmt/testdata/stdin1.input b/libgo/go/cmd/gofmt/testdata/stdin1.input index ff8b0b7ab48..9e4dcd20fe0 100644 --- a/libgo/go/cmd/gofmt/testdata/stdin1.input +++ b/libgo/go/cmd/gofmt/testdata/stdin1.input @@ -1,3 +1,5 @@ + //gofmt -stdin + if x { y } diff --git a/libgo/go/cmd/gofmt/testdata/stdin1.input.gofmt b/libgo/go/cmd/gofmt/testdata/stdin1.input.gofmt deleted file mode 100644 index 1f888877d01..00000000000 --- a/libgo/go/cmd/gofmt/testdata/stdin1.input.gofmt +++ /dev/null @@ -1,3 +0,0 @@ - if x { - y -} diff --git a/libgo/go/cmd/gofmt/testdata/stdin2.golden b/libgo/go/cmd/gofmt/testdata/stdin2.golden index 7eb1b54fec0..57df3554035 100644 --- a/libgo/go/cmd/gofmt/testdata/stdin2.golden +++ b/libgo/go/cmd/gofmt/testdata/stdin2.golden @@ -1,4 +1,4 @@ - +//gofmt -stdin var x int diff --git a/libgo/go/cmd/gofmt/testdata/stdin2.golden.gofmt b/libgo/go/cmd/gofmt/testdata/stdin2.golden.gofmt deleted file mode 100644 index 85e80030081..00000000000 --- a/libgo/go/cmd/gofmt/testdata/stdin2.golden.gofmt +++ /dev/null @@ -1,10 +0,0 @@ - - - -var x int - -func f() { - y := z -} - - diff --git a/libgo/go/cmd/gofmt/testdata/stdin2.input b/libgo/go/cmd/gofmt/testdata/stdin2.input index 99defd2d10c..69d6bdd682e 100644 --- a/libgo/go/cmd/gofmt/testdata/stdin2.input +++ b/libgo/go/cmd/gofmt/testdata/stdin2.input @@ -1,4 +1,4 @@ - +//gofmt -stdin var x int diff --git a/libgo/go/cmd/gofmt/testdata/stdin2.input.gofmt b/libgo/go/cmd/gofmt/testdata/stdin2.input.gofmt deleted file mode 100644 index 7eb1b54fec0..00000000000 --- a/libgo/go/cmd/gofmt/testdata/stdin2.input.gofmt +++ /dev/null @@ -1,11 +0,0 @@ - - -var x int - -func f() { - y := z - /* this is a comment */ - // this is a comment too -} - - diff --git a/libgo/go/cmd/gofmt/testdata/stdin3.golden b/libgo/go/cmd/gofmt/testdata/stdin3.golden index 1bf2f5a483f..d6da0e417a0 100644 --- a/libgo/go/cmd/gofmt/testdata/stdin3.golden +++ b/libgo/go/cmd/gofmt/testdata/stdin3.golden @@ -1,3 +1,4 @@ + //gofmt -stdin /* note: no newline at end of file */ for i := 0; i < 10; i++ { diff --git a/libgo/go/cmd/gofmt/testdata/stdin3.golden.gofmt b/libgo/go/cmd/gofmt/testdata/stdin3.golden.gofmt deleted file mode 100644 index b4d1d4663ed..00000000000 --- a/libgo/go/cmd/gofmt/testdata/stdin3.golden.gofmt +++ /dev/null @@ -1,7 +0,0 @@ - - - /* note: no newline at end of file */ - for i := 0; i < 10; i++ { - s += i - } -
\ No newline at end of file diff --git a/libgo/go/cmd/gofmt/testdata/stdin3.input b/libgo/go/cmd/gofmt/testdata/stdin3.input index d963bd0d21b..ab46c1063be 100644 --- a/libgo/go/cmd/gofmt/testdata/stdin3.input +++ b/libgo/go/cmd/gofmt/testdata/stdin3.input @@ -1,3 +1,4 @@ + //gofmt -stdin /* note: no newline at end of file */ for i := 0; i < 10; i++ { s += i } diff --git a/libgo/go/cmd/gofmt/testdata/stdin3.input.gofmt b/libgo/go/cmd/gofmt/testdata/stdin3.input.gofmt deleted file mode 100644 index b4d1d4663ed..00000000000 --- a/libgo/go/cmd/gofmt/testdata/stdin3.input.gofmt +++ /dev/null @@ -1,7 +0,0 @@ - - - /* note: no newline at end of file */ - for i := 0; i < 10; i++ { - s += i - } -
\ No newline at end of file diff --git a/libgo/go/cmd/gofmt/testdata/stdin4.golden b/libgo/go/cmd/gofmt/testdata/stdin4.golden index 5f73435517f..0c7acace5d0 100644 --- a/libgo/go/cmd/gofmt/testdata/stdin4.golden +++ b/libgo/go/cmd/gofmt/testdata/stdin4.golden @@ -1,3 +1,5 @@ + //gofmt -stdin + // comment i := 0 diff --git a/libgo/go/cmd/gofmt/testdata/stdin4.golden.gofmt b/libgo/go/cmd/gofmt/testdata/stdin4.golden.gofmt deleted file mode 100644 index 5f73435517f..00000000000 --- a/libgo/go/cmd/gofmt/testdata/stdin4.golden.gofmt +++ /dev/null @@ -1,3 +0,0 @@ - // comment - - i := 0 diff --git a/libgo/go/cmd/gofmt/testdata/stdin4.input b/libgo/go/cmd/gofmt/testdata/stdin4.input index f02a54fb1a9..1fc73f31e5e 100644 --- a/libgo/go/cmd/gofmt/testdata/stdin4.input +++ b/libgo/go/cmd/gofmt/testdata/stdin4.input @@ -1,3 +1,5 @@ + //gofmt -stdin + // comment i := 0 diff --git a/libgo/go/cmd/gofmt/testdata/stdin4.input.gofmt b/libgo/go/cmd/gofmt/testdata/stdin4.input.gofmt deleted file mode 100644 index 5f73435517f..00000000000 --- a/libgo/go/cmd/gofmt/testdata/stdin4.input.gofmt +++ /dev/null @@ -1,3 +0,0 @@ - // comment - - i := 0 diff --git a/libgo/go/compress/bzip2/bzip2.go b/libgo/go/compress/bzip2/bzip2.go index 82e30c7c9d7..15575d22023 100644 --- a/libgo/go/compress/bzip2/bzip2.go +++ b/libgo/go/compress/bzip2/bzip2.go @@ -42,6 +42,8 @@ type reader struct { } // NewReader returns an io.Reader which decompresses bzip2 data from r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. func NewReader(r io.Reader) io.Reader { bz2 := new(reader) bz2.br = newBitReader(r) @@ -261,6 +263,11 @@ func (bz2 *reader) readBlock() (err error) { } } + if numSymbols == 0 { + // There must be an EOF symbol. + return StructuralError("no symbols in input") + } + // A block uses between two and six different Huffman trees. numHuffmanTrees := br.ReadBits(3) if numHuffmanTrees < 2 || numHuffmanTrees > 6 { @@ -307,10 +314,10 @@ func (bz2 *reader) readBlock() (err error) { // Now we decode the arrays of code-lengths for each tree. lengths := make([]uint8, numSymbols) - for i := 0; i < numHuffmanTrees; i++ { + for i := range huffmanTrees { // The code lengths are delta encoded from a 5-bit base value. length := br.ReadBits(5) - for j := 0; j < numSymbols; j++ { + for j := range lengths { for { if !br.ReadBit() { break @@ -333,6 +340,12 @@ func (bz2 *reader) readBlock() (err error) { } selectorIndex := 1 // the next tree index to use + if len(treeIndexes) == 0 { + return StructuralError("no tree selectors given") + } + if int(treeIndexes[0]) >= len(huffmanTrees) { + return StructuralError("tree selector out of range") + } currentHuffmanTree := huffmanTrees[treeIndexes[0]] bufIndex := 0 // indexes bz2.buf, the output buffer. // The output of the move-to-front transform is run-length encoded and @@ -350,6 +363,12 @@ func (bz2 *reader) readBlock() (err error) { decoded := 0 // counts the number of symbols decoded by the current tree. 
for { if decoded == 50 { + if selectorIndex >= numSelectors { + return StructuralError("insufficient selector indices for number of symbols") + } + if int(treeIndexes[selectorIndex]) >= len(huffmanTrees) { + return StructuralError("tree selector out of range") + } currentHuffmanTree = huffmanTrees[treeIndexes[selectorIndex]] selectorIndex++ decoded = 0 diff --git a/libgo/go/compress/bzip2/bzip2_test.go b/libgo/go/compress/bzip2/bzip2_test.go index 727249dc462..fb79d089eb3 100644 --- a/libgo/go/compress/bzip2/bzip2_test.go +++ b/libgo/go/compress/bzip2/bzip2_test.go @@ -208,6 +208,52 @@ func TestBufferOverrun(t *testing.T) { ioutil.ReadAll(decompressor) } +func TestOutOfRangeSelector(t *testing.T) { + // Tests https://code.google.com/p/go/issues/detail?id=8363. + buffer := bytes.NewReader(outOfRangeSelector) + decompressor := NewReader(buffer) + // This shouldn't panic. + ioutil.ReadAll(decompressor) +} + +func TestMTF(t *testing.T) { + mtf := newMTFDecoderWithRange(5) + + // 0 1 2 3 4 + expect := byte(1) + x := mtf.Decode(1) + if x != expect { + t.Errorf("expected %v, got %v", expect, x) + } + + // 1 0 2 3 4 + x = mtf.Decode(0) + if x != expect { + t.Errorf("expected %v, got %v", expect, x) + } + + // 1 0 2 3 4 + expect = byte(0) + x = mtf.Decode(1) + if x != expect { + t.Errorf("expected %v, got %v", expect, x) + } + + // 0 1 2 3 4 + expect = byte(4) + x = mtf.Decode(4) + if x != expect { + t.Errorf("expected %v, got %v", expect, x) + } + + // 4 0 1 2 3 + expect = byte(0) + x = mtf.Decode(1) + if x != expect { + t.Errorf("expected %v, got %v", expect, x) + } +} + var bufferOverrunBase64 string = ` QlpoNTFBWSZTWTzyiGcACMP/////////////////////////////////3/7f3/// ////4N/fCZODak2Xo44GIHZgkGzDRbFAuwAAKoFV7T6AO6qwA6APb6s2rOoAkAAD @@ -361,3 +407,13 @@ O0A8s/iua5oFdNZTWvbVI4FUH9sKcLiB3/fIAF+sB4n8q6L+UCfmbPcAo/crQ6b3 HqhDBMY9J0q/jdz9GNYZ/1fbXdkUqAQKFePhtzJDRBZba27+LPQNMCcrHMq06F1T 4QmLmkHt7LxB2pAczUO+T2O9bHEw/HWw+dYf2MoRDUw= ` + +var outOfRangeSelector = []byte{ + 0x42, 0x5a, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26, + 0x53, 0x59, 0x4e, 0xec, 0xe8, 0x36, 0x00, 0x00, + 0x02, 0x51, 0x80, 0x00, 0x10, 0x40, 0x00, 0x06, + 0x44, 0x90, 0x80, 0x20, 0x00, 0x31, 0x06, 0x4c, + 0x41, 0x01, 0xa7, 0xa9, 0xa5, 0x80, 0xbb, 0x94, + 0x31, 0x17, 0x72, 0x45, 0x38, 0x50, 0x90, 0x00, + 0x00, 0x00, 0x00, +} diff --git a/libgo/go/compress/bzip2/move_to_front.go b/libgo/go/compress/bzip2/move_to_front.go index b7e75a700a1..526dfb34cc0 100644 --- a/libgo/go/compress/bzip2/move_to_front.go +++ b/libgo/go/compress/bzip2/move_to_front.go @@ -11,88 +11,43 @@ package bzip2 // index into that list. When a symbol is referenced, it's moved to the front // of the list. Thus, a repeated symbol ends up being encoded with many zeros, // as the symbol will be at the front of the list after the first access. -type moveToFrontDecoder struct { - // Rather than actually keep the list in memory, the symbols are stored - // as a circular, double linked list with the symbol indexed by head - // at the front of the list. - symbols [256]byte - next [256]uint8 - prev [256]uint8 - head uint8 - len int -} +type moveToFrontDecoder []byte // newMTFDecoder creates a move-to-front decoder with an explicit initial list // of symbols. 
-func newMTFDecoder(symbols []byte) *moveToFrontDecoder { +func newMTFDecoder(symbols []byte) moveToFrontDecoder { if len(symbols) > 256 { panic("too many symbols") } - - m := new(moveToFrontDecoder) - copy(m.symbols[:], symbols) - m.len = len(symbols) - m.threadLinkedList() - return m + return moveToFrontDecoder(symbols) } // newMTFDecoderWithRange creates a move-to-front decoder with an initial // symbol list of 0...n-1. -func newMTFDecoderWithRange(n int) *moveToFrontDecoder { +func newMTFDecoderWithRange(n int) moveToFrontDecoder { if n > 256 { panic("newMTFDecoderWithRange: cannot have > 256 symbols") } - m := new(moveToFrontDecoder) + m := make([]byte, n) for i := 0; i < n; i++ { - m.symbols[byte(i)] = byte(i) - } - m.len = n - m.threadLinkedList() - return m -} - -// threadLinkedList creates the initial linked-list pointers. -func (m *moveToFrontDecoder) threadLinkedList() { - if m.len == 0 { - return - } - - m.prev[0] = uint8(m.len - 1) - - for i := byte(0); int(i) < m.len-1; i++ { - m.next[i] = uint8(i + 1) - m.prev[i+1] = uint8(i) + m[i] = byte(i) } - - m.next[m.len-1] = 0 + return moveToFrontDecoder(m) } -func (m *moveToFrontDecoder) Decode(n int) (b byte) { - // Most of the time, n will be zero so it's worth dealing with this - // simple case. - if n == 0 { - return m.symbols[m.head] - } - - i := m.head - for j := 0; j < n; j++ { - i = m.next[i] - } - b = m.symbols[i] - - m.next[m.prev[i]] = m.next[i] - m.prev[m.next[i]] = m.prev[i] - m.next[i] = m.head - m.prev[i] = m.prev[m.head] - m.next[m.prev[m.head]] = i - m.prev[m.head] = i - m.head = i - +func (m moveToFrontDecoder) Decode(n int) (b byte) { + // Implement move-to-front with a simple copy. This approach + // beats more sophisticated approaches in benchmarking, probably + // because it has high locality of reference inside of a + // single cache line (most move-to-front operations have n < 64). + b = m[n] + copy(m[1:], m[:n]) + m[0] = b return } // First returns the symbol at the front of the list. 
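Editor's note, not part of the patch: the hunk above replaces bzip2's linked-list move-to-front decoder with a plain byte slice. Decode(n) picks the n-th symbol in the current list and moves it to the front with a single copy, which stays within one cache line for the small n values the format usually produces. An illustrative standalone sketch of that technique, with invented names:

package main

import "fmt"

// mtf is a minimal move-to-front list over bytes.
type mtf []byte

// decode returns the n-th symbol and moves it to the front.
func (m mtf) decode(n int) byte {
	b := m[n]
	copy(m[1:], m[:n]) // shift the first n symbols right by one
	m[0] = b           // put the chosen symbol at the front
	return b
}

func main() {
	m := mtf{0, 1, 2, 3, 4}
	fmt.Println(m.decode(1)) // 1; list becomes 1 0 2 3 4
	fmt.Println(m.decode(0)) // 1; list unchanged
	fmt.Println(m.decode(4)) // 4; list becomes 4 1 0 2 3
}

The first two calls match the opening steps of the new TestMTF above.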
-func (m *moveToFrontDecoder) First() byte { - return m.symbols[m.head] +func (m moveToFrontDecoder) First() byte { + return m[0] } diff --git a/libgo/go/compress/flate/fixedhuff.go b/libgo/go/compress/flate/fixedhuff.go index 9be3d534954..7df8b9a293f 100644 --- a/libgo/go/compress/flate/fixedhuff.go +++ b/libgo/go/compress/flate/fixedhuff.go @@ -4,7 +4,7 @@ package flate -// autogenerated by gen.go, DO NOT EDIT +// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT var fixedHuffmanDecoder = huffmanDecoder{ 7, diff --git a/libgo/go/compress/flate/gen.go b/libgo/go/compress/flate/gen.go index 1427557f807..6288ecddd0e 100644 --- a/libgo/go/compress/flate/gen.go +++ b/libgo/go/compress/flate/gen.go @@ -7,14 +7,21 @@ // This program generates fixedhuff.go // Invoke as // -// go run gen.go |gofmt >fixedhuff.go +// go run gen.go -output fixedhuff.go package main import ( + "bytes" + "flag" "fmt" + "go/format" + "io/ioutil" + "log" ) +var filename = flag.String("output", "fixedhuff.go", "output file name") + const maxCodeLen = 16 // Note: the definition of the huffmanDecoder struct is copied from @@ -113,6 +120,8 @@ func (h *huffmanDecoder) init(bits []int) bool { } func main() { + flag.Parse() + var h huffmanDecoder var bits [288]int initReverseByte() @@ -129,27 +138,43 @@ func main() { bits[i] = 8 } h.init(bits[:]) - fmt.Println("package flate") - fmt.Println() - fmt.Println("// autogenerated by gen.go, DO NOT EDIT") - fmt.Println() - fmt.Println("var fixedHuffmanDecoder = huffmanDecoder{") - fmt.Printf("\t%d,\n", h.min) - fmt.Println("\t[huffmanNumChunks]uint32{") + + var buf bytes.Buffer + + fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.`+"\n\n") + + fmt.Fprintln(&buf, "package flate") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{") + fmt.Fprintf(&buf, "\t%d,\n", h.min) + fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{") for i := 0; i < huffmanNumChunks; i++ { if i&7 == 0 { - fmt.Printf("\t\t") + fmt.Fprintf(&buf, "\t\t") } else { - fmt.Printf(" ") + fmt.Fprintf(&buf, " ") } - fmt.Printf("0x%04x,", h.chunks[i]) + fmt.Fprintf(&buf, "0x%04x,", h.chunks[i]) if i&7 == 7 { - fmt.Println() + fmt.Fprintln(&buf) } } - fmt.Println("\t},") - fmt.Println("\tnil, 0,") - fmt.Println("}") + fmt.Fprintln(&buf, "\t},") + fmt.Fprintln(&buf, "\tnil, 0,") + fmt.Fprintln(&buf, "}") + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { + log.Fatal(err) + } } var reverseByte [256]byte diff --git a/libgo/go/compress/flate/inflate.go b/libgo/go/compress/flate/inflate.go index ce4923eca37..76519bbf427 100644 --- a/libgo/go/compress/flate/inflate.go +++ b/libgo/go/compress/flate/inflate.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run gen.go -output fixedhuff.go + // Package flate implements the DEFLATE compressed data format, described in // RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file // formats. 
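Editor's note, not part of the patch: gen.go above stops piping its output through the gofmt binary and instead formats the generated source in-process with go/format before writing the file named by -output, and inflate.go gains a matching //go:generate directive so the table can be rebuilt by running go generate. A generic sketch of that generator pattern, with placeholder content and file name:

package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"io/ioutil"
	"log"
)

var output = flag.String("output", "tables.go", "output file name")

func main() {
	flag.Parse()

	// Emit the generated source into a buffer.
	var buf bytes.Buffer
	fmt.Fprintln(&buf, "// autogenerated; DO NOT EDIT")
	fmt.Fprintln(&buf, "package p")
	fmt.Fprintln(&buf, "var table = [...]int{1, 2, 3}")

	// gofmt it in-process instead of piping through the gofmt command,
	// then write the result directly.
	src, err := format.Source(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile(*output, src, 0644); err != nil {
		log.Fatal(err)
	}
}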
@@ -54,6 +56,15 @@ func (e *WriteError) Error() string { return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() } +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + // Note that much of the implementation of huffmanDecoder is also copied // into gen.go (in package main) for the purpose of precomputing the // fixed huffman tables so they can be included statically. @@ -677,10 +688,28 @@ func makeReader(r io.Reader) Reader { return bufio.NewReader(r) } +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + hist: f.hist, + step: (*decompressor).nextBlock, + } + if dict != nil { + f.setDict(dict) + } + return nil +} + // NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. It is the caller's -// responsibility to call Close on the ReadCloser when -// finished reading. +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. func NewReader(r io.Reader) io.ReadCloser { var f decompressor f.bits = new([maxLit + maxDist]int) @@ -696,6 +725,8 @@ func NewReader(r io.Reader) io.ReadCloser { // the uncompressed data stream started with the given dictionary, // which has already been read. NewReaderDict is typically used // to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { var f decompressor f.r = makeReader(r) diff --git a/libgo/go/compress/flate/inflate_test.go b/libgo/go/compress/flate/inflate_test.go new file mode 100644 index 00000000000..9f25d30b35c --- /dev/null +++ b/libgo/go/compress/flate/inflate_test.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "io" + "testing" +) + +func TestReset(t *testing.T) { + ss := []string{ + "lorem ipsum izzle fo rizzle", + "the quick brown fox jumped over", + } + + deflated := make([]bytes.Buffer, 2) + for i, s := range ss { + w, _ := NewWriter(&deflated[i], 1) + w.Write([]byte(s)) + w.Close() + } + + inflated := make([]bytes.Buffer, 2) + + f := NewReader(&deflated[0]) + io.Copy(&inflated[0], f) + f.(Resetter).Reset(&deflated[1], nil) + io.Copy(&inflated[1], f) + f.Close() + + for i, s := range ss { + if s != inflated[i].String() { + t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, inflated[i], s) + } + } +} diff --git a/libgo/go/compress/gzip/gunzip.go b/libgo/go/compress/gzip/gunzip.go index 4f398b194a0..72ee55c4fab 100644 --- a/libgo/go/compress/gzip/gunzip.go +++ b/libgo/go/compress/gzip/gunzip.go @@ -74,14 +74,17 @@ type Reader struct { flg byte buf [512]byte err error + multistream bool } // NewReader creates a new Reader reading the given reader. 
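Editor's note, not part of the patch: the new flate.Resetter interface above exists so that one decompressor can be pointed at stream after stream without reallocating its internal tables, as TestReset in inflate_test.go exercises. A short usage sketch outside the test, with invented payloads:

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// Compress two independent payloads.
	var bufs [2]bytes.Buffer
	for i, s := range []string{"first payload", "second payload"} {
		w, _ := flate.NewWriter(&bufs[i], flate.BestSpeed)
		w.Write([]byte(s))
		w.Close()
	}

	// Decompress both with one ReadCloser, resetting between streams
	// instead of allocating a new decompressor each time.
	r := flate.NewReader(&bufs[0])
	defer r.Close()
	for i := range bufs {
		if i > 0 {
			if err := r.(flate.Resetter).Reset(&bufs[i], nil); err != nil {
				log.Fatal(err)
			}
		}
		data, err := ioutil.ReadAll(r)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(data))
	}
}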
-// The implementation buffers input and may read more data than necessary from r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. // It is the caller's responsibility to call Close on the Reader when done. func NewReader(r io.Reader) (*Reader, error) { z := new(Reader) z.r = makeReader(r) + z.multistream = true z.digest = crc32.NewIEEE() if err := z.readHeader(true); err != nil { return nil, err @@ -101,9 +104,30 @@ func (z *Reader) Reset(r io.Reader) error { } z.size = 0 z.err = nil + z.multistream = true return z.readHeader(true) } +// Multistream controls whether the reader supports multistream files. +// +// If enabled (the default), the Reader expects the input to be a sequence +// of individually gzipped data streams, each with its own header and +// trailer, ending at EOF. The effect is that the concatenation of a sequence +// of gzipped files is treated as equivalent to the gzip of the concatenation +// of the sequence. This is standard behavior for gzip readers. +// +// Calling Multistream(false) disables this behavior; disabling the behavior +// can be useful when reading file formats that distinguish individual gzip +// data streams or mix gzip data streams with other data streams. +// In this mode, when the Reader reaches the end of the data stream, +// Read returns io.EOF. If the underlying reader implements io.ByteReader, +// it will be left positioned just after the gzip stream. +// To start the next stream, call z.Reset(r) followed by z.Multistream(false). +// If there is no next stream, z.Reset(r) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + // GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). func get4(p []byte) uint32 { return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 @@ -207,7 +231,11 @@ func (z *Reader) readHeader(save bool) error { } z.digest.Reset() - z.decompressor = flate.NewReader(z.r) + if z.decompressor == nil { + z.decompressor = flate.NewReader(z.r) + } else { + z.decompressor.(flate.Resetter).Reset(z.r, nil) + } return nil } @@ -240,6 +268,10 @@ func (z *Reader) Read(p []byte) (n int, err error) { } // File is ok; is there another? + if !z.multistream { + return 0, io.EOF + } + if err = z.readHeader(false); err != nil { z.err = err return diff --git a/libgo/go/compress/gzip/gunzip_test.go b/libgo/go/compress/gzip/gunzip_test.go index 2471038f536..0636dec9ab0 100644 --- a/libgo/go/compress/gzip/gunzip_test.go +++ b/libgo/go/compress/gzip/gunzip_test.go @@ -9,6 +9,7 @@ import ( "io" "io/ioutil" "os" + "strings" "testing" "time" ) @@ -367,3 +368,43 @@ func TestInitialReset(t *testing.T) { t.Errorf("got %q want %q", s, gunzipTests[1].raw) } } + +func TestMultistreamFalse(t *testing.T) { + // Find concatenation test. + var tt gunzipTest + for _, tt = range gunzipTests { + if strings.HasSuffix(tt.desc, " x2") { + goto Found + } + } + t.Fatal("cannot find hello.txt x2 in gunzip tests") + +Found: + br := bytes.NewReader(tt.gzip) + var r Reader + if err := r.Reset(br); err != nil { + t.Fatalf("first reset: %v", err) + } + + // Expect two streams with "hello world\n", then real EOF. 
+ const hello = "hello world\n" + + r.Multistream(false) + data, err := ioutil.ReadAll(&r) + if string(data) != hello || err != nil { + t.Fatalf("first stream = %q, %v, want %q, %v", string(data), err, hello, nil) + } + + if err := r.Reset(br); err != nil { + t.Fatalf("second reset: %v", err) + } + r.Multistream(false) + data, err = ioutil.ReadAll(&r) + if string(data) != hello || err != nil { + t.Fatalf("second stream = %q, %v, want %q, %v", string(data), err, hello, nil) + } + + if err := r.Reset(br); err != io.EOF { + t.Fatalf("third reset: err=%v, want io.EOF", err) + } +} diff --git a/libgo/go/compress/lzw/reader.go b/libgo/go/compress/lzw/reader.go index ef596991032..526620c8271 100644 --- a/libgo/go/compress/lzw/reader.go +++ b/libgo/go/compress/lzw/reader.go @@ -6,12 +6,16 @@ // described in T. A. Welch, ``A Technique for High-Performance Data // Compression'', Computer, 17(6) (June 1984), pp 8-19. // -// In particular, it implements LZW as used by the GIF, TIFF and PDF file +// In particular, it implements LZW as used by the GIF and PDF file // formats, which means variable-width codes up to 12 bits and the first // two non-literal codes are a clear code and an EOF code. +// +// The TIFF file format uses a similar but incompatible version of the LZW +// algorithm. See the golang.org/x/image/tiff/lzw package for an +// implementation. package lzw -// TODO(nigeltao): check that TIFF and PDF use LZW in the same way as GIF, +// TODO(nigeltao): check that PDF uses LZW in the same way as GIF, // modulo LSB/MSB packing order. import ( @@ -218,6 +222,8 @@ func (d *decoder) Close() error { // NewReader creates a new io.ReadCloser. // Reads from the returned io.ReadCloser read and decompress data from r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. // It is the caller's responsibility to call Close on the ReadCloser when // finished reading. // The number of bits to use for literal codes, litWidth, must be in the diff --git a/libgo/go/compress/zlib/reader.go b/libgo/go/compress/zlib/reader.go index 9e1aafda9b6..816f1bf6bd0 100644 --- a/libgo/go/compress/zlib/reader.go +++ b/libgo/go/compress/zlib/reader.go @@ -51,45 +51,36 @@ type reader struct { scratch [4]byte } -// NewReader creates a new io.ReadCloser. -// Reads from the returned io.ReadCloser read and decompress data from r. +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// NewReader creates a new ReadCloser. +// Reads from the returned ReadCloser read and decompress data from r. // The implementation buffers input and may read more data than necessary from r. // It is the caller's responsibility to call Close on the ReadCloser when done. +// +// The ReadCloser returned by NewReader also implements Resetter. func NewReader(r io.Reader) (io.ReadCloser, error) { return NewReaderDict(r, nil) } // NewReaderDict is like NewReader but uses a preset dictionary. // NewReaderDict ignores the dictionary if the compressed data does not refer to it. +// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary. +// +// The ReadCloser returned by NewReaderDict also implements Resetter. 
func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) { z := new(reader) - if fr, ok := r.(flate.Reader); ok { - z.r = fr - } else { - z.r = bufio.NewReader(r) - } - _, err := io.ReadFull(z.r, z.scratch[0:2]) + err := z.Reset(r, dict) if err != nil { return nil, err } - h := uint(z.scratch[0])<<8 | uint(z.scratch[1]) - if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) { - return nil, ErrHeader - } - if z.scratch[1]&0x20 != 0 { - _, err = io.ReadFull(z.r, z.scratch[0:4]) - if err != nil { - return nil, err - } - checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) - if checksum != adler32.Checksum(dict) { - return nil, ErrDictionary - } - z.decompressor = flate.NewReaderDict(z.r, dict) - } else { - z.decompressor = flate.NewReader(z.r) - } - z.digest = adler32.New() return z, nil } @@ -130,3 +121,41 @@ func (z *reader) Close() error { z.err = z.decompressor.Close() return z.err } + +func (z *reader) Reset(r io.Reader, dict []byte) error { + if fr, ok := r.(flate.Reader); ok { + z.r = fr + } else { + z.r = bufio.NewReader(r) + } + _, err := io.ReadFull(z.r, z.scratch[0:2]) + if err != nil { + return err + } + h := uint(z.scratch[0])<<8 | uint(z.scratch[1]) + if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) { + return ErrHeader + } + haveDict := z.scratch[1]&0x20 != 0 + if haveDict { + _, err = io.ReadFull(z.r, z.scratch[0:4]) + if err != nil { + return err + } + checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != adler32.Checksum(dict) { + return ErrDictionary + } + } + if z.decompressor == nil { + if haveDict { + z.decompressor = flate.NewReaderDict(z.r, dict) + } else { + z.decompressor = flate.NewReader(z.r) + } + } else { + z.decompressor.(flate.Resetter).Reset(z.r, dict) + } + z.digest = adler32.New() + return nil +} diff --git a/libgo/go/crypto/cipher/cfb_test.go b/libgo/go/crypto/cipher/cfb_test.go index ec708ab2be2..9b544bb2118 100644 --- a/libgo/go/crypto/cipher/cfb_test.go +++ b/libgo/go/crypto/cipher/cfb_test.go @@ -9,10 +9,85 @@ import ( "crypto/aes" "crypto/cipher" "crypto/rand" + "encoding/hex" "testing" ) -func TestCFB(t *testing.T) { +// cfbTests contains the test vectors from +// http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf, section +// F.3.13. 
+var cfbTests = []struct { + key, iv, plaintext, ciphertext string +}{ + { + "2b7e151628aed2a6abf7158809cf4f3c", + "000102030405060708090a0b0c0d0e0f", + "6bc1bee22e409f96e93d7e117393172a", + "3b3fd92eb72dad20333449f8e83cfb4a", + }, + { + "2b7e151628aed2a6abf7158809cf4f3c", + "3B3FD92EB72DAD20333449F8E83CFB4A", + "ae2d8a571e03ac9c9eb76fac45af8e51", + "c8a64537a0b3a93fcde3cdad9f1ce58b", + }, + { + "2b7e151628aed2a6abf7158809cf4f3c", + "C8A64537A0B3A93FCDE3CDAD9F1CE58B", + "30c81c46a35ce411e5fbc1191a0a52ef", + "26751f67a3cbb140b1808cf187a4f4df", + }, + { + "2b7e151628aed2a6abf7158809cf4f3c", + "26751F67A3CBB140B1808CF187A4F4DF", + "f69f2445df4f9b17ad2b417be66c3710", + "c04b05357c5d1c0eeac4c66f9ff7f2e6", + }, +} + +func TestCFBVectors(t *testing.T) { + for i, test := range cfbTests { + key, err := hex.DecodeString(test.key) + if err != nil { + t.Fatal(err) + } + iv, err := hex.DecodeString(test.iv) + if err != nil { + t.Fatal(err) + } + plaintext, err := hex.DecodeString(test.plaintext) + if err != nil { + t.Fatal(err) + } + expected, err := hex.DecodeString(test.ciphertext) + if err != nil { + t.Fatal(err) + } + + block, err := aes.NewCipher(key) + if err != nil { + t.Fatal(err) + } + + ciphertext := make([]byte, len(plaintext)) + cfb := cipher.NewCFBEncrypter(block, iv) + cfb.XORKeyStream(ciphertext, plaintext) + + if !bytes.Equal(ciphertext, expected) { + t.Errorf("#%d: wrong output: got %x, expected %x", i, ciphertext, expected) + } + + cfbdec := cipher.NewCFBDecrypter(block, iv) + plaintextCopy := make([]byte, len(ciphertext)) + cfbdec.XORKeyStream(plaintextCopy, ciphertext) + + if !bytes.Equal(plaintextCopy, plaintextCopy) { + t.Errorf("#%d: wrong plaintext: got %x, expected %x", i, plaintextCopy, plaintext) + } + } +} + +func TestCFBInverse(t *testing.T) { block, err := aes.NewCipher(commonKey128) if err != nil { t.Error(err) diff --git a/libgo/go/crypto/cipher/example_test.go b/libgo/go/crypto/cipher/example_test.go index 373f6791be4..1cfa982df4b 100644 --- a/libgo/go/crypto/cipher/example_test.go +++ b/libgo/go/crypto/cipher/example_test.go @@ -240,7 +240,7 @@ func ExampleStreamReader() { } // Note that this example is simplistic in that it omits any - // authentication of the encrypted data. It you were actually to use + // authentication of the encrypted data. If you were actually to use // StreamReader in this manner, an attacker could flip arbitrary bits in // the output. } @@ -277,7 +277,7 @@ func ExampleStreamWriter() { } // Note that this example is simplistic in that it omits any - // authentication of the encrypted data. It you were actually to use + // authentication of the encrypted data. If you were actually to use // StreamReader in this manner, an attacker could flip arbitrary bits in // the decrypted result. } diff --git a/libgo/go/crypto/crypto.go b/libgo/go/crypto/crypto.go index 4b03628e692..59b23e93f5c 100644 --- a/libgo/go/crypto/crypto.go +++ b/libgo/go/crypto/crypto.go @@ -7,6 +7,7 @@ package crypto import ( "hash" + "io" "strconv" ) @@ -14,8 +15,13 @@ import ( // package. type Hash uint +// HashFunc simply returns the value of h so that Hash implements SignerOpts. 
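Editor's note, not part of the patch: one slip worth flagging in the new TestCFBVectors above is that the decryption check compares plaintextCopy against itself (bytes.Equal(plaintextCopy, plaintextCopy)), so it can never fail. The intended comparison is presumably against the decoded test-vector plaintext, along these lines:

	// corrected check inside TestCFBVectors (uses the test's existing
	// i, t, plaintext and plaintextCopy variables)
	if !bytes.Equal(plaintextCopy, plaintext) {
		t.Errorf("#%d: wrong plaintext: got %x, expected %x", i, plaintextCopy, plaintext)
	}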
+func (h Hash) HashFunc() Hash { + return h +} + const ( - MD4 Hash = 1 + iota // import code.google.com/p/go.crypto/md4 + MD4 Hash = 1 + iota // import golang.org/x/crypto/md4 MD5 // import crypto/md5 SHA1 // import crypto/sha1 SHA224 // import crypto/sha256 @@ -23,7 +29,11 @@ const ( SHA384 // import crypto/sha512 SHA512 // import crypto/sha512 MD5SHA1 // no implementation; MD5+SHA1 used for TLS RSA - RIPEMD160 // import code.google.com/p/go.crypto/ripemd160 + RIPEMD160 // import golang.org/x/crypto/ripemd160 + SHA3_224 // import golang.org/x/crypto/sha3 + SHA3_256 // import golang.org/x/crypto/sha3 + SHA3_384 // import golang.org/x/crypto/sha3 + SHA3_512 // import golang.org/x/crypto/sha3 maxHash ) @@ -35,6 +45,10 @@ var digestSizes = []uint8{ SHA256: 32, SHA384: 48, SHA512: 64, + SHA3_224: 28, + SHA3_256: 32, + SHA3_384: 48, + SHA3_512: 64, MD5SHA1: 36, RIPEMD160: 20, } @@ -83,3 +97,30 @@ type PublicKey interface{} // PrivateKey represents a private key using an unspecified algorithm. type PrivateKey interface{} + +// Signer is an interface for an opaque private key that can be used for +// signing operations. For example, an RSA key kept in a hardware module. +type Signer interface { + // Public returns the public key corresponding to the opaque, + // private key. + Public() PublicKey + + // Sign signs msg with the private key, possibly using entropy from + // rand. For an RSA key, the resulting signature should be either a + // PKCS#1 v1.5 or PSS signature (as indicated by opts). For an (EC)DSA + // key, it should be a DER-serialised, ASN.1 signature structure. + // + // Hash implements the SignerOpts interface and, in most cases, one can + // simply pass in the hash function used as opts. Sign may also attempt + // to type assert opts to other types in order to obtain algorithm + // specific values. See the documentation in each package for details. + Sign(rand io.Reader, msg []byte, opts SignerOpts) (signature []byte, err error) +} + +// SignerOpts contains options for signing with a Signer. +type SignerOpts interface { + // HashFunc returns an identifier for the hash function used to produce + // the message passed to Signer.Sign, or else zero to indicate that no + // hashing was done. + HashFunc() Hash +} diff --git a/libgo/go/crypto/ecdsa/ecdsa.go b/libgo/go/crypto/ecdsa/ecdsa.go index 1bec7437a53..d6135531bff 100644 --- a/libgo/go/crypto/ecdsa/ecdsa.go +++ b/libgo/go/crypto/ecdsa/ecdsa.go @@ -13,7 +13,9 @@ package ecdsa // http://www.secg.org/download/aid-780/sec1-v2.pdf import ( + "crypto" "crypto/elliptic" + "encoding/asn1" "io" "math/big" ) @@ -30,6 +32,28 @@ type PrivateKey struct { D *big.Int } +type ecdsaSignature struct { + R, S *big.Int +} + +// Public returns the public key corresponding to priv. +func (priv *PrivateKey) Public() crypto.PublicKey { + return &priv.PublicKey +} + +// Sign signs msg with priv, reading randomness from rand. This method is +// intended to support keys where the private part is kept in, for example, a +// hardware module. Common uses should use the Sign function in this package +// directly. 
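Editor's note, not part of the patch: the new crypto.Signer and crypto.SignerOpts interfaces above let code sign with an opaque private key without knowing its concrete type, and the Sign methods this patch adds to ecdsa and rsa private keys make both satisfy the interface. A small sketch; the helper name and message are invented:

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"log"
)

// sign accepts any opaque private key through crypto.Signer; it does not
// need to know whether the key is RSA, ECDSA, or held in hardware.
func sign(s crypto.Signer, msg []byte) ([]byte, error) {
	digest := sha256.Sum256(msg)
	// crypto.SHA256 satisfies SignerOpts via the HashFunc method above.
	return s.Sign(rand.Reader, digest[:], crypto.SHA256)
}

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	sig, err := sign(key, []byte("hello"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("ASN.1 DER signature: %x\n", sig)
}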
+func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) { + r, s, err := Sign(rand, priv, msg) + if err != nil { + return nil, err + } + + return asn1.Marshal(ecdsaSignature{r, s}) +} + var one = new(big.Int).SetInt64(1) // randFieldElement returns a random element of the field underlying the given diff --git a/libgo/go/crypto/md5/gen.go b/libgo/go/crypto/md5/gen.go index 75295e4fcb0..8cd0a6358e1 100644 --- a/libgo/go/crypto/md5/gen.go +++ b/libgo/go/crypto/md5/gen.go @@ -7,7 +7,7 @@ // This program generates md5block.go // Invoke as // -// go run gen.go [-full] |gofmt >md5block.go +// go run gen.go [-full] -output md5block.go // // The -full flag causes the generated code to do a full // (16x) unrolling instead of a 4x unrolling. @@ -15,18 +15,33 @@ package main import ( + "bytes" "flag" + "go/format" + "io/ioutil" "log" - "os" "strings" "text/template" ) +var filename = flag.String("output", "md5block.go", "output file name") + func main() { flag.Parse() + var buf bytes.Buffer + t := template.Must(template.New("main").Funcs(funcs).Parse(program)) - if err := t.Execute(os.Stdout, data); err != nil { + if err := t.Execute(&buf, data); err != nil { + log.Fatal(err) + } + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { log.Fatal(err) } } @@ -165,7 +180,7 @@ var program = `// Copyright 2013 The Go Authors. All rights reserved. // license that can be found in the LICENSE file. // DO NOT EDIT. -// Generate with: go run gen.go{{if .Full}} -full{{end}} | gofmt >md5block.go +// Generate with: go run gen.go{{if .Full}} -full{{end}} -output md5block.go package md5 diff --git a/libgo/go/crypto/md5/md5.go b/libgo/go/crypto/md5/md5.go index 1a1f35fabc0..8c50c6d0bfa 100644 --- a/libgo/go/crypto/md5/md5.go +++ b/libgo/go/crypto/md5/md5.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run gen.go -full -output md5block.go + // Package md5 implements the MD5 hash algorithm as defined in RFC 1321. package md5 diff --git a/libgo/go/crypto/md5/md5block.go b/libgo/go/crypto/md5/md5block.go index e2a17677757..64e1e7c1efd 100644 --- a/libgo/go/crypto/md5/md5block.go +++ b/libgo/go/crypto/md5/md5block.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // DO NOT EDIT. -// Generate with: go run gen.go -full | gofmt >md5block.go +// Generate with: go run gen.go -full -output md5block.go package md5 diff --git a/libgo/go/crypto/rand/rand_linux.go b/libgo/go/crypto/rand/rand_linux.go new file mode 100644 index 00000000000..8cb59c75dff --- /dev/null +++ b/libgo/go/crypto/rand/rand_linux.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "internal/syscall" + "sync" +) + +func init() { + altGetRandom = getRandomLinux +} + +var ( + once sync.Once + useSyscall bool +) + +func pickStrategy() { + // Test whether we should use the system call or /dev/urandom. + // We'll fall back to urandom if: + // - the kernel is too old (before 3.17) + // - the machine has no entropy available (early boot + no hardware + // entropy source?) and we want to avoid blocking later. 
+ var buf [1]byte + n, err := syscall.GetRandom(buf[:], syscall.GRND_NONBLOCK) + useSyscall = n == 1 && err == nil +} + +func getRandomLinux(p []byte) (ok bool) { + once.Do(pickStrategy) + if !useSyscall { + return false + } + n, err := syscall.GetRandom(p, 0) + return n == len(p) && err == nil +} diff --git a/libgo/go/crypto/rand/rand_unix.go b/libgo/go/crypto/rand/rand_unix.go index 1e741fda193..62d0fbdb350 100644 --- a/libgo/go/crypto/rand/rand_unix.go +++ b/libgo/go/crypto/rand/rand_unix.go @@ -20,6 +20,8 @@ import ( "time" ) +const urandomDevice = "/dev/urandom" + // Easy implementation: read from /dev/urandom. // This is sufficient on Linux, OS X, and FreeBSD. @@ -27,7 +29,7 @@ func init() { if runtime.GOOS == "plan9" { Reader = newReader(nil) } else { - Reader = &devReader{name: "/dev/urandom"} + Reader = &devReader{name: urandomDevice} } } @@ -38,7 +40,14 @@ type devReader struct { mu sync.Mutex } +// altGetRandom if non-nil specifies an OS-specific function to get +// urandom-style randomness. +var altGetRandom func([]byte) (ok bool) + func (r *devReader) Read(b []byte) (n int, err error) { + if altGetRandom != nil && r.name == urandomDevice && altGetRandom(b) { + return len(b), nil + } r.mu.Lock() defer r.mu.Unlock() if r.f == nil { diff --git a/libgo/go/crypto/rc4/rc4_asm.go b/libgo/go/crypto/rc4/rc4_asm.go index fc71b9a6fa2..02e5b67d553 100644 --- a/libgo/go/crypto/rc4/rc4_asm.go +++ b/libgo/go/crypto/rc4/rc4_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64 amd64p32 arm 386 +// +build amd64 amd64p32 arm,!nacl 386 package rc4 diff --git a/libgo/go/crypto/rc4/rc4_ref.go b/libgo/go/crypto/rc4/rc4_ref.go index 1ecce1a7fbc..e34bd34cf1d 100644 --- a/libgo/go/crypto/rc4/rc4_ref.go +++ b/libgo/go/crypto/rc4/rc4_ref.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64,!amd64p32,!arm,!386 +// +build !amd64,!amd64p32,!arm,!386 arm,nacl package rc4 diff --git a/libgo/go/crypto/rsa/pss.go b/libgo/go/crypto/rsa/pss.go index 18eafbc05f7..e9f2908250c 100644 --- a/libgo/go/crypto/rsa/pss.go +++ b/libgo/go/crypto/rsa/pss.go @@ -222,6 +222,17 @@ type PSSOptions struct { // signature. It can either be a number of bytes, or one of the special // PSSSaltLength constants. SaltLength int + + // Hash, if not zero, overrides the hash function passed to SignPSS. + // This is the only way to specify the hash function when using the + // crypto.Signer interface. + Hash crypto.Hash +} + +// HashFunc returns pssOpts.Hash so that PSSOptions implements +// crypto.SignerOpts. +func (pssOpts *PSSOptions) HashFunc() crypto.Hash { + return pssOpts.Hash } func (opts *PSSOptions) saltLength() int { @@ -244,6 +255,10 @@ func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte, saltLength = hash.Size() } + if opts.Hash != 0 { + hash = opts.Hash + } + salt := make([]byte, saltLength) if _, err = io.ReadFull(rand, salt); err != nil { return diff --git a/libgo/go/crypto/rsa/rsa.go b/libgo/go/crypto/rsa/rsa.go index bce6ba4eba3..2702311281c 100644 --- a/libgo/go/crypto/rsa/rsa.go +++ b/libgo/go/crypto/rsa/rsa.go @@ -6,6 +6,7 @@ package rsa import ( + "crypto" "crypto/rand" "crypto/subtle" "errors" @@ -58,6 +59,24 @@ type PrivateKey struct { Precomputed PrecomputedValues } +// Public returns the public key corresponding to priv. 
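Editor's note, not part of the patch: the PSSOptions.Hash field and HashFunc method above, together with the Sign method added to rsa.PrivateKey just below, let a PSS signature be requested through the generic crypto.Signer interface. A sketch of that call path; the key size and message are arbitrary:

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
	"log"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	digest := sha256.Sum256([]byte("hello"))

	// Passing *rsa.PSSOptions as the SignerOpts selects PSS; the Hash
	// field carries the hash choice, since Signer.Sign only sees
	// opts.HashFunc().
	opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthAuto, Hash: crypto.SHA256}
	var signer crypto.Signer = key
	sig, err := signer.Sign(rand.Reader, digest[:], opts)
	if err != nil {
		log.Fatal(err)
	}

	if err := rsa.VerifyPSS(&key.PublicKey, crypto.SHA256, digest[:], sig, opts); err != nil {
		log.Fatal(err)
	}
	fmt.Println("PSS signature verified")
}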
+func (priv *PrivateKey) Public() crypto.PublicKey { + return &priv.PublicKey +} + +// Sign signs msg with priv, reading randomness from rand. If opts is a +// *PSSOptions then the PSS algorithm will be used, otherwise PKCS#1 v1.5 will +// be used. This method is intended to support keys where the private part is +// kept in, for example, a hardware module. Common uses should use the Sign* +// functions in this package. +func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) { + if pssOpts, ok := opts.(*PSSOptions); ok { + return SignPSS(rand, priv, pssOpts.Hash, msg, pssOpts) + } + + return SignPKCS1v15(rand, priv, opts.HashFunc(), msg) +} + type PrecomputedValues struct { Dp, Dq *big.Int // D mod (P-1) (or mod Q-1) Qinv *big.Int // Q^-1 mod P diff --git a/libgo/go/crypto/subtle/constant_time.go b/libgo/go/crypto/subtle/constant_time.go index 9c4b14a65f6..6f80e7c58dc 100644 --- a/libgo/go/crypto/subtle/constant_time.go +++ b/libgo/go/crypto/subtle/constant_time.go @@ -6,12 +6,12 @@ // code but require careful thought to use correctly. package subtle -// ConstantTimeCompare returns 1 iff the two equal length slices, x +// ConstantTimeCompare returns 1 iff the two slices, x // and y, have equal contents. The time taken is a function of the length of // the slices and is independent of the contents. func ConstantTimeCompare(x, y []byte) int { if len(x) != len(y) { - panic("subtle: slices have different lengths") + return 0 } var v byte @@ -62,7 +62,6 @@ func ConstantTimeCopy(v int, x, y []byte) { for i := 0; i < len(x); i++ { x[i] = x[i]&xmask | y[i]&ymask } - return } // ConstantTimeLessOrEq returns 1 if x <= y and 0 otherwise. diff --git a/libgo/go/crypto/subtle/constant_time_test.go b/libgo/go/crypto/subtle/constant_time_test.go index d8e321ec04a..619a454441d 100644 --- a/libgo/go/crypto/subtle/constant_time_test.go +++ b/libgo/go/crypto/subtle/constant_time_test.go @@ -18,6 +18,8 @@ var testConstantTimeCompareData = []TestConstantTimeCompareStruct{ {[]byte{}, []byte{}, 1}, {[]byte{0x11}, []byte{0x11}, 1}, {[]byte{0x12}, []byte{0x11}, 0}, + {[]byte{0x11}, []byte{0x11, 0x12}, 0}, + {[]byte{0x11, 0x12}, []byte{0x11}, 0}, } func TestConstantTimeCompare(t *testing.T) { diff --git a/libgo/go/crypto/tls/alert.go b/libgo/go/crypto/tls/alert.go index 0856311e4cb..3de4834d3f5 100644 --- a/libgo/go/crypto/tls/alert.go +++ b/libgo/go/crypto/tls/alert.go @@ -35,6 +35,7 @@ const ( alertProtocolVersion alert = 70 alertInsufficientSecurity alert = 71 alertInternalError alert = 80 + alertInappropriateFallback alert = 86 alertUserCanceled alert = 90 alertNoRenegotiation alert = 100 ) @@ -60,6 +61,7 @@ var alertText = map[alert]string{ alertProtocolVersion: "protocol version not supported", alertInsufficientSecurity: "insufficient security level", alertInternalError: "internal error", + alertInappropriateFallback: "inappropriate fallback", alertUserCanceled: "user canceled", alertNoRenegotiation: "no renegotiation", } diff --git a/libgo/go/crypto/tls/cipher_suites.go b/libgo/go/crypto/tls/cipher_suites.go index 39a51459d28..226e06d68d6 100644 --- a/libgo/go/crypto/tls/cipher_suites.go +++ b/libgo/go/crypto/tls/cipher_suites.go @@ -267,4 +267,9 @@ const ( TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014 TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b + + // TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator + // that the client is doing version fallback. 
See + // https://tools.ietf.org/html/draft-ietf-tls-downgrade-scsv-00. + TLS_FALLBACK_SCSV uint16 = 0x5600 ) diff --git a/libgo/go/crypto/tls/common.go b/libgo/go/crypto/tls/common.go index fca98bdd11c..776b70c93c8 100644 --- a/libgo/go/crypto/tls/common.go +++ b/libgo/go/crypto/tls/common.go @@ -72,6 +72,7 @@ const ( extensionSupportedCurves uint16 = 10 extensionSupportedPoints uint16 = 11 extensionSignatureAlgorithms uint16 = 13 + extensionALPN uint16 = 16 extensionSessionTicket uint16 = 35 extensionNextProtoNeg uint16 = 13172 // not IANA assigned extensionRenegotiationInfo uint16 = 0xff01 @@ -164,6 +165,14 @@ type ConnectionState struct { ServerName string // server name requested by client, if any (server side only) PeerCertificates []*x509.Certificate // certificate chain presented by remote peer VerifiedChains [][]*x509.Certificate // verified chains built from PeerCertificates + + // TLSUnique contains the "tls-unique" channel binding value (see RFC + // 5929, section 3). For resumed sessions this value will be nil + // because resumption does not include enough context (see + // https://secure-resumption.com/#channelbindings). This will change in + // future versions of Go once the TLS master-secret fix has been + // standardized and implemented. + TLSUnique []byte } // ClientAuthType declares the policy the server will follow for @@ -201,6 +210,32 @@ type ClientSessionCache interface { Put(sessionKey string, cs *ClientSessionState) } +// ClientHelloInfo contains information from a ClientHello message in order to +// guide certificate selection in the GetCertificate callback. +type ClientHelloInfo struct { + // CipherSuites lists the CipherSuites supported by the client (e.g. + // TLS_RSA_WITH_RC4_128_SHA). + CipherSuites []uint16 + + // ServerName indicates the name of the server requested by the client + // in order to support virtual hosting. ServerName is only set if the + // client is using SNI (see + // http://tools.ietf.org/html/rfc4366#section-3.1). + ServerName string + + // SupportedCurves lists the elliptic curves supported by the client. + // SupportedCurves is set only if the Supported Elliptic Curves + // Extension is being used (see + // http://tools.ietf.org/html/rfc4492#section-5.1.1). + SupportedCurves []CurveID + + // SupportedPoints lists the point formats supported by the client. + // SupportedPoints is set only if the Supported Point Formats Extension + // is being used (see + // http://tools.ietf.org/html/rfc4492#section-5.1.2). + SupportedPoints []uint8 +} + // A Config structure is used to configure a TLS client or server. // After one has been passed to a TLS function it must not be // modified. A Config may be reused; the tls package will also not @@ -229,6 +264,13 @@ type Config struct { // for all connections. NameToCertificate map[string]*Certificate + // GetCertificate returns a Certificate based on the given + // ClientHelloInfo. If GetCertificate is nil or returns nil, then the + // certificate is retrieved from NameToCertificate. If + // NameToCertificate is nil, the first element of Certificates will be + // used. + GetCertificate func(clientHello *ClientHelloInfo) (*Certificate, error) + // RootCAs defines the set of root certificate authorities // that clients use when verifying server certificates. // If RootCAs is nil, TLS uses the host's root CA set. 
@@ -383,22 +425,28 @@ func (c *Config) mutualVersion(vers uint16) (uint16, bool) { return vers, true } -// getCertificateForName returns the best certificate for the given name, -// defaulting to the first element of c.Certificates if there are no good -// options. -func (c *Config) getCertificateForName(name string) *Certificate { +// getCertificate returns the best certificate for the given ClientHelloInfo, +// defaulting to the first element of c.Certificates. +func (c *Config) getCertificate(clientHello *ClientHelloInfo) (*Certificate, error) { + if c.GetCertificate != nil { + cert, err := c.GetCertificate(clientHello) + if cert != nil || err != nil { + return cert, err + } + } + if len(c.Certificates) == 1 || c.NameToCertificate == nil { // There's only one choice, so no point doing any work. - return &c.Certificates[0] + return &c.Certificates[0], nil } - name = strings.ToLower(name) + name := strings.ToLower(clientHello.ServerName) for len(name) > 0 && name[len(name)-1] == '.' { name = name[:len(name)-1] } if cert, ok := c.NameToCertificate[name]; ok { - return cert + return cert, nil } // try replacing labels in the name with wildcards until we get a @@ -408,12 +456,12 @@ func (c *Config) getCertificateForName(name string) *Certificate { labels[i] = "*" candidate := strings.Join(labels, ".") if cert, ok := c.NameToCertificate[candidate]; ok { - return cert + return cert, nil } } // If nothing matches, return the first certificate. - return &c.Certificates[0] + return &c.Certificates[0], nil } // BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate @@ -439,7 +487,12 @@ func (c *Config) BuildNameToCertificate() { // A Certificate is a chain of one or more certificates, leaf first. type Certificate struct { Certificate [][]byte - PrivateKey crypto.PrivateKey // supported types: *rsa.PrivateKey, *ecdsa.PrivateKey + // PrivateKey contains the private key corresponding to the public key + // in Leaf. For a server, this must be a *rsa.PrivateKey or + // *ecdsa.PrivateKey. For a client doing client authentication, this + // can be any type that implements crypto.Signer (which includes RSA + // and ECDSA private keys). + PrivateKey crypto.PrivateKey // OCSPStaple contains an optional OCSP response which will be served // to clients that request it. OCSPStaple []byte diff --git a/libgo/go/crypto/tls/conn.go b/libgo/go/crypto/tls/conn.go index 8f7d2c144ff..ba8e4c22b70 100644 --- a/libgo/go/crypto/tls/conn.go +++ b/libgo/go/crypto/tls/conn.go @@ -42,6 +42,9 @@ type Conn struct { verifiedChains [][]*x509.Certificate // serverName contains the server name indicated by the client, if any. serverName string + // firstFinished contains the first Finished hash sent during the + // handshake. This is the "tls-unique" channel binding value. 
+ firstFinished [12]byte clientProtocol string clientProtocolFallback bool @@ -994,6 +997,9 @@ func (c *Conn) ConnectionState() ConnectionState { state.PeerCertificates = c.peerCertificates state.VerifiedChains = c.verifiedChains state.ServerName = c.serverName + if !c.didResume { + state.TLSUnique = c.firstFinished[:] + } } return state diff --git a/libgo/go/crypto/tls/conn_test.go b/libgo/go/crypto/tls/conn_test.go index 5c555147ca8..ec802cad70f 100644 --- a/libgo/go/crypto/tls/conn_test.go +++ b/libgo/go/crypto/tls/conn_test.go @@ -88,19 +88,31 @@ func TestCertificateSelection(t *testing.T) { return -1 } - if n := pointerToIndex(config.getCertificateForName("example.com")); n != 0 { + certificateForName := func(name string) *Certificate { + clientHello := &ClientHelloInfo{ + ServerName: name, + } + if cert, err := config.getCertificate(clientHello); err != nil { + t.Errorf("unable to get certificate for name '%s': %s", name, err) + return nil + } else { + return cert + } + } + + if n := pointerToIndex(certificateForName("example.com")); n != 0 { t.Errorf("example.com returned certificate %d, not 0", n) } - if n := pointerToIndex(config.getCertificateForName("bar.example.com")); n != 1 { + if n := pointerToIndex(certificateForName("bar.example.com")); n != 1 { t.Errorf("bar.example.com returned certificate %d, not 1", n) } - if n := pointerToIndex(config.getCertificateForName("foo.example.com")); n != 2 { + if n := pointerToIndex(certificateForName("foo.example.com")); n != 2 { t.Errorf("foo.example.com returned certificate %d, not 2", n) } - if n := pointerToIndex(config.getCertificateForName("foo.bar.example.com")); n != 3 { + if n := pointerToIndex(certificateForName("foo.bar.example.com")); n != 3 { t.Errorf("foo.bar.example.com returned certificate %d, not 3", n) } - if n := pointerToIndex(config.getCertificateForName("foo.bar.baz.example.com")); n != 0 { + if n := pointerToIndex(certificateForName("foo.bar.baz.example.com")); n != 0 { t.Errorf("foo.bar.baz.example.com returned certificate %d, not 0", n) } } diff --git a/libgo/go/crypto/tls/generate_cert.go b/libgo/go/crypto/tls/generate_cert.go index 5c6d8396d52..83f9916ff9d 100644 --- a/libgo/go/crypto/tls/generate_cert.go +++ b/libgo/go/crypto/tls/generate_cert.go @@ -10,6 +10,8 @@ package main import ( + "crypto/ecdsa" + "crypto/elliptic" "crypto/rand" "crypto/rsa" "crypto/x509" @@ -26,13 +28,41 @@ import ( ) var ( - host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for") - validFrom = flag.String("start-date", "", "Creation date formatted as Jan 1 15:04:05 2011") - validFor = flag.Duration("duration", 365*24*time.Hour, "Duration that certificate is valid for") - isCA = flag.Bool("ca", false, "whether this cert should be its own Certificate Authority") - rsaBits = flag.Int("rsa-bits", 2048, "Size of RSA key to generate") + host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for") + validFrom = flag.String("start-date", "", "Creation date formatted as Jan 1 15:04:05 2011") + validFor = flag.Duration("duration", 365*24*time.Hour, "Duration that certificate is valid for") + isCA = flag.Bool("ca", false, "whether this cert should be its own Certificate Authority") + rsaBits = flag.Int("rsa-bits", 2048, "Size of RSA key to generate. Ignored if --ecdsa-curve is set") + ecdsaCurve = flag.String("ecdsa-curve", "", "ECDSA curve to use to generate a key. 
Valid values are P224, P256, P384, P521") ) +func publicKey(priv interface{}) interface{} { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &k.PublicKey + case *ecdsa.PrivateKey: + return &k.PublicKey + default: + return nil + } +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *ecdsa.PrivateKey: + b, err := x509.MarshalECPrivateKey(k) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err) + os.Exit(2) + } + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + return nil + } +} + func main() { flag.Parse() @@ -40,7 +70,23 @@ func main() { log.Fatalf("Missing required --host parameter") } - priv, err := rsa.GenerateKey(rand.Reader, *rsaBits) + var priv interface{} + var err error + switch *ecdsaCurve { + case "": + priv, err = rsa.GenerateKey(rand.Reader, *rsaBits) + case "P224": + priv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + case "P256": + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "P384": + priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + case "P521": + priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + default: + fmt.Fprintf(os.Stderr, "Unrecognized elliptic curve: %q", *ecdsaCurve) + os.Exit(1) + } if err != nil { log.Fatalf("failed to generate private key: %s", err) } @@ -91,7 +137,7 @@ func main() { template.KeyUsage |= x509.KeyUsageCertSign } - derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv) if err != nil { log.Fatalf("Failed to create certificate: %s", err) } @@ -109,7 +155,7 @@ func main() { log.Print("failed to open key.pem for writing:", err) return } - pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) + pem.Encode(keyOut, pemBlockForKey(priv)) keyOut.Close() log.Print("written key.pem\n") } diff --git a/libgo/go/crypto/tls/handshake_client.go b/libgo/go/crypto/tls/handshake_client.go index a320fde1bc7..7f662e9c9f3 100644 --- a/libgo/go/crypto/tls/handshake_client.go +++ b/libgo/go/crypto/tls/handshake_client.go @@ -6,11 +6,11 @@ package tls import ( "bytes" + "crypto" "crypto/ecdsa" "crypto/rsa" "crypto/subtle" "crypto/x509" - "encoding/asn1" "errors" "fmt" "io" @@ -37,6 +37,18 @@ func (c *Conn) clientHandshake() error { return errors.New("tls: either ServerName or InsecureSkipVerify must be specified in the tls.Config") } + nextProtosLength := 0 + for _, proto := range c.config.NextProtos { + if l := len(proto); l == 0 || l > 255 { + return errors.New("tls: invalid NextProtos value") + } else { + nextProtosLength += 1 + l + } + } + if nextProtosLength > 0xffff { + return errors.New("tls: NextProtos values too large") + } + hello := &clientHelloMsg{ vers: c.config.maxVersion(), compressionMethods: []uint8{compressionNone}, @@ -47,6 +59,7 @@ func (c *Conn) clientHandshake() error { supportedPoints: []uint8{pointFormatUncompressed}, nextProtoNeg: len(c.config.NextProtos) > 0, secureRenegotiation: true, + alpnProtocols: c.config.NextProtos, } possibleCipherSuites := c.config.cipherSuites() @@ -174,10 +187,10 @@ NextCipherSuite: if err := hs.readSessionTicket(); err != nil { return err } - if err := hs.readFinished(); err != nil { + if err := hs.readFinished(c.firstFinished[:]); err != nil { return err } - if err := 
hs.sendFinished(); err != nil { + if err := hs.sendFinished(nil); err != nil { return err } } else { @@ -187,13 +200,13 @@ NextCipherSuite: if err := hs.establishKeys(); err != nil { return err } - if err := hs.sendFinished(); err != nil { + if err := hs.sendFinished(c.firstFinished[:]); err != nil { return err } if err := hs.readSessionTicket(); err != nil { return err } - if err := hs.readFinished(); err != nil { + if err := hs.readFinished(nil); err != nil { return err } } @@ -332,8 +345,8 @@ func (hs *clientHandshakeState) doFullHandshake() error { } // We need to search our list of client certs for one - // where SignatureAlgorithm is RSA and the Issuer is in - // certReq.certificateAuthorities + // where SignatureAlgorithm is acceptable to the server and the + // Issuer is in certReq.certificateAuthorities findCert: for i, chain := range c.config.Certificates { if !rsaAvail && !ecdsaAvail { @@ -360,7 +373,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { if len(certReq.certificateAuthorities) == 0 { // they gave us an empty list, so just take the - // first RSA cert from c.config.Certificates + // first cert from c.config.Certificates chainToSend = &chain break findCert } @@ -415,22 +428,24 @@ func (hs *clientHandshakeState) doFullHandshake() error { hasSignatureAndHash: c.vers >= VersionTLS12, } - switch key := c.config.Certificates[0].PrivateKey.(type) { - case *ecdsa.PrivateKey: - digest, _, hashId := hs.finishedHash.hashForClientCertificate(signatureECDSA) - r, s, err := ecdsa.Sign(c.config.rand(), key, digest) - if err == nil { - signed, err = asn1.Marshal(ecdsaSignature{r, s}) - } + key, ok := chainToSend.PrivateKey.(crypto.Signer) + if !ok { + c.sendAlert(alertInternalError) + return fmt.Errorf("tls: client certificate private key of type %T does not implement crypto.Signer", chainToSend.PrivateKey) + } + switch key.Public().(type) { + case *ecdsa.PublicKey: + digest, hashFunc, hashId := hs.finishedHash.hashForClientCertificate(signatureECDSA) + signed, err = key.Sign(c.config.rand(), digest, hashFunc) certVerify.signatureAndHash.signature = signatureECDSA certVerify.signatureAndHash.hash = hashId - case *rsa.PrivateKey: + case *rsa.PublicKey: digest, hashFunc, hashId := hs.finishedHash.hashForClientCertificate(signatureRSA) - signed, err = rsa.SignPKCS1v15(c.config.rand(), key, hashFunc, digest) + signed, err = key.Sign(c.config.rand(), digest, hashFunc) certVerify.signatureAndHash.signature = signatureRSA certVerify.signatureAndHash.hash = hashId default: - err = errors.New("unknown private key type") + err = fmt.Errorf("tls: unknown client certificate key type: %T", key) } if err != nil { c.sendAlert(alertInternalError) @@ -483,11 +498,31 @@ func (hs *clientHandshakeState) processServerHello() (bool, error) { return false, errors.New("tls: server selected unsupported compression format") } - if !hs.hello.nextProtoNeg && hs.serverHello.nextProtoNeg { + clientDidNPN := hs.hello.nextProtoNeg + clientDidALPN := len(hs.hello.alpnProtocols) > 0 + serverHasNPN := hs.serverHello.nextProtoNeg + serverHasALPN := len(hs.serverHello.alpnProtocol) > 0 + + if !clientDidNPN && serverHasNPN { c.sendAlert(alertHandshakeFailure) return false, errors.New("server advertised unrequested NPN extension") } + if !clientDidALPN && serverHasALPN { + c.sendAlert(alertHandshakeFailure) + return false, errors.New("server advertised unrequested ALPN extension") + } + + if serverHasNPN && serverHasALPN { + c.sendAlert(alertHandshakeFailure) + return false, errors.New("server advertised both 
NPN and ALPN extensions") + } + + if serverHasALPN { + c.clientProtocol = hs.serverHello.alpnProtocol + c.clientProtocolFallback = false + } + if hs.serverResumedSession() { // Restore masterSecret and peerCerts from previous state hs.masterSecret = hs.session.masterSecret @@ -497,7 +532,7 @@ func (hs *clientHandshakeState) processServerHello() (bool, error) { return false, nil } -func (hs *clientHandshakeState) readFinished() error { +func (hs *clientHandshakeState) readFinished(out []byte) error { c := hs.c c.readRecord(recordTypeChangeCipherSpec) @@ -522,6 +557,7 @@ func (hs *clientHandshakeState) readFinished() error { return errors.New("tls: server's Finished message was incorrect") } hs.finishedHash.Write(serverFinished.marshal()) + copy(out, verify) return nil } @@ -553,7 +589,7 @@ func (hs *clientHandshakeState) readSessionTicket() error { return nil } -func (hs *clientHandshakeState) sendFinished() error { +func (hs *clientHandshakeState) sendFinished(out []byte) error { c := hs.c c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) @@ -572,6 +608,7 @@ func (hs *clientHandshakeState) sendFinished() error { finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret) hs.finishedHash.Write(finished.marshal()) c.writeRecord(recordTypeHandshake, finished.marshal()) + copy(out, finished.verifyData) return nil } @@ -584,18 +621,18 @@ func clientSessionCacheKey(serverAddr net.Addr, config *Config) string { return serverAddr.String() } -// mutualProtocol finds the mutual Next Protocol Negotiation protocol given the -// set of client and server supported protocols. The set of client supported -// protocols must not be empty. It returns the resulting protocol and flag +// mutualProtocol finds the mutual Next Protocol Negotiation or ALPN protocol +// given list of possible protocols and a list of the preference order. The +// first list must not be empty. It returns the resulting protocol and flag // indicating if the fallback case was reached. -func mutualProtocol(clientProtos, serverProtos []string) (string, bool) { - for _, s := range serverProtos { - for _, c := range clientProtos { +func mutualProtocol(protos, preferenceProtos []string) (string, bool) { + for _, s := range preferenceProtos { + for _, c := range protos { if s == c { return s, false } } } - return clientProtos[0], true + return protos[0], true } diff --git a/libgo/go/crypto/tls/handshake_client_test.go b/libgo/go/crypto/tls/handshake_client_test.go index 0d73c8e2f97..e5eaa7de208 100644 --- a/libgo/go/crypto/tls/handshake_client_test.go +++ b/libgo/go/crypto/tls/handshake_client_test.go @@ -49,6 +49,10 @@ type clientTest struct { // key, if not nil, contains either a *rsa.PrivateKey or // *ecdsa.PrivateKey which is the private key for the reference server. key interface{} + // validate, if not nil, is a function that will be called with the + // ConnectionState of the resulting connection. It returns a non-nil + // error if the ConnectionState is unacceptable. 
+ validate func(ConnectionState) error } var defaultServerCommand = []string{"openssl", "s_server"} @@ -188,6 +192,11 @@ func (test *clientTest) run(t *testing.T, write bool) { if _, err := client.Write([]byte("hello\n")); err != nil { t.Logf("Client.Write failed: %s", err) } + if test.validate != nil { + if err := test.validate(client.ConnectionState()); err != nil { + t.Logf("validate callback returned error: %s", err) + } + } client.Close() clientConn.Close() doneChan <- true @@ -196,7 +205,7 @@ func (test *clientTest) run(t *testing.T, write bool) { if !write { flows, err := test.loadData() if err != nil { - t.Fatalf("%s: failed to load data from %s", test.name, test.dataPath()) + t.Fatalf("%s: failed to load data from %s: %v", test.name, test.dataPath(), err) } for i, b := range flows { if i%2 == 1 { @@ -437,3 +446,45 @@ func TestLRUClientSessionCache(t *testing.T) { t.Fatalf("failed to add nil entry to cache") } } + +func TestHandshakeClientALPNMatch(t *testing.T) { + config := *testConfig + config.NextProtos = []string{"proto2", "proto1"} + + test := &clientTest{ + name: "ALPN", + // Note that this needs OpenSSL 1.0.2 because that is the first + // version that supports the -alpn flag. + command: []string{"openssl", "s_server", "-alpn", "proto1,proto2"}, + config: &config, + validate: func(state ConnectionState) error { + // The server's preferences should override the client. + if state.NegotiatedProtocol != "proto1" { + return fmt.Errorf("Got protocol %q, wanted proto1", state.NegotiatedProtocol) + } + return nil + }, + } + runClientTestTLS12(t, test) +} + +func TestHandshakeClientALPNNoMatch(t *testing.T) { + config := *testConfig + config.NextProtos = []string{"proto3"} + + test := &clientTest{ + name: "ALPN-NoMatch", + // Note that this needs OpenSSL 1.0.2 because that is the first + // version that supports the -alpn flag. + command: []string{"openssl", "s_server", "-alpn", "proto1,proto2"}, + config: &config, + validate: func(state ConnectionState) error { + // There's no overlap so OpenSSL will not select a protocol. 
+ if state.NegotiatedProtocol != "" { + return fmt.Errorf("Got protocol %q, wanted ''", state.NegotiatedProtocol) + } + return nil + }, + } + runClientTestTLS12(t, test) +} diff --git a/libgo/go/crypto/tls/handshake_messages.go b/libgo/go/crypto/tls/handshake_messages.go index 7bcaa5eb929..5d14871a348 100644 --- a/libgo/go/crypto/tls/handshake_messages.go +++ b/libgo/go/crypto/tls/handshake_messages.go @@ -22,6 +22,7 @@ type clientHelloMsg struct { sessionTicket []uint8 signatureAndHashes []signatureAndHash secureRenegotiation bool + alpnProtocols []string } func (m *clientHelloMsg) equal(i interface{}) bool { @@ -44,7 +45,8 @@ func (m *clientHelloMsg) equal(i interface{}) bool { m.ticketSupported == m1.ticketSupported && bytes.Equal(m.sessionTicket, m1.sessionTicket) && eqSignatureAndHashes(m.signatureAndHashes, m1.signatureAndHashes) && - m.secureRenegotiation == m1.secureRenegotiation + m.secureRenegotiation == m1.secureRenegotiation && + eqStrings(m.alpnProtocols, m1.alpnProtocols) } func (m *clientHelloMsg) marshal() []byte { @@ -86,6 +88,17 @@ func (m *clientHelloMsg) marshal() []byte { extensionsLength += 1 numExtensions++ } + if len(m.alpnProtocols) > 0 { + extensionsLength += 2 + for _, s := range m.alpnProtocols { + if l := len(s); l == 0 || l > 255 { + panic("invalid ALPN protocol") + } + extensionsLength++ + extensionsLength += len(s) + } + numExtensions++ + } if numExtensions > 0 { extensionsLength += 4 * numExtensions length += 2 + extensionsLength @@ -237,6 +250,27 @@ func (m *clientHelloMsg) marshal() []byte { z[3] = 1 z = z[5:] } + if len(m.alpnProtocols) > 0 { + z[0] = byte(extensionALPN >> 8) + z[1] = byte(extensionALPN & 0xff) + lengths := z[2:] + z = z[6:] + + stringsLength := 0 + for _, s := range m.alpnProtocols { + l := len(s) + z[0] = byte(l) + copy(z[1:], s) + z = z[1+l:] + stringsLength += 1 + l + } + + lengths[2] = byte(stringsLength >> 8) + lengths[3] = byte(stringsLength) + stringsLength += 2 + lengths[0] = byte(stringsLength >> 8) + lengths[1] = byte(stringsLength) + } m.raw = x @@ -291,6 +325,7 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool { m.ticketSupported = false m.sessionTicket = nil m.signatureAndHashes = nil + m.alpnProtocols = nil if len(data) == 0 { // ClientHello is optionally followed by extension data @@ -400,6 +435,24 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool { return false } m.secureRenegotiation = true + case extensionALPN: + if length < 2 { + return false + } + l := int(data[0])<<8 | int(data[1]) + if l != length-2 { + return false + } + d := data[2:length] + for len(d) != 0 { + stringLen := int(d[0]) + d = d[1:] + if stringLen == 0 || stringLen > len(d) { + return false + } + m.alpnProtocols = append(m.alpnProtocols, string(d[:stringLen])) + d = d[stringLen:] + } } data = data[length:] } @@ -419,6 +472,7 @@ type serverHelloMsg struct { ocspStapling bool ticketSupported bool secureRenegotiation bool + alpnProtocol string } func (m *serverHelloMsg) equal(i interface{}) bool { @@ -437,7 +491,8 @@ func (m *serverHelloMsg) equal(i interface{}) bool { eqStrings(m.nextProtos, m1.nextProtos) && m.ocspStapling == m1.ocspStapling && m.ticketSupported == m1.ticketSupported && - m.secureRenegotiation == m1.secureRenegotiation + m.secureRenegotiation == m1.secureRenegotiation && + m.alpnProtocol == m1.alpnProtocol } func (m *serverHelloMsg) marshal() []byte { @@ -468,6 +523,14 @@ func (m *serverHelloMsg) marshal() []byte { extensionsLength += 1 numExtensions++ } + if alpnLen := len(m.alpnProtocol); alpnLen > 0 { + if alpnLen >= 
256 { + panic("invalid ALPN protocol") + } + extensionsLength += 2 + 1 + alpnLen + numExtensions++ + } + if numExtensions > 0 { extensionsLength += 4 * numExtensions length += 2 + extensionsLength @@ -528,6 +591,20 @@ func (m *serverHelloMsg) marshal() []byte { z[3] = 1 z = z[5:] } + if alpnLen := len(m.alpnProtocol); alpnLen > 0 { + z[0] = byte(extensionALPN >> 8) + z[1] = byte(extensionALPN & 0xff) + l := 2 + 1 + alpnLen + z[2] = byte(l >> 8) + z[3] = byte(l) + l -= 2 + z[4] = byte(l >> 8) + z[5] = byte(l) + l -= 1 + z[6] = byte(l) + copy(z[7:], []byte(m.alpnProtocol)) + z = z[7+alpnLen:] + } m.raw = x @@ -558,6 +635,7 @@ func (m *serverHelloMsg) unmarshal(data []byte) bool { m.nextProtos = nil m.ocspStapling = false m.ticketSupported = false + m.alpnProtocol = "" if len(data) == 0 { // ServerHello is optionally followed by extension data @@ -612,6 +690,22 @@ func (m *serverHelloMsg) unmarshal(data []byte) bool { return false } m.secureRenegotiation = true + case extensionALPN: + d := data[:length] + if len(d) < 3 { + return false + } + l := int(d[0])<<8 | int(d[1]) + if l != len(d)-2 { + return false + } + d = d[2:] + l = int(d[0]) + if l != len(d)-1 { + return false + } + d = d[1:] + m.alpnProtocol = string(d) } data = data[length:] } diff --git a/libgo/go/crypto/tls/handshake_messages_test.go b/libgo/go/crypto/tls/handshake_messages_test.go index f46aabdfd5f..a96e95c3f03 100644 --- a/libgo/go/crypto/tls/handshake_messages_test.go +++ b/libgo/go/crypto/tls/handshake_messages_test.go @@ -138,6 +138,10 @@ func (*clientHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value { if rand.Intn(10) > 5 { m.signatureAndHashes = supportedSKXSignatureAlgorithms } + m.alpnProtocols = make([]string, rand.Intn(5)) + for i := range m.alpnProtocols { + m.alpnProtocols[i] = randomString(rand.Intn(20)+1, rand) + } return reflect.ValueOf(m) } @@ -166,6 +170,7 @@ func (*serverHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value { if rand.Intn(10) > 5 { m.ticketSupported = true } + m.alpnProtocol = randomString(rand.Intn(32)+1, rand) return reflect.ValueOf(m) } diff --git a/libgo/go/crypto/tls/handshake_server.go b/libgo/go/crypto/tls/handshake_server.go index dff6fd9adc4..0d907656c6c 100644 --- a/libgo/go/crypto/tls/handshake_server.go +++ b/libgo/go/crypto/tls/handshake_server.go @@ -57,10 +57,10 @@ func (c *Conn) serverHandshake() error { if err := hs.establishKeys(); err != nil { return err } - if err := hs.sendFinished(); err != nil { + if err := hs.sendFinished(c.firstFinished[:]); err != nil { return err } - if err := hs.readFinished(); err != nil { + if err := hs.readFinished(nil); err != nil { return err } c.didResume = true @@ -73,13 +73,13 @@ func (c *Conn) serverHandshake() error { if err := hs.establishKeys(); err != nil { return err } - if err := hs.readFinished(); err != nil { + if err := hs.readFinished(c.firstFinished[:]); err != nil { return err } if err := hs.sendSessionTicket(); err != nil { return err } - if err := hs.sendFinished(); err != nil { + if err := hs.sendFinished(nil); err != nil { return err } } @@ -163,13 +163,21 @@ Curves: if len(hs.clientHello.serverName) > 0 { c.serverName = hs.clientHello.serverName } - // Although sending an empty NPN extension is reasonable, Firefox has - // had a bug around this. Best to send nothing at all if - // config.NextProtos is empty. See - // https://code.google.com/p/go/issues/detail?id=5445. 
- if hs.clientHello.nextProtoNeg && len(config.NextProtos) > 0 { - hs.hello.nextProtoNeg = true - hs.hello.nextProtos = config.NextProtos + + if len(hs.clientHello.alpnProtocols) > 0 { + if selectedProto, fallback := mutualProtocol(hs.clientHello.alpnProtocols, c.config.NextProtos); !fallback { + hs.hello.alpnProtocol = selectedProto + c.clientProtocol = selectedProto + } + } else { + // Although sending an empty NPN extension is reasonable, Firefox has + // had a bug around this. Best to send nothing at all if + // config.NextProtos is empty. See + // https://code.google.com/p/go/issues/detail?id=5445. + if hs.clientHello.nextProtoNeg && len(config.NextProtos) > 0 { + hs.hello.nextProtoNeg = true + hs.hello.nextProtos = config.NextProtos + } } if len(config.Certificates) == 0 { @@ -178,7 +186,16 @@ Curves: } hs.cert = &config.Certificates[0] if len(hs.clientHello.serverName) > 0 { - hs.cert = config.getCertificateForName(hs.clientHello.serverName) + chi := &ClientHelloInfo{ + CipherSuites: hs.clientHello.cipherSuites, + ServerName: hs.clientHello.serverName, + SupportedCurves: hs.clientHello.supportedCurves, + SupportedPoints: hs.clientHello.supportedPoints, + } + if hs.cert, err = config.getCertificate(chi); err != nil { + c.sendAlert(alertInternalError) + return false, err + } } _, hs.ecdsaOk = hs.cert.PrivateKey.(*ecdsa.PrivateKey) @@ -207,6 +224,18 @@ Curves: return false, errors.New("tls: no cipher suite supported by both client and server") } + // See https://tools.ietf.org/html/draft-ietf-tls-downgrade-scsv-00. + for _, id := range hs.clientHello.cipherSuites { + if id == TLS_FALLBACK_SCSV { + // The client is doing a fallback connection. + if hs.clientHello.vers < c.config.MaxVersion { + c.sendAlert(alertInappropriateFallback) + return false, errors.New("tls: client using inppropriate protocol fallback") + } + break + } + } + return false, nil } @@ -470,7 +499,7 @@ func (hs *serverHandshakeState) establishKeys() error { return nil } -func (hs *serverHandshakeState) readFinished() error { +func (hs *serverHandshakeState) readFinished(out []byte) error { c := hs.c c.readRecord(recordTypeChangeCipherSpec) @@ -510,6 +539,7 @@ func (hs *serverHandshakeState) readFinished() error { } hs.finishedHash.Write(clientFinished.marshal()) + copy(out, verify) return nil } @@ -539,7 +569,7 @@ func (hs *serverHandshakeState) sendSessionTicket() error { return nil } -func (hs *serverHandshakeState) sendFinished() error { +func (hs *serverHandshakeState) sendFinished(out []byte) error { c := hs.c c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) @@ -550,6 +580,7 @@ func (hs *serverHandshakeState) sendFinished() error { c.writeRecord(recordTypeHandshake, finished.marshal()) c.cipherSuite = hs.suite.id + copy(out, finished.verifyData) return nil } diff --git a/libgo/go/crypto/tls/handshake_server_test.go b/libgo/go/crypto/tls/handshake_server_test.go index 3444d5ca829..0338af457ee 100644 --- a/libgo/go/crypto/tls/handshake_server_test.go +++ b/libgo/go/crypto/tls/handshake_server_test.go @@ -9,7 +9,6 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rsa" - "crypto/x509" "encoding/hex" "encoding/pem" "errors" @@ -258,6 +257,16 @@ type serverTest struct { expectedPeerCerts []string // config, if not nil, contains a custom Config to use for this test. config *Config + // expectAlert, if true, indicates that a fatal alert should be returned + // when handshaking with the server. 
+ expectAlert bool + // expectHandshakeErrorIncluding, when not empty, contains a string + // that must be a substring of the error resulting from the handshake. + expectHandshakeErrorIncluding string + // validate, if not nil, is a function that will be called with the + // ConnectionState of the resulting connection. It returns false if the + // ConnectionState is unacceptable. + validate func(ConnectionState) error } var defaultClientCommand = []string{"openssl", "s_client", "-no_ticket"} @@ -354,20 +363,30 @@ func (test *serverTest) run(t *testing.T, write bool) { config = testConfig } server := Server(serverConn, config) - peerCertsChan := make(chan []*x509.Certificate, 1) + connStateChan := make(chan ConnectionState, 1) go func() { - if _, err := server.Write([]byte("hello, world\n")); err != nil { + var err error + if _, err = server.Write([]byte("hello, world\n")); err != nil { t.Logf("Error from Server.Write: %s", err) } + if len(test.expectHandshakeErrorIncluding) > 0 { + if err == nil { + t.Errorf("Error expected, but no error returned") + } else if s := err.Error(); !strings.Contains(s, test.expectHandshakeErrorIncluding) { + t.Errorf("Error expected containing '%s' but got '%s'", test.expectHandshakeErrorIncluding, s) + } + } server.Close() serverConn.Close() - peerCertsChan <- server.ConnectionState().PeerCertificates + connStateChan <- server.ConnectionState() }() if !write { flows, err := test.loadData() if err != nil { - t.Fatalf("%s: failed to load data from %s", test.name, test.dataPath()) + if !test.expectAlert { + t.Fatalf("%s: failed to load data from %s", test.name, test.dataPath()) + } } for i, b := range flows { if i%2 == 0 { @@ -376,17 +395,24 @@ func (test *serverTest) run(t *testing.T, write bool) { } bb := make([]byte, len(b)) n, err := io.ReadFull(clientConn, bb) - if err != nil { - t.Fatalf("%s #%d: %s\nRead %d, wanted %d, got %x, wanted %x\n", test.name, i+1, err, n, len(bb), bb[:n], b) - } - if !bytes.Equal(b, bb) { - t.Fatalf("%s #%d: mismatch on read: got:%x want:%x", test.name, i+1, bb, b) + if test.expectAlert { + if err == nil { + t.Fatal("Expected read failure but read succeeded") + } + } else { + if err != nil { + t.Fatalf("%s #%d: %s\nRead %d, wanted %d, got %x, wanted %x\n", test.name, i+1, err, n, len(bb), bb[:n], b) + } + if !bytes.Equal(b, bb) { + t.Fatalf("%s #%d: mismatch on read: got:%x want:%x", test.name, i+1, bb, b) + } } } clientConn.Close() } - peerCerts := <-peerCertsChan + connState := <-connStateChan + peerCerts := connState.PeerCertificates if len(peerCerts) == len(test.expectedPeerCerts) { for i, peerCert := range peerCerts { block, _ := pem.Decode([]byte(test.expectedPeerCerts[i])) @@ -398,6 +424,12 @@ func (test *serverTest) run(t *testing.T, write bool) { t.Fatalf("%s: mismatch on peer list length: %d (wanted) != %d (got)", test.name, len(test.expectedPeerCerts), len(peerCerts)) } + if test.validate != nil { + if err := test.validate(connState); err != nil { + t.Fatalf("validate callback returned error: %s", err) + } + } + if write { path := test.dataPath() out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) @@ -408,7 +440,9 @@ func (test *serverTest) run(t *testing.T, write bool) { recordingConn.Close() if len(recordingConn.flows) < 3 { childProcess.Stdout.(*bytes.Buffer).WriteTo(os.Stdout) - t.Fatalf("Handshake failed") + if len(test.expectHandshakeErrorIncluding) == 0 { + t.Fatalf("Handshake failed") + } } recordingConn.WriteTo(out) fmt.Printf("Wrote %s\n", path) @@ -498,6 +532,49 @@ func 
TestHandshakeServerECDHEECDSAAES(t *testing.T) { runServerTestTLS12(t, test) } +func TestHandshakeServerALPN(t *testing.T) { + config := *testConfig + config.NextProtos = []string{"proto1", "proto2"} + + test := &serverTest{ + name: "ALPN", + // Note that this needs OpenSSL 1.0.2 because that is the first + // version that supports the -alpn flag. + command: []string{"openssl", "s_client", "-alpn", "proto2,proto1"}, + config: &config, + validate: func(state ConnectionState) error { + // The server's preferences should override the client. + if state.NegotiatedProtocol != "proto1" { + return fmt.Errorf("Got protocol %q, wanted proto1", state.NegotiatedProtocol) + } + return nil + }, + } + runServerTestTLS12(t, test) +} + +func TestHandshakeServerALPNNoMatch(t *testing.T) { + config := *testConfig + config.NextProtos = []string{"proto3"} + + test := &serverTest{ + name: "ALPN-NoMatch", + // Note that this needs OpenSSL 1.0.2 because that is the first + // version that supports the -alpn flag. + command: []string{"openssl", "s_client", "-alpn", "proto2,proto1"}, + config: &config, + validate: func(state ConnectionState) error { + // Rather than reject the connection, Go doesn't select + // a protocol when there is no overlap. + if state.NegotiatedProtocol != "" { + return fmt.Errorf("Got protocol %q, wanted ''", state.NegotiatedProtocol) + } + return nil + }, + } + runServerTestTLS12(t, test) +} + // TestHandshakeServerSNI involves a client sending an SNI extension of // "snitest.com", which happens to match the CN of testSNICertificate. The test // verifies that the server correctly selects that certificate. @@ -509,6 +586,61 @@ func TestHandshakeServerSNI(t *testing.T) { runServerTestTLS12(t, test) } +// TestHandshakeServerSNICertForName is similar to TestHandshakeServerSNI, but +// tests the dynamic GetCertificate method +func TestHandshakeServerSNIGetCertificate(t *testing.T) { + config := *testConfig + + // Replace the NameToCertificate map with a GetCertificate function + nameToCert := config.NameToCertificate + config.NameToCertificate = nil + config.GetCertificate = func(clientHello *ClientHelloInfo) (*Certificate, error) { + cert, _ := nameToCert[clientHello.ServerName] + return cert, nil + } + test := &serverTest{ + name: "SNI", + command: []string{"openssl", "s_client", "-no_ticket", "-cipher", "AES128-SHA", "-servername", "snitest.com"}, + config: &config, + } + runServerTestTLS12(t, test) +} + +// TestHandshakeServerSNICertForNameNotFound is similar to +// TestHandshakeServerSNICertForName, but tests to make sure that when the +// GetCertificate method doesn't return a cert, we fall back to what's in +// the NameToCertificate map. +func TestHandshakeServerSNIGetCertificateNotFound(t *testing.T) { + config := *testConfig + + config.GetCertificate = func(clientHello *ClientHelloInfo) (*Certificate, error) { + return nil, nil + } + test := &serverTest{ + name: "SNI", + command: []string{"openssl", "s_client", "-no_ticket", "-cipher", "AES128-SHA", "-servername", "snitest.com"}, + config: &config, + } + runServerTestTLS12(t, test) +} + +// TestHandshakeServerSNICertForNameError tests to make sure that errors in +// GetCertificate result in a tls alert. 
+func TestHandshakeServerSNIGetCertificateError(t *testing.T) { + config := *testConfig + + config.GetCertificate = func(clientHello *ClientHelloInfo) (*Certificate, error) { + return nil, fmt.Errorf("Test error in GetCertificate") + } + test := &serverTest{ + name: "SNI", + command: []string{"openssl", "s_client", "-no_ticket", "-cipher", "AES128-SHA", "-servername", "snitest.com"}, + config: &config, + expectAlert: true, + } + runServerTestTLS12(t, test) +} + // TestCipherSuiteCertPreferance ensures that we select an RSA ciphersuite with // an RSA certificate and an ECDSA ciphersuite with an ECDSA certificate. func TestCipherSuiteCertPreferenceECDSA(t *testing.T) { @@ -525,7 +657,7 @@ func TestCipherSuiteCertPreferenceECDSA(t *testing.T) { config = *testConfig config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA} config.Certificates = []Certificate{ - Certificate{ + { Certificate: [][]byte{testECDSACertificate}, PrivateKey: testECDSAPrivateKey, }, @@ -583,6 +715,16 @@ func TestResumptionDisabled(t *testing.T) { // file for ResumeDisabled does not include a resumption handshake. } +func TestFallbackSCSV(t *testing.T) { + test := &serverTest{ + name: "FallbackSCSV", + // OpenSSL 1.0.1j is needed for the -fallback_scsv option. + command: []string{"openssl", "s_client", "-fallback_scsv"}, + expectHandshakeErrorIncluding: "inppropriate protocol fallback", + } + runServerTestTLS11(t, test) +} + // cert.pem and key.pem were generated with generate_cert.go // Thus, they have no ExtKeyUsage fields and trigger an error // when verification is turned on. diff --git a/libgo/go/crypto/tls/key_agreement.go b/libgo/go/crypto/tls/key_agreement.go index f38b701f1ba..0974fc6e0f4 100644 --- a/libgo/go/crypto/tls/key_agreement.go +++ b/libgo/go/crypto/tls/key_agreement.go @@ -292,6 +292,9 @@ func (ka *ecdheKeyAgreement) processClientKeyExchange(config *Config, cert *Cert if x == nil { return nil, errClientKeyExchange } + if !ka.curve.IsOnCurve(x, y) { + return nil, errClientKeyExchange + } x, _ = ka.curve.ScalarMult(x, y, ka.privateKey) preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3) xBytes := x.Bytes() @@ -322,6 +325,9 @@ func (ka *ecdheKeyAgreement) processServerKeyExchange(config *Config, clientHell if ka.x == nil { return errServerKeyExchange } + if !ka.curve.IsOnCurve(ka.x, ka.y) { + return errServerKeyExchange + } serverECDHParams := skx.key[:4+publicLen] sig := skx.key[4+publicLen:] diff --git a/libgo/go/crypto/tls/testdata/Client-TLSv12-ALPN b/libgo/go/crypto/tls/testdata/Client-TLSv12-ALPN new file mode 100644 index 00000000000..f09a4f106c1 --- /dev/null +++ b/libgo/go/crypto/tls/testdata/Client-TLSv12-ALPN @@ -0,0 +1,97 @@ +>>> Flow 1 (client to server) +00000000 16 03 01 00 8d 01 00 00 89 03 03 00 00 00 00 00 |................| +00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| +00000020 00 00 00 00 00 00 00 00 00 00 00 00 00 1a c0 2f |.............../| +00000030 c0 2b c0 11 c0 07 c0 13 c0 09 c0 14 c0 0a 00 05 |.+..............| +00000040 00 2f 00 35 c0 12 00 0a 01 00 00 46 33 74 00 00 |./.5.......F3t..| +00000050 00 05 00 05 01 00 00 00 00 00 0a 00 08 00 06 00 |................| +00000060 17 00 18 00 19 00 0b 00 02 01 00 00 0d 00 0a 00 |................| +00000070 08 04 01 04 03 02 01 02 03 ff 01 00 01 00 00 10 |................| +00000080 00 10 00 0e 06 70 72 6f 74 6f 32 06 70 72 6f 74 |.....proto2.prot| +00000090 6f 31 |o1| +>>> Flow 2 (server to client) +00000000 16 03 03 00 66 02 00 00 62 03 03 77 
a9 7d 9c 4b |....f...b..w.}.K| +00000010 69 65 aa dc 95 cb 78 08 3d d2 1a 0a 45 69 23 73 |ie....x.=...Ei#s| +00000020 4f 41 4f 24 12 2e 57 47 b7 53 64 20 82 9a f8 e7 |OAO$..WG.Sd ....| +00000030 79 f8 13 2c 9d cd b5 cb cb 9a 95 56 0e e9 cb a8 |y..,.......V....| +00000040 e4 a2 8a d6 bc dc fa 25 b3 57 cc cf c0 2f 00 00 |.......%.W.../..| +00000050 1a ff 01 00 01 00 00 0b 00 04 03 00 01 02 00 10 |................| +00000060 00 09 00 07 06 70 72 6f 74 6f 31 16 03 03 02 be |.....proto1.....| +00000070 0b 00 02 ba 00 02 b7 00 02 b4 30 82 02 b0 30 82 |..........0...0.| +00000080 02 19 a0 03 02 01 02 02 09 00 85 b0 bb a4 8a 7f |................| +00000090 b8 ca 30 0d 06 09 2a 86 48 86 f7 0d 01 01 05 05 |..0...*.H.......| +000000a0 00 30 45 31 0b 30 09 06 03 55 04 06 13 02 41 55 |.0E1.0...U....AU| +000000b0 31 13 30 11 06 03 55 04 08 13 0a 53 6f 6d 65 2d |1.0...U....Some-| +000000c0 53 74 61 74 65 31 21 30 1f 06 03 55 04 0a 13 18 |State1!0...U....| +000000d0 49 6e 74 65 72 6e 65 74 20 57 69 64 67 69 74 73 |Internet Widgits| +000000e0 20 50 74 79 20 4c 74 64 30 1e 17 0d 31 30 30 34 | Pty Ltd0...1004| +000000f0 32 34 30 39 30 39 33 38 5a 17 0d 31 31 30 34 32 |24090938Z..11042| +00000100 34 30 39 30 39 33 38 5a 30 45 31 0b 30 09 06 03 |4090938Z0E1.0...| +00000110 55 04 06 13 02 41 55 31 13 30 11 06 03 55 04 08 |U....AU1.0...U..| +00000120 13 0a 53 6f 6d 65 2d 53 74 61 74 65 31 21 30 1f |..Some-State1!0.| +00000130 06 03 55 04 0a 13 18 49 6e 74 65 72 6e 65 74 20 |..U....Internet | +00000140 57 69 64 67 69 74 73 20 50 74 79 20 4c 74 64 30 |Widgits Pty Ltd0| +00000150 81 9f 30 0d 06 09 2a 86 48 86 f7 0d 01 01 01 05 |..0...*.H.......| +00000160 00 03 81 8d 00 30 81 89 02 81 81 00 bb 79 d6 f5 |.....0.......y..| +00000170 17 b5 e5 bf 46 10 d0 dc 69 be e6 2b 07 43 5a d0 |....F...i..+.CZ.| +00000180 03 2d 8a 7a 43 85 b7 14 52 e7 a5 65 4c 2c 78 b8 |.-.zC...R..eL,x.| +00000190 23 8c b5 b4 82 e5 de 1f 95 3b 7e 62 a5 2c a5 33 |#........;~b.,.3| +000001a0 d6 fe 12 5c 7a 56 fc f5 06 bf fa 58 7b 26 3f b5 |...\zV.....X{&?.| +000001b0 cd 04 d3 d0 c9 21 96 4a c7 f4 54 9f 5a bf ef 42 |.....!.J..T.Z..B| +000001c0 71 00 fe 18 99 07 7f 7e 88 7d 7d f1 04 39 c4 a2 |q......~.}}..9..| +000001d0 2e db 51 c9 7c e3 c0 4c 3b 32 66 01 cf af b1 1d |..Q.|..L;2f.....| +000001e0 b8 71 9a 1d db db 89 6b ae da 2d 79 02 03 01 00 |.q.....k..-y....| +000001f0 01 a3 81 a7 30 81 a4 30 1d 06 03 55 1d 0e 04 16 |....0..0...U....| +00000200 04 14 b1 ad e2 85 5a cf cb 28 db 69 ce 23 69 de |......Z..(.i.#i.| +00000210 d3 26 8e 18 88 39 30 75 06 03 55 1d 23 04 6e 30 |.&...90u..U.#.n0| +00000220 6c 80 14 b1 ad e2 85 5a cf cb 28 db 69 ce 23 69 |l......Z..(.i.#i| +00000230 de d3 26 8e 18 88 39 a1 49 a4 47 30 45 31 0b 30 |..&...9.I.G0E1.0| +00000240 09 06 03 55 04 06 13 02 41 55 31 13 30 11 06 03 |...U....AU1.0...| +00000250 55 04 08 13 0a 53 6f 6d 65 2d 53 74 61 74 65 31 |U....Some-State1| +00000260 21 30 1f 06 03 55 04 0a 13 18 49 6e 74 65 72 6e |!0...U....Intern| +00000270 65 74 20 57 69 64 67 69 74 73 20 50 74 79 20 4c |et Widgits Pty L| +00000280 74 64 82 09 00 85 b0 bb a4 8a 7f b8 ca 30 0c 06 |td...........0..| +00000290 03 55 1d 13 04 05 30 03 01 01 ff 30 0d 06 09 2a |.U....0....0...*| +000002a0 86 48 86 f7 0d 01 01 05 05 00 03 81 81 00 08 6c |.H.............l| +000002b0 45 24 c7 6b b1 59 ab 0c 52 cc f2 b0 14 d7 87 9d |E$.k.Y..R.......| +000002c0 7a 64 75 b5 5a 95 66 e4 c5 2b 8e ae 12 66 1f eb |zdu.Z.f..+...f..| +000002d0 4f 38 b3 6e 60 d3 92 fd f7 41 08 b5 25 13 b1 18 |O8.n`....A..%...| +000002e0 7a 24 fb 30 1d ba ed 98 b9 17 ec e7 d7 31 59 db 
|z$.0.........1Y.| +000002f0 95 d3 1d 78 ea 50 56 5c d5 82 5a 2d 5a 5f 33 c4 |...x.PV\..Z-Z_3.| +00000300 b6 d8 c9 75 90 96 8c 0f 52 98 b5 cd 98 1f 89 20 |...u....R...... | +00000310 5f f2 a0 1c a3 1b 96 94 dd a9 fd 57 e9 70 e8 26 |_..........W.p.&| +00000320 6d 71 99 9b 26 6e 38 50 29 6c 90 a7 bd d9 16 03 |mq..&n8P)l......| +00000330 03 00 cd 0c 00 00 c9 03 00 17 41 04 1b 42 c3 ae |..........A..B..| +00000340 44 19 d3 84 7c 6c 98 cb b9 22 a2 67 63 95 aa cc |D...|l...".gc...| +00000350 bd e4 1e f8 08 e6 60 f3 bc 83 9f 81 da 9c 1c 8c |......`.........| +00000360 ff 6f f4 3e 1e e5 3b f6 49 61 f9 70 43 7f c1 69 |.o.>..;.Ia.pC..i| +00000370 de 73 98 4b bd 5c c3 78 24 18 a8 ec 04 01 00 80 |.s.K.\.x$.......| +00000380 70 d2 5b e1 39 cf 4d 54 de d2 74 4e 5e a8 b3 ca |p.[.9.MT..tN^...| +00000390 e1 f2 4e 76 3c 77 8b ef f7 d1 df b9 ad c1 70 39 |..Nv<w........p9| +000003a0 c7 a3 1e 0f 7b 6c 78 2e c1 86 d2 67 36 d8 25 e0 |....{lx....g6.%.| +000003b0 e8 e5 cc 35 a2 96 a1 b4 b7 06 68 1e aa c7 06 97 |...5......h.....| +000003c0 b7 c2 83 ce c0 17 dd 4f 9e 6f 7a bd cd c7 6e 7f |.......O.oz...n.| +000003d0 cb 80 d1 7d 06 2d f9 f1 fb 5f cc bb d8 62 5b f0 |...}.-..._...b[.| +000003e0 27 12 57 d5 9b 55 aa 55 4b 9a 5a f6 a5 aa c1 82 |'.W..U.UK.Z.....| +000003f0 39 11 6b dc 83 7f a8 47 28 5a 0f 3d 3f 0f c2 22 |9.k....G(Z.=?.."| +00000400 16 03 03 00 04 0e 00 00 00 |.........| +>>> Flow 3 (client to server) +00000000 16 03 03 00 46 10 00 00 42 41 04 1e 18 37 ef 0d |....F...BA...7..| +00000010 19 51 88 35 75 71 b5 e5 54 5b 12 2e 8f 09 67 fd |.Q.5uq..T[....g.| +00000020 a7 24 20 3e b2 56 1c ce 97 28 5e f8 2b 2d 4f 9e |.$ >.V...(^.+-O.| +00000030 f1 07 9f 6c 4b 5b 83 56 e2 32 42 e9 58 b6 d7 49 |...lK[.V.2B.X..I| +00000040 a6 b5 68 1a 41 03 56 6b dc 5a 89 14 03 03 00 01 |..h.A.Vk.Z......| +00000050 01 16 03 03 00 28 00 00 00 00 00 00 00 00 35 9d |.....(........5.| +00000060 92 e8 bf df 7f a7 77 1b cf 03 2a bf e2 6c 62 2b |......w...*..lb+| +00000070 26 f0 fb 93 d3 df fd 55 84 d3 ed 88 31 cb |&......U....1.| +>>> Flow 4 (server to client) +00000000 14 03 03 00 01 01 16 03 03 00 28 c8 c0 78 09 73 |..........(..x.s| +00000010 58 41 73 66 88 cf db f3 fe c6 57 ab 45 be 2e d8 |XAsf......W.E...| +00000020 4e e5 ff 42 57 13 74 d2 cc c2 62 07 39 8b 06 46 |N..BW.t...b.9..F| +00000030 1d 8f 88 |...| +>>> Flow 5 (client to server) +00000000 17 03 03 00 1e 00 00 00 00 00 00 00 01 10 c3 5f |..............._| +00000010 3f c8 92 6c 7a a7 23 05 f3 d8 31 20 01 52 f1 99 |?..lz.#...1 .R..| +00000020 33 c1 2a 15 03 03 00 1a 00 00 00 00 00 00 00 02 |3.*.............| +00000030 cc ef eb 78 e4 e1 9d 90 05 6d 95 ac f2 49 ba 8e |...x.....m...I..| +00000040 6b 8d |k.| diff --git a/libgo/go/crypto/tls/testdata/Client-TLSv12-ALPN-NoMatch b/libgo/go/crypto/tls/testdata/Client-TLSv12-ALPN-NoMatch new file mode 100644 index 00000000000..f24a70cc828 --- /dev/null +++ b/libgo/go/crypto/tls/testdata/Client-TLSv12-ALPN-NoMatch @@ -0,0 +1,95 @@ +>>> Flow 1 (client to server) +00000000 16 03 01 00 86 01 00 00 82 03 03 00 00 00 00 00 |................| +00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| +00000020 00 00 00 00 00 00 00 00 00 00 00 00 00 1a c0 2f |.............../| +00000030 c0 2b c0 11 c0 07 c0 13 c0 09 c0 14 c0 0a 00 05 |.+..............| +00000040 00 2f 00 35 c0 12 00 0a 01 00 00 3f 33 74 00 00 |./.5.......?3t..| +00000050 00 05 00 05 01 00 00 00 00 00 0a 00 08 00 06 00 |................| +00000060 17 00 18 00 19 00 0b 00 02 01 00 00 0d 00 0a 00 |................| +00000070 08 04 01 04 03 02 01 02 03 ff 01 00 01 
00 00 10 |................| +00000080 00 09 00 07 06 70 72 6f 74 6f 33 |.....proto3| +>>> Flow 2 (server to client) +00000000 16 03 03 00 59 02 00 00 55 03 03 69 84 d1 d3 44 |....Y...U..i...D| +00000010 e9 66 08 48 bc 70 d8 ae 40 0b 17 69 e7 27 f6 7a |.f.H.p..@..i.'.z| +00000020 d5 ee 86 74 54 9e a8 bb 79 76 89 20 57 53 1b 02 |...tT...yv. WS..| +00000030 5b 70 81 a6 f1 53 bc 9d b7 42 5e ac 92 93 b5 20 |[p...S...B^.... | +00000040 8a bb 36 cc 8f cb 7e a0 61 a2 e8 ef c0 2f 00 00 |..6...~.a..../..| +00000050 0d ff 01 00 01 00 00 0b 00 04 03 00 01 02 16 03 |................| +00000060 03 02 be 0b 00 02 ba 00 02 b7 00 02 b4 30 82 02 |.............0..| +00000070 b0 30 82 02 19 a0 03 02 01 02 02 09 00 85 b0 bb |.0..............| +00000080 a4 8a 7f b8 ca 30 0d 06 09 2a 86 48 86 f7 0d 01 |.....0...*.H....| +00000090 01 05 05 00 30 45 31 0b 30 09 06 03 55 04 06 13 |....0E1.0...U...| +000000a0 02 41 55 31 13 30 11 06 03 55 04 08 13 0a 53 6f |.AU1.0...U....So| +000000b0 6d 65 2d 53 74 61 74 65 31 21 30 1f 06 03 55 04 |me-State1!0...U.| +000000c0 0a 13 18 49 6e 74 65 72 6e 65 74 20 57 69 64 67 |...Internet Widg| +000000d0 69 74 73 20 50 74 79 20 4c 74 64 30 1e 17 0d 31 |its Pty Ltd0...1| +000000e0 30 30 34 32 34 30 39 30 39 33 38 5a 17 0d 31 31 |00424090938Z..11| +000000f0 30 34 32 34 30 39 30 39 33 38 5a 30 45 31 0b 30 |0424090938Z0E1.0| +00000100 09 06 03 55 04 06 13 02 41 55 31 13 30 11 06 03 |...U....AU1.0...| +00000110 55 04 08 13 0a 53 6f 6d 65 2d 53 74 61 74 65 31 |U....Some-State1| +00000120 21 30 1f 06 03 55 04 0a 13 18 49 6e 74 65 72 6e |!0...U....Intern| +00000130 65 74 20 57 69 64 67 69 74 73 20 50 74 79 20 4c |et Widgits Pty L| +00000140 74 64 30 81 9f 30 0d 06 09 2a 86 48 86 f7 0d 01 |td0..0...*.H....| +00000150 01 01 05 00 03 81 8d 00 30 81 89 02 81 81 00 bb |........0.......| +00000160 79 d6 f5 17 b5 e5 bf 46 10 d0 dc 69 be e6 2b 07 |y......F...i..+.| +00000170 43 5a d0 03 2d 8a 7a 43 85 b7 14 52 e7 a5 65 4c |CZ..-.zC...R..eL| +00000180 2c 78 b8 23 8c b5 b4 82 e5 de 1f 95 3b 7e 62 a5 |,x.#........;~b.| +00000190 2c a5 33 d6 fe 12 5c 7a 56 fc f5 06 bf fa 58 7b |,.3...\zV.....X{| +000001a0 26 3f b5 cd 04 d3 d0 c9 21 96 4a c7 f4 54 9f 5a |&?......!.J..T.Z| +000001b0 bf ef 42 71 00 fe 18 99 07 7f 7e 88 7d 7d f1 04 |..Bq......~.}}..| +000001c0 39 c4 a2 2e db 51 c9 7c e3 c0 4c 3b 32 66 01 cf |9....Q.|..L;2f..| +000001d0 af b1 1d b8 71 9a 1d db db 89 6b ae da 2d 79 02 |....q.....k..-y.| +000001e0 03 01 00 01 a3 81 a7 30 81 a4 30 1d 06 03 55 1d |.......0..0...U.| +000001f0 0e 04 16 04 14 b1 ad e2 85 5a cf cb 28 db 69 ce |.........Z..(.i.| +00000200 23 69 de d3 26 8e 18 88 39 30 75 06 03 55 1d 23 |#i..&...90u..U.#| +00000210 04 6e 30 6c 80 14 b1 ad e2 85 5a cf cb 28 db 69 |.n0l......Z..(.i| +00000220 ce 23 69 de d3 26 8e 18 88 39 a1 49 a4 47 30 45 |.#i..&...9.I.G0E| +00000230 31 0b 30 09 06 03 55 04 06 13 02 41 55 31 13 30 |1.0...U....AU1.0| +00000240 11 06 03 55 04 08 13 0a 53 6f 6d 65 2d 53 74 61 |...U....Some-Sta| +00000250 74 65 31 21 30 1f 06 03 55 04 0a 13 18 49 6e 74 |te1!0...U....Int| +00000260 65 72 6e 65 74 20 57 69 64 67 69 74 73 20 50 74 |ernet Widgits Pt| +00000270 79 20 4c 74 64 82 09 00 85 b0 bb a4 8a 7f b8 ca |y Ltd...........| +00000280 30 0c 06 03 55 1d 13 04 05 30 03 01 01 ff 30 0d |0...U....0....0.| +00000290 06 09 2a 86 48 86 f7 0d 01 01 05 05 00 03 81 81 |..*.H...........| +000002a0 00 08 6c 45 24 c7 6b b1 59 ab 0c 52 cc f2 b0 14 |..lE$.k.Y..R....| +000002b0 d7 87 9d 7a 64 75 b5 5a 95 66 e4 c5 2b 8e ae 12 |...zdu.Z.f..+...| +000002c0 66 1f eb 4f 38 b3 6e 60 d3 92 fd f7 41 
08 b5 25 |f..O8.n`....A..%| +000002d0 13 b1 18 7a 24 fb 30 1d ba ed 98 b9 17 ec e7 d7 |...z$.0.........| +000002e0 31 59 db 95 d3 1d 78 ea 50 56 5c d5 82 5a 2d 5a |1Y....x.PV\..Z-Z| +000002f0 5f 33 c4 b6 d8 c9 75 90 96 8c 0f 52 98 b5 cd 98 |_3....u....R....| +00000300 1f 89 20 5f f2 a0 1c a3 1b 96 94 dd a9 fd 57 e9 |.. _..........W.| +00000310 70 e8 26 6d 71 99 9b 26 6e 38 50 29 6c 90 a7 bd |p.&mq..&n8P)l...| +00000320 d9 16 03 03 00 cd 0c 00 00 c9 03 00 17 41 04 04 |.............A..| +00000330 be 27 08 6f 12 83 1b 04 76 fa 5f 16 d6 e3 64 76 |.'.o....v._...dv| +00000340 ad 0a 77 37 71 64 44 4c 3f 1a be dc 85 ce 46 c8 |..w7qdDL?.....F.| +00000350 29 a1 e2 24 78 66 1f 35 90 05 46 c0 91 d1 fd dd |)..$xf.5..F.....| +00000360 b5 5b 87 d7 6d 9d 77 a7 f7 b3 df 68 27 fd 6d 04 |.[..m.w....h'.m.| +00000370 01 00 80 7b 9b fd 0d 62 57 07 ef 97 f5 ff a9 00 |...{...bW.......| +00000380 a0 89 35 5a 8a e6 e7 ae 7b 55 c5 dc 21 64 87 6e |..5Z....{U..!d.n| +00000390 0f ab 85 6d 82 e8 83 fd 7d 3b 49 a7 ae 92 5f 6d |...m....};I..._m| +000003a0 a3 42 ce ff ef a6 00 6a 33 32 1f 7b eb b7 c2 5c |.B.....j32.{...\| +000003b0 2d 38 cf 10 4b 59 69 4d 15 e0 68 49 39 ba cb 2a |-8..KYiM..hI9..*| +000003c0 d9 b9 f3 fe 33 01 4f 7e ac 69 02 35 a5 e0 33 8d |....3.O~.i.5..3.| +000003d0 b3 74 34 14 45 9c 89 ad 41 2d d0 27 22 90 58 c6 |.t4.E...A-.'".X.| +000003e0 e0 2c b4 6e 19 04 e4 46 26 ec 13 35 48 a6 3f 64 |.,.n...F&..5H.?d| +000003f0 dc 85 2b 16 03 03 00 04 0e 00 00 00 |..+.........| +>>> Flow 3 (client to server) +00000000 16 03 03 00 46 10 00 00 42 41 04 1e 18 37 ef 0d |....F...BA...7..| +00000010 19 51 88 35 75 71 b5 e5 54 5b 12 2e 8f 09 67 fd |.Q.5uq..T[....g.| +00000020 a7 24 20 3e b2 56 1c ce 97 28 5e f8 2b 2d 4f 9e |.$ >.V...(^.+-O.| +00000030 f1 07 9f 6c 4b 5b 83 56 e2 32 42 e9 58 b6 d7 49 |...lK[.V.2B.X..I| +00000040 a6 b5 68 1a 41 03 56 6b dc 5a 89 14 03 03 00 01 |..h.A.Vk.Z......| +00000050 01 16 03 03 00 28 00 00 00 00 00 00 00 00 88 0d |.....(..........| +00000060 04 8b 8e 93 55 58 d6 75 ca 16 26 42 a3 60 20 67 |....UX.u..&B.` g| +00000070 84 cf d7 b3 10 fe 63 6c 2f 40 64 0c d6 78 |......cl/@d..x| +>>> Flow 4 (server to client) +00000000 14 03 03 00 01 01 16 03 03 00 28 bd 6c 2f 70 b9 |..........(.l/p.| +00000010 2f 9c 29 70 af 34 49 4c 5b 25 c3 14 b6 6d 28 81 |/.)p.4IL[%...m(.| +00000020 ff 54 d9 71 8d 2c c7 38 dd 44 27 6b 54 1e 53 7b |.T.q.,.8.D'kT.S{| +00000030 22 cb 65 |".e| +>>> Flow 5 (client to server) +00000000 17 03 03 00 1e 00 00 00 00 00 00 00 01 7f 0d d7 |................| +00000010 d9 4b 87 7b 36 fb 24 92 69 22 43 50 1e 46 fb c4 |.K.{6.$.i"CP.F..| +00000020 86 64 6f 15 03 03 00 1a 00 00 00 00 00 00 00 02 |.do.............| +00000030 37 d5 2d 0a be c5 a8 ae d4 bd 2b 09 34 18 a0 87 |7.-.......+.4...| +00000040 08 a6 |..| diff --git a/libgo/go/crypto/tls/testdata/Server-TLSv11-FallbackSCSV b/libgo/go/crypto/tls/testdata/Server-TLSv11-FallbackSCSV new file mode 100644 index 00000000000..2d8dfbc3b49 --- /dev/null +++ b/libgo/go/crypto/tls/testdata/Server-TLSv11-FallbackSCSV @@ -0,0 +1,17 @@ +>>> Flow 1 (client to server) +00000000 16 03 01 00 d4 01 00 00 d0 03 02 74 2d da 6d 98 |...........t-.m.| +00000010 ad 3e a5 ec 90 ea d1 5b f0 e0 a7 45 33 d9 5e 8d |.>.....[...E3.^.| +00000020 0f 1d 01 16 6d 00 31 65 ed 50 88 00 00 5e c0 14 |....m.1e.P...^..| +00000030 c0 0a 00 39 00 38 00 88 00 87 c0 0f c0 05 00 35 |...9.8.........5| +00000040 00 84 c0 13 c0 09 00 33 00 32 00 9a 00 99 00 45 |.......3.2.....E| +00000050 00 44 c0 0e c0 04 00 2f 00 96 00 41 00 07 c0 11 |.D...../...A....| +00000060 c0 07 c0 0c c0 02 
00 05 00 04 c0 12 c0 08 00 16 |................| +00000070 00 13 c0 0d c0 03 00 0a 00 15 00 12 00 09 00 14 |................| +00000080 00 11 00 08 00 06 00 03 00 ff 56 00 01 00 00 49 |..........V....I| +00000090 00 0b 00 04 03 00 01 02 00 0a 00 34 00 32 00 0e |...........4.2..| +000000a0 00 0d 00 19 00 0b 00 0c 00 18 00 09 00 0a 00 16 |................| +000000b0 00 17 00 08 00 06 00 07 00 14 00 15 00 04 00 05 |................| +000000c0 00 12 00 13 00 01 00 02 00 03 00 0f 00 10 00 11 |................| +000000d0 00 23 00 00 00 0f 00 01 01 |.#.......| +>>> Flow 2 (server to client) +00000000 15 03 02 00 02 02 56 |......V| diff --git a/libgo/go/crypto/tls/testdata/Server-TLSv12-ALPN b/libgo/go/crypto/tls/testdata/Server-TLSv12-ALPN new file mode 100644 index 00000000000..106244d5a22 --- /dev/null +++ b/libgo/go/crypto/tls/testdata/Server-TLSv12-ALPN @@ -0,0 +1,122 @@ +>>> Flow 1 (client to server) +00000000 16 03 01 01 8a 01 00 01 86 03 03 34 54 69 f3 d7 |...........4Ti..| +00000010 20 9d 1d 74 db 72 e9 2f 51 7c c2 82 0a 9b cb 6d | ..t.r./Q|.....m| +00000020 90 b4 8e a2 1f 2f c7 66 74 8f 33 00 00 d6 c0 30 |...../.ft.3....0| +00000030 c0 2c c0 28 c0 24 c0 14 c0 0a c0 22 c0 21 c0 20 |.,.(.$.....".!. | +00000040 00 a5 00 a3 00 a1 00 9f 00 6b 00 6a 00 69 00 68 |.........k.j.i.h| +00000050 00 39 00 38 00 37 00 36 00 88 00 87 00 86 00 85 |.9.8.7.6........| +00000060 c0 32 c0 2e c0 2a c0 26 c0 0f c0 05 00 9d 00 3d |.2...*.&.......=| +00000070 00 35 00 84 c0 2f c0 2b c0 27 c0 23 c0 13 c0 09 |.5.../.+.'.#....| +00000080 c0 1f c0 1e c0 1d 00 a4 00 a2 00 a0 00 9e 00 67 |...............g| +00000090 00 40 00 3f 00 3e 00 33 00 32 00 31 00 30 00 9a |.@.?.>.3.2.1.0..| +000000a0 00 99 00 98 00 97 00 45 00 44 00 43 00 42 c0 31 |.......E.D.C.B.1| +000000b0 c0 2d c0 29 c0 25 c0 0e c0 04 00 9c 00 3c 00 2f |.-.).%.......<./| +000000c0 00 96 00 41 00 07 c0 11 c0 07 c0 0c c0 02 00 05 |...A............| +000000d0 00 04 c0 12 c0 08 c0 1c c0 1b c0 1a 00 16 00 13 |................| +000000e0 00 10 00 0d c0 0d c0 03 00 0a 00 15 00 12 00 0f |................| +000000f0 00 0c 00 09 00 14 00 11 00 0e 00 0b 00 08 00 06 |................| +00000100 00 03 00 ff 01 00 00 87 00 0b 00 04 03 00 01 02 |................| +00000110 00 0a 00 3a 00 38 00 0e 00 0d 00 19 00 1c 00 0b |...:.8..........| +00000120 00 0c 00 1b 00 18 00 09 00 0a 00 1a 00 16 00 17 |................| +00000130 00 08 00 06 00 07 00 14 00 15 00 04 00 05 00 12 |................| +00000140 00 13 00 01 00 02 00 03 00 0f 00 10 00 11 00 23 |...............#| +00000150 00 00 00 0d 00 20 00 1e 06 01 06 02 06 03 05 01 |..... 
..........| +00000160 05 02 05 03 04 01 04 02 04 03 03 01 03 02 03 03 |................| +00000170 02 01 02 02 02 03 00 0f 00 01 01 00 10 00 10 00 |................| +00000180 0e 06 70 72 6f 74 6f 32 06 70 72 6f 74 6f 31 |..proto2.proto1| +>>> Flow 2 (server to client) +00000000 16 03 03 00 42 02 00 00 3e 03 03 00 00 00 00 00 |....B...>.......| +00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| +00000020 00 00 00 00 00 00 00 00 00 00 00 00 c0 14 00 00 |................| +00000030 16 00 23 00 00 ff 01 00 01 00 00 10 00 09 00 07 |..#.............| +00000040 06 70 72 6f 74 6f 31 16 03 03 02 be 0b 00 02 ba |.proto1.........| +00000050 00 02 b7 00 02 b4 30 82 02 b0 30 82 02 19 a0 03 |......0...0.....| +00000060 02 01 02 02 09 00 85 b0 bb a4 8a 7f b8 ca 30 0d |..............0.| +00000070 06 09 2a 86 48 86 f7 0d 01 01 05 05 00 30 45 31 |..*.H........0E1| +00000080 0b 30 09 06 03 55 04 06 13 02 41 55 31 13 30 11 |.0...U....AU1.0.| +00000090 06 03 55 04 08 13 0a 53 6f 6d 65 2d 53 74 61 74 |..U....Some-Stat| +000000a0 65 31 21 30 1f 06 03 55 04 0a 13 18 49 6e 74 65 |e1!0...U....Inte| +000000b0 72 6e 65 74 20 57 69 64 67 69 74 73 20 50 74 79 |rnet Widgits Pty| +000000c0 20 4c 74 64 30 1e 17 0d 31 30 30 34 32 34 30 39 | Ltd0...10042409| +000000d0 30 39 33 38 5a 17 0d 31 31 30 34 32 34 30 39 30 |0938Z..110424090| +000000e0 39 33 38 5a 30 45 31 0b 30 09 06 03 55 04 06 13 |938Z0E1.0...U...| +000000f0 02 41 55 31 13 30 11 06 03 55 04 08 13 0a 53 6f |.AU1.0...U....So| +00000100 6d 65 2d 53 74 61 74 65 31 21 30 1f 06 03 55 04 |me-State1!0...U.| +00000110 0a 13 18 49 6e 74 65 72 6e 65 74 20 57 69 64 67 |...Internet Widg| +00000120 69 74 73 20 50 74 79 20 4c 74 64 30 81 9f 30 0d |its Pty Ltd0..0.| +00000130 06 09 2a 86 48 86 f7 0d 01 01 01 05 00 03 81 8d |..*.H...........| +00000140 00 30 81 89 02 81 81 00 bb 79 d6 f5 17 b5 e5 bf |.0.......y......| +00000150 46 10 d0 dc 69 be e6 2b 07 43 5a d0 03 2d 8a 7a |F...i..+.CZ..-.z| +00000160 43 85 b7 14 52 e7 a5 65 4c 2c 78 b8 23 8c b5 b4 |C...R..eL,x.#...| +00000170 82 e5 de 1f 95 3b 7e 62 a5 2c a5 33 d6 fe 12 5c |.....;~b.,.3...\| +00000180 7a 56 fc f5 06 bf fa 58 7b 26 3f b5 cd 04 d3 d0 |zV.....X{&?.....| +00000190 c9 21 96 4a c7 f4 54 9f 5a bf ef 42 71 00 fe 18 |.!.J..T.Z..Bq...| +000001a0 99 07 7f 7e 88 7d 7d f1 04 39 c4 a2 2e db 51 c9 |...~.}}..9....Q.| +000001b0 7c e3 c0 4c 3b 32 66 01 cf af b1 1d b8 71 9a 1d ||..L;2f......q..| +000001c0 db db 89 6b ae da 2d 79 02 03 01 00 01 a3 81 a7 |...k..-y........| +000001d0 30 81 a4 30 1d 06 03 55 1d 0e 04 16 04 14 b1 ad |0..0...U........| +000001e0 e2 85 5a cf cb 28 db 69 ce 23 69 de d3 26 8e 18 |..Z..(.i.#i..&..| +000001f0 88 39 30 75 06 03 55 1d 23 04 6e 30 6c 80 14 b1 |.90u..U.#.n0l...| +00000200 ad e2 85 5a cf cb 28 db 69 ce 23 69 de d3 26 8e |...Z..(.i.#i..&.| +00000210 18 88 39 a1 49 a4 47 30 45 31 0b 30 09 06 03 55 |..9.I.G0E1.0...U| +00000220 04 06 13 02 41 55 31 13 30 11 06 03 55 04 08 13 |....AU1.0...U...| +00000230 0a 53 6f 6d 65 2d 53 74 61 74 65 31 21 30 1f 06 |.Some-State1!0..| +00000240 03 55 04 0a 13 18 49 6e 74 65 72 6e 65 74 20 57 |.U....Internet W| +00000250 69 64 67 69 74 73 20 50 74 79 20 4c 74 64 82 09 |idgits Pty Ltd..| +00000260 00 85 b0 bb a4 8a 7f b8 ca 30 0c 06 03 55 1d 13 |.........0...U..| +00000270 04 05 30 03 01 01 ff 30 0d 06 09 2a 86 48 86 f7 |..0....0...*.H..| +00000280 0d 01 01 05 05 00 03 81 81 00 08 6c 45 24 c7 6b |...........lE$.k| +00000290 b1 59 ab 0c 52 cc f2 b0 14 d7 87 9d 7a 64 75 b5 |.Y..R.......zdu.| +000002a0 5a 95 66 e4 c5 2b 8e ae 12 66 1f eb 4f 
38 b3 6e |Z.f..+...f..O8.n| +000002b0 60 d3 92 fd f7 41 08 b5 25 13 b1 18 7a 24 fb 30 |`....A..%...z$.0| +000002c0 1d ba ed 98 b9 17 ec e7 d7 31 59 db 95 d3 1d 78 |.........1Y....x| +000002d0 ea 50 56 5c d5 82 5a 2d 5a 5f 33 c4 b6 d8 c9 75 |.PV\..Z-Z_3....u| +000002e0 90 96 8c 0f 52 98 b5 cd 98 1f 89 20 5f f2 a0 1c |....R...... _...| +000002f0 a3 1b 96 94 dd a9 fd 57 e9 70 e8 26 6d 71 99 9b |.......W.p.&mq..| +00000300 26 6e 38 50 29 6c 90 a7 bd d9 16 03 03 00 cd 0c |&n8P)l..........| +00000310 00 00 c9 03 00 17 41 04 1e 18 37 ef 0d 19 51 88 |......A...7...Q.| +00000320 35 75 71 b5 e5 54 5b 12 2e 8f 09 67 fd a7 24 20 |5uq..T[....g..$ | +00000330 3e b2 56 1c ce 97 28 5e f8 2b 2d 4f 9e f1 07 9f |>.V...(^.+-O....| +00000340 6c 4b 5b 83 56 e2 32 42 e9 58 b6 d7 49 a6 b5 68 |lK[.V.2B.X..I..h| +00000350 1a 41 03 56 6b dc 5a 89 04 01 00 80 2d a0 6e 47 |.A.Vk.Z.....-.nG| +00000360 93 a2 19 17 32 f5 42 58 93 f6 4f d4 e9 4d a4 0f |....2.BX..O..M..| +00000370 fe 4e d7 2c 62 b6 fb 83 37 a3 09 60 4b 69 e2 4c |.N.,b...7..`Ki.L| +00000380 fc b8 4c d1 a6 9a 89 a0 c5 76 f5 62 b7 e8 eb c2 |..L......v.b....| +00000390 fa 0f 0e 61 86 bc 70 da 13 72 8d 87 94 16 9a 8d |...a..p..r......| +000003a0 5f 80 82 92 77 37 4f 9e 55 5d dc 35 42 a3 75 5c |_...w7O.U].5B.u\| +000003b0 ec a4 58 78 66 97 97 da 49 67 2e b6 7e 11 de fb |..Xxf...Ig..~...| +000003c0 e3 8f e8 bf 1d 91 1e 91 20 1b 2a df c6 58 e4 82 |........ .*..X..| +000003d0 ce 37 dd 6f a5 ac 51 3d 65 db 3f f5 16 03 03 00 |.7.o..Q=e.?.....| +000003e0 04 0e 00 00 00 |.....| +>>> Flow 3 (client to server) +00000000 16 03 03 00 46 10 00 00 42 41 04 f3 fc ea d8 50 |....F...BA.....P| +00000010 e6 15 b0 e7 11 c7 6d ee 09 ad 80 d5 54 eb 4f 62 |......m.....T.Ob| +00000020 7d bb a7 2d 28 0c 66 33 42 09 cf 2b 58 f8 58 41 |}..-(.f3B..+X.XA| +00000030 bd 46 51 0a f0 7d 8c 0c 98 9e 26 77 20 fd 5e c1 |.FQ..}....&w .^.| +00000040 a9 b3 e5 c3 6c 05 97 e3 81 fd db 14 03 03 00 01 |....l...........| +00000050 01 16 03 03 00 40 02 2a 28 41 e3 9c 5d 45 d4 45 |.....@.*(A..]E.E| +00000060 51 8c 7a c0 ba b1 8e a4 84 2c f3 83 cd c4 55 5c |Q.z......,....U\| +00000070 d6 5c 6f 72 ab 89 7a c6 d7 9c 2a 54 f0 c4 20 ee |.\or..z...*T.. 
.| +00000080 37 74 9b b6 8c f7 e4 37 2c eb d4 9f 5c 5e 55 a0 |7t.....7,...\^U.| +00000090 e2 5a fe 1e c8 67 |.Z...g| +>>> Flow 4 (server to client) +00000000 16 03 03 00 72 04 00 00 6e 00 00 00 00 00 68 00 |....r...n.....h.| +00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 65 |...............e| +00000020 ea 8b c0 ef ba 59 31 75 33 96 f1 f8 c9 e1 ef 30 |.....Y1u3......0| +00000030 00 a3 a9 1d ab c8 4b 29 94 f2 c8 c8 8d 03 57 ab |......K)......W.| +00000040 56 df 0f 4e 0d 30 13 09 c9 e4 fa 51 4e b3 26 ad |V..N.0.....QN.&.| +00000050 43 9f ae 62 d5 59 23 05 9b 69 8f 5b a8 ba 39 f1 |C..b.Y#..i.[..9.| +00000060 90 84 35 bf 8f 8d d5 39 93 98 ee b9 75 03 3f 91 |..5....9....u.?.| +00000070 e8 56 0b cb 44 a6 7a 14 03 03 00 01 01 16 03 03 |.V..D.z.........| +00000080 00 40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |.@..............| +00000090 00 00 f9 a0 8e 23 34 f1 61 15 a8 4e ae c4 f3 2a |.....#4.a..N...*| +000000a0 a6 f8 ee 1b 65 c4 c0 ff 93 14 74 ed 82 ae 48 a8 |....e.....t...H.| +000000b0 42 fb a9 24 5d dd fd 98 b8 65 73 03 88 99 e1 ed |B..$]....es.....| +000000c0 02 95 17 03 03 00 40 00 00 00 00 00 00 00 00 00 |......@.........| +000000d0 00 00 00 00 00 00 00 b9 b3 f5 41 84 3b 2a a9 c3 |..........A.;*..| +000000e0 9c e3 d4 38 90 76 c1 8c f0 4f 10 1b 04 b5 07 fe |...8.v...O......| +000000f0 79 3d 7b 77 a4 17 0f 4e df 64 70 70 9e 34 8e b6 |y={w...N.dpp.4..| +00000100 db b2 b6 fd 41 fe b3 15 03 03 00 30 00 00 00 00 |....A......0....| +00000110 00 00 00 00 00 00 00 00 00 00 00 00 02 73 de fe |.............s..| +00000120 fa 4b 69 6d 30 69 79 96 7e 4f 2f 04 67 36 96 27 |.Kim0iy.~O/.g6.'| +00000130 67 23 2b dc 7a c4 6c 34 ea fc 79 fd |g#+.z.l4..y.| diff --git a/libgo/go/crypto/tls/testdata/Server-TLSv12-ALPN-NoMatch b/libgo/go/crypto/tls/testdata/Server-TLSv12-ALPN-NoMatch new file mode 100644 index 00000000000..db5881b7685 --- /dev/null +++ b/libgo/go/crypto/tls/testdata/Server-TLSv12-ALPN-NoMatch @@ -0,0 +1,121 @@ +>>> Flow 1 (client to server) +00000000 16 03 01 01 8a 01 00 01 86 03 03 0a a8 82 53 61 |..............Sa| +00000010 68 e0 83 91 71 36 f9 c1 19 ff e8 09 fc 21 9f 03 |h...q6.......!..| +00000020 31 f3 87 4a 04 8c 3d c2 6e 00 32 00 00 d6 c0 30 |1..J..=.n.2....0| +00000030 c0 2c c0 28 c0 24 c0 14 c0 0a c0 22 c0 21 c0 20 |.,.(.$.....".!. 
| +00000040 00 a5 00 a3 00 a1 00 9f 00 6b 00 6a 00 69 00 68 |.........k.j.i.h| +00000050 00 39 00 38 00 37 00 36 00 88 00 87 00 86 00 85 |.9.8.7.6........| +00000060 c0 32 c0 2e c0 2a c0 26 c0 0f c0 05 00 9d 00 3d |.2...*.&.......=| +00000070 00 35 00 84 c0 2f c0 2b c0 27 c0 23 c0 13 c0 09 |.5.../.+.'.#....| +00000080 c0 1f c0 1e c0 1d 00 a4 00 a2 00 a0 00 9e 00 67 |...............g| +00000090 00 40 00 3f 00 3e 00 33 00 32 00 31 00 30 00 9a |.@.?.>.3.2.1.0..| +000000a0 00 99 00 98 00 97 00 45 00 44 00 43 00 42 c0 31 |.......E.D.C.B.1| +000000b0 c0 2d c0 29 c0 25 c0 0e c0 04 00 9c 00 3c 00 2f |.-.).%.......<./| +000000c0 00 96 00 41 00 07 c0 11 c0 07 c0 0c c0 02 00 05 |...A............| +000000d0 00 04 c0 12 c0 08 c0 1c c0 1b c0 1a 00 16 00 13 |................| +000000e0 00 10 00 0d c0 0d c0 03 00 0a 00 15 00 12 00 0f |................| +000000f0 00 0c 00 09 00 14 00 11 00 0e 00 0b 00 08 00 06 |................| +00000100 00 03 00 ff 01 00 00 87 00 0b 00 04 03 00 01 02 |................| +00000110 00 0a 00 3a 00 38 00 0e 00 0d 00 19 00 1c 00 0b |...:.8..........| +00000120 00 0c 00 1b 00 18 00 09 00 0a 00 1a 00 16 00 17 |................| +00000130 00 08 00 06 00 07 00 14 00 15 00 04 00 05 00 12 |................| +00000140 00 13 00 01 00 02 00 03 00 0f 00 10 00 11 00 23 |...............#| +00000150 00 00 00 0d 00 20 00 1e 06 01 06 02 06 03 05 01 |..... ..........| +00000160 05 02 05 03 04 01 04 02 04 03 03 01 03 02 03 03 |................| +00000170 02 01 02 02 02 03 00 0f 00 01 01 00 10 00 10 00 |................| +00000180 0e 06 70 72 6f 74 6f 32 06 70 72 6f 74 6f 31 |..proto2.proto1| +>>> Flow 2 (server to client) +00000000 16 03 03 00 35 02 00 00 31 03 03 00 00 00 00 00 |....5...1.......| +00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| +00000020 00 00 00 00 00 00 00 00 00 00 00 00 c0 14 00 00 |................| +00000030 09 00 23 00 00 ff 01 00 01 00 16 03 03 02 be 0b |..#.............| +00000040 00 02 ba 00 02 b7 00 02 b4 30 82 02 b0 30 82 02 |.........0...0..| +00000050 19 a0 03 02 01 02 02 09 00 85 b0 bb a4 8a 7f b8 |................| +00000060 ca 30 0d 06 09 2a 86 48 86 f7 0d 01 01 05 05 00 |.0...*.H........| +00000070 30 45 31 0b 30 09 06 03 55 04 06 13 02 41 55 31 |0E1.0...U....AU1| +00000080 13 30 11 06 03 55 04 08 13 0a 53 6f 6d 65 2d 53 |.0...U....Some-S| +00000090 74 61 74 65 31 21 30 1f 06 03 55 04 0a 13 18 49 |tate1!0...U....I| +000000a0 6e 74 65 72 6e 65 74 20 57 69 64 67 69 74 73 20 |nternet Widgits | +000000b0 50 74 79 20 4c 74 64 30 1e 17 0d 31 30 30 34 32 |Pty Ltd0...10042| +000000c0 34 30 39 30 39 33 38 5a 17 0d 31 31 30 34 32 34 |4090938Z..110424| +000000d0 30 39 30 39 33 38 5a 30 45 31 0b 30 09 06 03 55 |090938Z0E1.0...U| +000000e0 04 06 13 02 41 55 31 13 30 11 06 03 55 04 08 13 |....AU1.0...U...| +000000f0 0a 53 6f 6d 65 2d 53 74 61 74 65 31 21 30 1f 06 |.Some-State1!0..| +00000100 03 55 04 0a 13 18 49 6e 74 65 72 6e 65 74 20 57 |.U....Internet W| +00000110 69 64 67 69 74 73 20 50 74 79 20 4c 74 64 30 81 |idgits Pty Ltd0.| +00000120 9f 30 0d 06 09 2a 86 48 86 f7 0d 01 01 01 05 00 |.0...*.H........| +00000130 03 81 8d 00 30 81 89 02 81 81 00 bb 79 d6 f5 17 |....0.......y...| +00000140 b5 e5 bf 46 10 d0 dc 69 be e6 2b 07 43 5a d0 03 |...F...i..+.CZ..| +00000150 2d 8a 7a 43 85 b7 14 52 e7 a5 65 4c 2c 78 b8 23 |-.zC...R..eL,x.#| +00000160 8c b5 b4 82 e5 de 1f 95 3b 7e 62 a5 2c a5 33 d6 |........;~b.,.3.| +00000170 fe 12 5c 7a 56 fc f5 06 bf fa 58 7b 26 3f b5 cd |..\zV.....X{&?..| +00000180 04 d3 d0 c9 21 96 4a c7 f4 54 9f 5a bf ef 42 71 
|....!.J..T.Z..Bq| +00000190 00 fe 18 99 07 7f 7e 88 7d 7d f1 04 39 c4 a2 2e |......~.}}..9...| +000001a0 db 51 c9 7c e3 c0 4c 3b 32 66 01 cf af b1 1d b8 |.Q.|..L;2f......| +000001b0 71 9a 1d db db 89 6b ae da 2d 79 02 03 01 00 01 |q.....k..-y.....| +000001c0 a3 81 a7 30 81 a4 30 1d 06 03 55 1d 0e 04 16 04 |...0..0...U.....| +000001d0 14 b1 ad e2 85 5a cf cb 28 db 69 ce 23 69 de d3 |.....Z..(.i.#i..| +000001e0 26 8e 18 88 39 30 75 06 03 55 1d 23 04 6e 30 6c |&...90u..U.#.n0l| +000001f0 80 14 b1 ad e2 85 5a cf cb 28 db 69 ce 23 69 de |......Z..(.i.#i.| +00000200 d3 26 8e 18 88 39 a1 49 a4 47 30 45 31 0b 30 09 |.&...9.I.G0E1.0.| +00000210 06 03 55 04 06 13 02 41 55 31 13 30 11 06 03 55 |..U....AU1.0...U| +00000220 04 08 13 0a 53 6f 6d 65 2d 53 74 61 74 65 31 21 |....Some-State1!| +00000230 30 1f 06 03 55 04 0a 13 18 49 6e 74 65 72 6e 65 |0...U....Interne| +00000240 74 20 57 69 64 67 69 74 73 20 50 74 79 20 4c 74 |t Widgits Pty Lt| +00000250 64 82 09 00 85 b0 bb a4 8a 7f b8 ca 30 0c 06 03 |d...........0...| +00000260 55 1d 13 04 05 30 03 01 01 ff 30 0d 06 09 2a 86 |U....0....0...*.| +00000270 48 86 f7 0d 01 01 05 05 00 03 81 81 00 08 6c 45 |H.............lE| +00000280 24 c7 6b b1 59 ab 0c 52 cc f2 b0 14 d7 87 9d 7a |$.k.Y..R.......z| +00000290 64 75 b5 5a 95 66 e4 c5 2b 8e ae 12 66 1f eb 4f |du.Z.f..+...f..O| +000002a0 38 b3 6e 60 d3 92 fd f7 41 08 b5 25 13 b1 18 7a |8.n`....A..%...z| +000002b0 24 fb 30 1d ba ed 98 b9 17 ec e7 d7 31 59 db 95 |$.0.........1Y..| +000002c0 d3 1d 78 ea 50 56 5c d5 82 5a 2d 5a 5f 33 c4 b6 |..x.PV\..Z-Z_3..| +000002d0 d8 c9 75 90 96 8c 0f 52 98 b5 cd 98 1f 89 20 5f |..u....R...... _| +000002e0 f2 a0 1c a3 1b 96 94 dd a9 fd 57 e9 70 e8 26 6d |..........W.p.&m| +000002f0 71 99 9b 26 6e 38 50 29 6c 90 a7 bd d9 16 03 03 |q..&n8P)l.......| +00000300 00 cd 0c 00 00 c9 03 00 17 41 04 1e 18 37 ef 0d |.........A...7..| +00000310 19 51 88 35 75 71 b5 e5 54 5b 12 2e 8f 09 67 fd |.Q.5uq..T[....g.| +00000320 a7 24 20 3e b2 56 1c ce 97 28 5e f8 2b 2d 4f 9e |.$ >.V...(^.+-O.| +00000330 f1 07 9f 6c 4b 5b 83 56 e2 32 42 e9 58 b6 d7 49 |...lK[.V.2B.X..I| +00000340 a6 b5 68 1a 41 03 56 6b dc 5a 89 04 01 00 80 b9 |..h.A.Vk.Z......| +00000350 0f 79 8a 16 f4 da 8f 27 b4 16 fc c0 51 db ae d1 |.y.....'....Q...| +00000360 af 79 77 d5 d5 a2 13 05 45 20 cc eb ac ed cb 30 |.yw.....E .....0| +00000370 32 2e 2c bd fa 1c 4d b5 32 a6 37 43 c8 5c 2d f8 |2.,...M.2.7C.\-.| +00000380 6e 85 f5 cd 54 92 29 ad 13 7d d5 9e 8c 1d b7 d0 |n...T.)..}......| +00000390 c1 c7 3d e8 ba 4a 0f 9a a6 3e 25 5f 27 62 b1 00 |..=..J...>%_'b..| +000003a0 91 d9 23 48 3f 10 fe c5 e3 07 9a 58 57 6d cc 10 |..#H?......XWm..| +000003b0 3b f8 1a d5 6e 8b 1f 03 6f 82 84 98 b5 f7 71 5d |;...n...o.....q]| +000003c0 c2 ad 60 14 c1 88 07 5a 3d 99 fd a8 c9 9a 03 16 |..`....Z=.......| +000003d0 03 03 00 04 0e 00 00 00 |........| +>>> Flow 3 (client to server) +00000000 16 03 03 00 46 10 00 00 42 41 04 76 aa 4e b9 f9 |....F...BA.v.N..| +00000010 68 85 81 74 7c d9 f9 64 7f bd 09 83 08 5b 4f 76 |h..t|..d.....[Ov| +00000020 6e be 79 b6 4e 97 17 63 e4 b5 1c 77 e5 85 76 8a |n.y.N..c...w..v.| +00000030 5d 9f f1 21 88 ec f9 a7 7c 41 af f9 c5 fe 11 81 |]..!....|A......| +00000040 11 51 8e a7 20 33 5f cf e7 90 90 14 03 03 00 01 |.Q.. 
3_.........| +00000050 01 16 03 03 00 40 44 3e 32 01 71 ac 5a b5 1f 2c |.....@D>2.q.Z..,| +00000060 37 d9 4b 70 72 91 89 d4 d7 c2 c3 e7 ff dc 72 2a |7.Kpr.........r*| +00000070 ba f5 30 b0 e9 dd 48 10 3d cd 98 48 a3 e3 ca de |..0...H.=..H....| +00000080 15 0e 90 8e e5 04 14 74 42 b8 b0 12 cc 68 7b 7d |.......tB....h{}| +00000090 6c 43 72 60 05 0d |lCr`..| +>>> Flow 4 (server to client) +00000000 16 03 03 00 72 04 00 00 6e 00 00 00 00 00 68 00 |....r...n.....h.| +00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 65 |...............e| +00000020 ea 8b c0 ef ba 12 45 17 61 24 cd d2 4c 22 bb 3b |......E.a$..L".;| +00000030 e3 0e d0 ff 83 e9 7c b7 8f 10 3c 16 1c fc c2 44 |......|...<....D| +00000040 ef 45 f8 27 30 56 db ea eb ae f5 b6 17 b2 ef f9 |.E.'0V..........| +00000050 96 0d 2d db e4 59 23 0a fc fa e3 13 48 57 e5 b3 |..-..Y#.....HW..| +00000060 3a d1 f5 5e ca ef d7 3f 7b b5 f4 69 85 c3 bd da |:..^...?{..i....| +00000070 fd 9c 50 05 2f 86 ce 14 03 03 00 01 01 16 03 03 |..P./...........| +00000080 00 40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |.@..............| +00000090 00 00 60 25 1c ed 6f c6 a5 bd b2 29 39 4e 09 d1 |..`%..o....)9N..| +000000a0 64 cc 75 cd df 91 a8 90 9d 03 aa 92 07 f2 d0 8a |d.u.............| +000000b0 60 bb 3e 85 21 22 fe f8 dc 52 3c 4e 82 77 14 14 |`.>.!"...R<N.w..| +000000c0 0f 1f 17 03 03 00 40 00 00 00 00 00 00 00 00 00 |......@.........| +000000d0 00 00 00 00 00 00 00 0b 87 12 62 3e e5 3e 7d 74 |..........b>.>}t| +000000e0 0d ac c4 a9 df 67 1c 5a ad 3e 01 34 03 88 2f 39 |.....g.Z.>.4../9| +000000f0 f7 3c 06 e4 f6 81 43 66 b1 1b ed a5 e5 b6 a8 43 |.<....Cf.......C| +00000100 7f 36 2f b2 da 45 9a 15 03 03 00 30 00 00 00 00 |.6/..E.....0....| +00000110 00 00 00 00 00 00 00 00 00 00 00 00 fa 63 4e c5 |.............cN.| +00000120 77 89 71 56 e3 0a cf 98 da 2f 89 8f 74 8e 76 24 |w.qV...../..t.v$| +00000130 e2 40 a5 9f 29 1b b2 11 ef 7a 55 7f |.@..)....zU.| diff --git a/libgo/go/crypto/tls/testdata/Server-TLSv12-IssueTicketPreDisable b/libgo/go/crypto/tls/testdata/Server-TLSv12-IssueTicketPreDisable deleted file mode 100644 index 30f00268155..00000000000 --- a/libgo/go/crypto/tls/testdata/Server-TLSv12-IssueTicketPreDisable +++ /dev/null @@ -1,87 +0,0 @@ ->>> Flow 1 (client to server) -00000000 16 03 01 00 60 01 00 00 5c 03 03 54 23 54 02 17 |....`...\..T#T..| -00000010 f3 53 13 3d 48 88 c3 19 b9 d1 3d 33 7f f5 99 56 |.S.=H.....=3...V| -00000020 04 71 1b d9 d5 64 8a 0d 4a 54 00 00 00 04 00 05 |.q...d..JT......| -00000030 00 ff 01 00 00 2f 00 23 00 00 00 0d 00 22 00 20 |...../.#.....". 
| -00000040 06 01 06 02 06 03 05 01 05 02 05 03 04 01 04 02 |................| -00000050 04 03 03 01 03 02 03 03 02 01 02 02 02 03 01 01 |................| -00000060 00 0f 00 01 01 |.....| ->>> Flow 2 (server to client) -00000000 16 03 03 00 35 02 00 00 31 03 03 00 00 00 00 00 |....5...1.......| -00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| -00000020 00 00 00 00 00 00 00 00 00 00 00 00 00 05 00 00 |................| -00000030 09 00 23 00 00 ff 01 00 01 00 16 03 03 02 be 0b |..#.............| -00000040 00 02 ba 00 02 b7 00 02 b4 30 82 02 b0 30 82 02 |.........0...0..| -00000050 19 a0 03 02 01 02 02 09 00 85 b0 bb a4 8a 7f b8 |................| -00000060 ca 30 0d 06 09 2a 86 48 86 f7 0d 01 01 05 05 00 |.0...*.H........| -00000070 30 45 31 0b 30 09 06 03 55 04 06 13 02 41 55 31 |0E1.0...U....AU1| -00000080 13 30 11 06 03 55 04 08 13 0a 53 6f 6d 65 2d 53 |.0...U....Some-S| -00000090 74 61 74 65 31 21 30 1f 06 03 55 04 0a 13 18 49 |tate1!0...U....I| -000000a0 6e 74 65 72 6e 65 74 20 57 69 64 67 69 74 73 20 |nternet Widgits | -000000b0 50 74 79 20 4c 74 64 30 1e 17 0d 31 30 30 34 32 |Pty Ltd0...10042| -000000c0 34 30 39 30 39 33 38 5a 17 0d 31 31 30 34 32 34 |4090938Z..110424| -000000d0 30 39 30 39 33 38 5a 30 45 31 0b 30 09 06 03 55 |090938Z0E1.0...U| -000000e0 04 06 13 02 41 55 31 13 30 11 06 03 55 04 08 13 |....AU1.0...U...| -000000f0 0a 53 6f 6d 65 2d 53 74 61 74 65 31 21 30 1f 06 |.Some-State1!0..| -00000100 03 55 04 0a 13 18 49 6e 74 65 72 6e 65 74 20 57 |.U....Internet W| -00000110 69 64 67 69 74 73 20 50 74 79 20 4c 74 64 30 81 |idgits Pty Ltd0.| -00000120 9f 30 0d 06 09 2a 86 48 86 f7 0d 01 01 01 05 00 |.0...*.H........| -00000130 03 81 8d 00 30 81 89 02 81 81 00 bb 79 d6 f5 17 |....0.......y...| -00000140 b5 e5 bf 46 10 d0 dc 69 be e6 2b 07 43 5a d0 03 |...F...i..+.CZ..| -00000150 2d 8a 7a 43 85 b7 14 52 e7 a5 65 4c 2c 78 b8 23 |-.zC...R..eL,x.#| -00000160 8c b5 b4 82 e5 de 1f 95 3b 7e 62 a5 2c a5 33 d6 |........;~b.,.3.| -00000170 fe 12 5c 7a 56 fc f5 06 bf fa 58 7b 26 3f b5 cd |..\zV.....X{&?..| -00000180 04 d3 d0 c9 21 96 4a c7 f4 54 9f 5a bf ef 42 71 |....!.J..T.Z..Bq| -00000190 00 fe 18 99 07 7f 7e 88 7d 7d f1 04 39 c4 a2 2e |......~.}}..9...| -000001a0 db 51 c9 7c e3 c0 4c 3b 32 66 01 cf af b1 1d b8 |.Q.|..L;2f......| -000001b0 71 9a 1d db db 89 6b ae da 2d 79 02 03 01 00 01 |q.....k..-y.....| -000001c0 a3 81 a7 30 81 a4 30 1d 06 03 55 1d 0e 04 16 04 |...0..0...U.....| -000001d0 14 b1 ad e2 85 5a cf cb 28 db 69 ce 23 69 de d3 |.....Z..(.i.#i..| -000001e0 26 8e 18 88 39 30 75 06 03 55 1d 23 04 6e 30 6c |&...90u..U.#.n0l| -000001f0 80 14 b1 ad e2 85 5a cf cb 28 db 69 ce 23 69 de |......Z..(.i.#i.| -00000200 d3 26 8e 18 88 39 a1 49 a4 47 30 45 31 0b 30 09 |.&...9.I.G0E1.0.| -00000210 06 03 55 04 06 13 02 41 55 31 13 30 11 06 03 55 |..U....AU1.0...U| -00000220 04 08 13 0a 53 6f 6d 65 2d 53 74 61 74 65 31 21 |....Some-State1!| -00000230 30 1f 06 03 55 04 0a 13 18 49 6e 74 65 72 6e 65 |0...U....Interne| -00000240 74 20 57 69 64 67 69 74 73 20 50 74 79 20 4c 74 |t Widgits Pty Lt| -00000250 64 82 09 00 85 b0 bb a4 8a 7f b8 ca 30 0c 06 03 |d...........0...| -00000260 55 1d 13 04 05 30 03 01 01 ff 30 0d 06 09 2a 86 |U....0....0...*.| -00000270 48 86 f7 0d 01 01 05 05 00 03 81 81 00 08 6c 45 |H.............lE| -00000280 24 c7 6b b1 59 ab 0c 52 cc f2 b0 14 d7 87 9d 7a |$.k.Y..R.......z| -00000290 64 75 b5 5a 95 66 e4 c5 2b 8e ae 12 66 1f eb 4f |du.Z.f..+...f..O| -000002a0 38 b3 6e 60 d3 92 fd f7 41 08 b5 25 13 b1 18 7a |8.n`....A..%...z| -000002b0 24 fb 30 1d 
ba ed 98 b9 17 ec e7 d7 31 59 db 95 |$.0.........1Y..| -000002c0 d3 1d 78 ea 50 56 5c d5 82 5a 2d 5a 5f 33 c4 b6 |..x.PV\..Z-Z_3..| -000002d0 d8 c9 75 90 96 8c 0f 52 98 b5 cd 98 1f 89 20 5f |..u....R...... _| -000002e0 f2 a0 1c a3 1b 96 94 dd a9 fd 57 e9 70 e8 26 6d |..........W.p.&m| -000002f0 71 99 9b 26 6e 38 50 29 6c 90 a7 bd d9 16 03 03 |q..&n8P)l.......| -00000300 00 04 0e 00 00 00 |......| ->>> Flow 3 (client to server) -00000000 16 03 03 00 86 10 00 00 82 00 80 27 e9 a4 f7 e7 |...........'....| -00000010 df 25 de 84 8c 1f d6 e6 c3 11 28 55 9a c1 91 37 |.%........(U...7| -00000020 84 f5 ba f8 80 0d ca 50 cb 1e 72 f7 97 6f c2 b2 |.......P..r..o..| -00000030 04 4d 13 7c e0 6e a0 1f 91 e1 38 1b a2 c0 55 16 |.M.|.n....8...U.| -00000040 7f 29 fc ed 1c 1a cf 72 14 c3 00 c1 dd 36 36 af |.).....r.....66.| -00000050 a6 e4 a8 be ba ec 13 d0 1e d0 1d fd e1 5b 27 fd |.............['.| -00000060 9a da 2e 12 c8 b0 b9 c2 b9 76 ec 7f 3c 98 b6 63 |.........v..<..c| -00000070 bc da f0 07 7a 3d e7 61 f4 2f 12 80 3b f9 3b cc |....z=.a./..;.;.| -00000080 05 c8 2f 7e 28 b2 73 bf 97 61 29 14 03 03 00 01 |../~(.s..a).....| -00000090 01 16 03 03 00 24 17 59 a9 45 53 46 33 96 50 dd |.....$.Y.ESF3.P.| -000000a0 3e 23 aa 91 38 f8 56 4a 2f 1a f2 b1 44 9b ce 17 |>#..8.VJ/...D...| -000000b0 6b 8a 89 76 bc 67 b8 8b ba 90 |k..v.g....| ->>> Flow 4 (server to client) -00000000 16 03 03 00 72 04 00 00 6e 00 00 00 00 00 68 00 |....r...n.....h.| -00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 65 |...............e| -00000020 ea 4b d1 ef ba 2d db 0c ba 9a d4 20 76 57 c8 ec |.K...-..... vW..| -00000030 dc 2d 77 fb fb 3b 93 5f 53 e0 14 4f 90 fb d6 55 |.-w..;._S..O...U| -00000040 57 8c 8d 0d 25 ea 5d 0d f2 91 e5 12 22 12 ec 7b |W...%.]....."..{| -00000050 5f b6 6e fd 07 59 23 24 fc b1 97 ca ea 56 a5 c2 |_.n..Y#$.....V..| -00000060 a0 e4 9e 99 64 f2 64 d0 75 7a 46 63 e3 dc 21 ed |....d.d.uzFc..!.| -00000070 78 56 e9 e1 ab 66 80 14 03 03 00 01 01 16 03 03 |xV...f..........| -00000080 00 24 fc 14 68 07 17 1f df b7 84 cb fd c1 e0 e4 |.$..h...........| -00000090 f2 1a ea 34 b5 00 7f 70 be c8 1c 0a d6 55 e3 57 |...4...p.....U.W| -000000a0 50 4e 6d 7d 8a 5d 17 03 03 00 21 24 27 50 40 c1 |PNm}.]....!$'P@.| -000000b0 c5 bd c7 9f 95 d9 ba 2e 7b 0e db ea a7 31 81 05 |........{....1..| -000000c0 75 43 b1 63 cf b8 55 92 ef 76 98 a9 15 03 03 00 |uC.c..U..v......| -000000d0 16 d7 ea 3c 79 e7 a6 2f 61 39 ec 4e 95 86 48 5e |...<y../a9.N..H^| -000000e0 75 a0 9e 41 42 89 67 |u..AB.g| diff --git a/libgo/go/crypto/tls/testdata/Server-TLSv12-ResumeDisabled b/libgo/go/crypto/tls/testdata/Server-TLSv12-ResumeDisabled deleted file mode 100644 index db833f65559..00000000000 --- a/libgo/go/crypto/tls/testdata/Server-TLSv12-ResumeDisabled +++ /dev/null @@ -1,87 +0,0 @@ ->>> Flow 1 (client to server) -00000000 16 03 01 00 e8 01 00 00 e4 03 03 54 23 54 02 a5 |...........T#T..| -00000010 10 11 0f 6d e5 2d 2f e8 bb 52 b1 38 3f 65 01 43 |...m.-/..R.8?e.C| -00000020 36 cc 48 f6 09 22 a1 85 20 28 3c 20 35 8b fe 7a |6.H..".. (< 5..z| -00000030 41 3b 59 3a 5d b9 b3 21 f0 62 e9 0d 7b af f5 5d |A;Y:]..!.b..{..]| -00000040 fa 65 1a 40 c8 ca cd 74 8c ef d2 fb 00 04 00 05 |.e.@...t........| -00000050 00 ff 01 00 00 97 00 23 00 68 00 00 00 00 00 00 |.......#.h......| -00000060 00 00 00 00 00 00 00 00 00 00 65 ea 4b d1 ef ba |..........e.K...| -00000070 2d db 0c ba 9a d4 20 76 57 c8 ec dc 2d 77 fb fb |-..... 
vW...-w..| -00000080 3b 93 5f 53 e0 14 4f 90 fb d6 55 57 8c 8d 0d 25 |;._S..O...UW...%| -00000090 ea 5d 0d f2 91 e5 12 22 12 ec 7b 5f b6 6e fd 07 |.]....."..{_.n..| -000000a0 59 23 24 fc b1 97 ca ea 56 a5 c2 a0 e4 9e 99 64 |Y#$.....V......d| -000000b0 f2 64 d0 75 7a 46 63 e3 dc 21 ed 78 56 e9 e1 ab |.d.uzFc..!.xV...| -000000c0 66 80 00 0d 00 22 00 20 06 01 06 02 06 03 05 01 |f....". ........| -000000d0 05 02 05 03 04 01 04 02 04 03 03 01 03 02 03 03 |................| -000000e0 02 01 02 02 02 03 01 01 00 0f 00 01 01 |.............| ->>> Flow 2 (server to client) -00000000 16 03 03 00 31 02 00 00 2d 03 03 00 00 00 00 00 |....1...-.......| -00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| -00000020 00 00 00 00 00 00 00 00 00 00 00 00 00 05 00 00 |................| -00000030 05 ff 01 00 01 00 16 03 03 02 be 0b 00 02 ba 00 |................| -00000040 02 b7 00 02 b4 30 82 02 b0 30 82 02 19 a0 03 02 |.....0...0......| -00000050 01 02 02 09 00 85 b0 bb a4 8a 7f b8 ca 30 0d 06 |.............0..| -00000060 09 2a 86 48 86 f7 0d 01 01 05 05 00 30 45 31 0b |.*.H........0E1.| -00000070 30 09 06 03 55 04 06 13 02 41 55 31 13 30 11 06 |0...U....AU1.0..| -00000080 03 55 04 08 13 0a 53 6f 6d 65 2d 53 74 61 74 65 |.U....Some-State| -00000090 31 21 30 1f 06 03 55 04 0a 13 18 49 6e 74 65 72 |1!0...U....Inter| -000000a0 6e 65 74 20 57 69 64 67 69 74 73 20 50 74 79 20 |net Widgits Pty | -000000b0 4c 74 64 30 1e 17 0d 31 30 30 34 32 34 30 39 30 |Ltd0...100424090| -000000c0 39 33 38 5a 17 0d 31 31 30 34 32 34 30 39 30 39 |938Z..1104240909| -000000d0 33 38 5a 30 45 31 0b 30 09 06 03 55 04 06 13 02 |38Z0E1.0...U....| -000000e0 41 55 31 13 30 11 06 03 55 04 08 13 0a 53 6f 6d |AU1.0...U....Som| -000000f0 65 2d 53 74 61 74 65 31 21 30 1f 06 03 55 04 0a |e-State1!0...U..| -00000100 13 18 49 6e 74 65 72 6e 65 74 20 57 69 64 67 69 |..Internet Widgi| -00000110 74 73 20 50 74 79 20 4c 74 64 30 81 9f 30 0d 06 |ts Pty Ltd0..0..| -00000120 09 2a 86 48 86 f7 0d 01 01 01 05 00 03 81 8d 00 |.*.H............| -00000130 30 81 89 02 81 81 00 bb 79 d6 f5 17 b5 e5 bf 46 |0.......y......F| -00000140 10 d0 dc 69 be e6 2b 07 43 5a d0 03 2d 8a 7a 43 |...i..+.CZ..-.zC| -00000150 85 b7 14 52 e7 a5 65 4c 2c 78 b8 23 8c b5 b4 82 |...R..eL,x.#....| -00000160 e5 de 1f 95 3b 7e 62 a5 2c a5 33 d6 fe 12 5c 7a |....;~b.,.3...\z| -00000170 56 fc f5 06 bf fa 58 7b 26 3f b5 cd 04 d3 d0 c9 |V.....X{&?......| -00000180 21 96 4a c7 f4 54 9f 5a bf ef 42 71 00 fe 18 99 |!.J..T.Z..Bq....| -00000190 07 7f 7e 88 7d 7d f1 04 39 c4 a2 2e db 51 c9 7c |..~.}}..9....Q.|| -000001a0 e3 c0 4c 3b 32 66 01 cf af b1 1d b8 71 9a 1d db |..L;2f......q...| -000001b0 db 89 6b ae da 2d 79 02 03 01 00 01 a3 81 a7 30 |..k..-y........0| -000001c0 81 a4 30 1d 06 03 55 1d 0e 04 16 04 14 b1 ad e2 |..0...U.........| -000001d0 85 5a cf cb 28 db 69 ce 23 69 de d3 26 8e 18 88 |.Z..(.i.#i..&...| -000001e0 39 30 75 06 03 55 1d 23 04 6e 30 6c 80 14 b1 ad |90u..U.#.n0l....| -000001f0 e2 85 5a cf cb 28 db 69 ce 23 69 de d3 26 8e 18 |..Z..(.i.#i..&..| -00000200 88 39 a1 49 a4 47 30 45 31 0b 30 09 06 03 55 04 |.9.I.G0E1.0...U.| -00000210 06 13 02 41 55 31 13 30 11 06 03 55 04 08 13 0a |...AU1.0...U....| -00000220 53 6f 6d 65 2d 53 74 61 74 65 31 21 30 1f 06 03 |Some-State1!0...| -00000230 55 04 0a 13 18 49 6e 74 65 72 6e 65 74 20 57 69 |U....Internet Wi| -00000240 64 67 69 74 73 20 50 74 79 20 4c 74 64 82 09 00 |dgits Pty Ltd...| -00000250 85 b0 bb a4 8a 7f b8 ca 30 0c 06 03 55 1d 13 04 |........0...U...| -00000260 05 30 03 01 01 ff 30 0d 06 09 2a 86 48 86 f7 0d 
|.0....0...*.H...| -00000270 01 01 05 05 00 03 81 81 00 08 6c 45 24 c7 6b b1 |..........lE$.k.| -00000280 59 ab 0c 52 cc f2 b0 14 d7 87 9d 7a 64 75 b5 5a |Y..R.......zdu.Z| -00000290 95 66 e4 c5 2b 8e ae 12 66 1f eb 4f 38 b3 6e 60 |.f..+...f..O8.n`| -000002a0 d3 92 fd f7 41 08 b5 25 13 b1 18 7a 24 fb 30 1d |....A..%...z$.0.| -000002b0 ba ed 98 b9 17 ec e7 d7 31 59 db 95 d3 1d 78 ea |........1Y....x.| -000002c0 50 56 5c d5 82 5a 2d 5a 5f 33 c4 b6 d8 c9 75 90 |PV\..Z-Z_3....u.| -000002d0 96 8c 0f 52 98 b5 cd 98 1f 89 20 5f f2 a0 1c a3 |...R...... _....| -000002e0 1b 96 94 dd a9 fd 57 e9 70 e8 26 6d 71 99 9b 26 |......W.p.&mq..&| -000002f0 6e 38 50 29 6c 90 a7 bd d9 16 03 03 00 04 0e 00 |n8P)l...........| -00000300 00 00 |..| ->>> Flow 3 (client to server) -00000000 16 03 03 00 86 10 00 00 82 00 80 ae 02 dd 1f 1a |................| -00000010 86 83 f5 2f 82 46 4b 29 58 aa a1 b3 56 8b 4e 40 |.../.FK)X...V.N@| -00000020 ef 23 65 67 ad 48 e5 e1 fd ae dd bf 68 fd bd a6 |.#eg.H......h...| -00000030 13 a0 7e 05 ab f7 20 e1 6a 4e d1 37 93 08 1d c9 |..~... .jN.7....| -00000040 37 e0 b5 34 28 bf 20 45 45 da 0f 7e 51 a7 c6 ae |7..4(. EE..~Q...| -00000050 61 6c 07 1b 73 ef da 6e 25 c4 ed be e3 3f da ae |al..s..n%....?..| -00000060 cd 3c 17 9c 2e ee fb 47 9d b3 a1 b2 c3 5d e0 83 |.<.....G.....]..| -00000070 74 20 37 2d 72 d6 d0 4d 58 0e 26 1c 50 22 95 08 |t 7-r..MX.&.P"..| -00000080 7d e0 5f 86 99 9e 2c 2e a7 a0 7f 14 03 03 00 01 |}._...,.........| -00000090 01 16 03 03 00 24 a2 ab 41 25 a5 cf 04 18 1d 98 |.....$..A%......| -000000a0 88 6c 59 21 86 33 54 f4 35 b4 21 6e a5 29 d5 6e |.lY!.3T.5.!n.).n| -000000b0 3d 08 72 b0 af 46 b5 8f 6b 86 |=.r..F..k.| ->>> Flow 4 (server to client) -00000000 14 03 03 00 01 01 16 03 03 00 24 59 20 4d c2 17 |..........$Y M..| -00000010 8b 3c 9b 33 d9 f9 ef fb 80 18 1f 67 a7 58 12 89 |.<.3.......g.X..| -00000020 4e 73 0f 2d 7b e6 c4 a6 79 73 01 da 22 e8 54 17 |Ns.-{...ys..".T.| -00000030 03 03 00 21 36 ca 64 0f 4a 12 a5 50 3d 97 bb 39 |...!6.d.J..P=..9| -00000040 02 fc ed d1 82 6a 9a 2e 21 79 f6 e1 b3 cc 32 db |.....j..!y....2.| -00000050 0f 5d b3 fb a5 15 03 03 00 16 51 f4 be 57 7a df |.]........Q..Wz.| -00000060 f1 f2 bd b5 51 5e 45 80 be 0b 9a 0c d1 19 3c 79 |....Q^E.......<y| diff --git a/libgo/go/crypto/tls/tls_test.go b/libgo/go/crypto/tls/tls_test.go index f8c94ff35d4..e82579eee9f 100644 --- a/libgo/go/crypto/tls/tls_test.go +++ b/libgo/go/crypto/tls/tls_test.go @@ -5,6 +5,7 @@ package tls import ( + "bytes" "fmt" "io" "net" @@ -235,3 +236,47 @@ func testConnReadNonzeroAndEOF(t *testing.T, delay time.Duration) error { } return nil } + +func TestTLSUniqueMatches(t *testing.T) { + ln := newLocalListener(t) + defer ln.Close() + + serverTLSUniques := make(chan []byte) + go func() { + for i := 0; i < 2; i++ { + sconn, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + serverConfig := *testConfig + srv := Server(sconn, &serverConfig) + if err := srv.Handshake(); err != nil { + t.Fatal(err) + } + serverTLSUniques <- srv.ConnectionState().TLSUnique + } + }() + + clientConfig := *testConfig + clientConfig.ClientSessionCache = NewLRUClientSessionCache(1) + conn, err := Dial("tcp", ln.Addr().String(), &clientConfig) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(conn.ConnectionState().TLSUnique, <-serverTLSUniques) { + t.Error("client and server channel bindings differ") + } + conn.Close() + + conn, err = Dial("tcp", ln.Addr().String(), &clientConfig) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + if !conn.ConnectionState().DidResume { + 
t.Error("second session did not use resumption") + } + if !bytes.Equal(conn.ConnectionState().TLSUnique, <-serverTLSUniques) { + t.Error("client and server channel bindings differ when session resumption is used") + } +} diff --git a/libgo/go/crypto/x509/pem_decrypt_test.go b/libgo/go/crypto/x509/pem_decrypt_test.go index 59ba6f90019..13e4700bdda 100644 --- a/libgo/go/crypto/x509/pem_decrypt_test.go +++ b/libgo/go/crypto/x509/pem_decrypt_test.go @@ -14,7 +14,7 @@ import ( func TestDecrypt(t *testing.T) { for i, data := range testData { - t.Logf("test %d. %s", i, data.kind) + t.Logf("test %v. %v", i, data.kind) block, rest := pem.Decode(data.pemData) if len(rest) > 0 { t.Error("extra data") @@ -39,7 +39,7 @@ func TestDecrypt(t *testing.T) { func TestEncrypt(t *testing.T) { for i, data := range testData { - t.Logf("test %d. %s", i, data.kind) + t.Logf("test %v. %v", i, data.kind) plainDER, err := base64.StdEncoding.DecodeString(data.plainDER) if err != nil { t.Fatal("cannot decode test DER data: ", err) diff --git a/libgo/go/crypto/x509/pkix/pkix.go b/libgo/go/crypto/x509/pkix/pkix.go index 58c1e54d100..8768b785908 100644 --- a/libgo/go/crypto/x509/pkix/pkix.go +++ b/libgo/go/crypto/x509/pkix/pkix.go @@ -164,7 +164,7 @@ type TBSCertificateList struct { Signature AlgorithmIdentifier Issuer RDNSequence ThisUpdate time.Time - NextUpdate time.Time + NextUpdate time.Time `asn1:"optional"` RevokedCertificates []RevokedCertificate `asn1:"optional"` Extensions []Extension `asn1:"tag:0,optional,explicit"` } diff --git a/libgo/go/crypto/x509/root_unix.go b/libgo/go/crypto/x509/root_unix.go index 11ad3c440d2..f77d6c0c57f 100644 --- a/libgo/go/crypto/x509/root_unix.go +++ b/libgo/go/crypto/x509/root_unix.go @@ -15,6 +15,15 @@ var certFiles = []string{ "/etc/ssl/ca-bundle.pem", // OpenSUSE "/etc/ssl/cert.pem", // OpenBSD "/usr/local/share/certs/ca-root-nss.crt", // FreeBSD/DragonFly + "/etc/pki/tls/cacert.pem", // OpenELEC + "/etc/certs/ca-certificates.crt", // Solaris 11.2+ +} + +// Possible directories with certificate files; stop after successfully +// reading at least one file from a directory. +var certDirectories = []string{ + "/system/etc/security/cacerts", // Android + } func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { @@ -32,6 +41,24 @@ func initSystemRoots() { } } + for _, directory := range certDirectories { + fis, err := ioutil.ReadDir(directory) + if err != nil { + continue + } + rootsAdded := false + for _, fi := range fis { + data, err := ioutil.ReadFile(directory + "/" + fi.Name()) + if err == nil && roots.AppendCertsFromPEM(data) { + rootsAdded = true + } + } + if rootsAdded { + systemRoots = roots + return + } + } + // All of the files failed to load. systemRoots will be nil which will // trigger a specific error at verification time. } diff --git a/libgo/go/crypto/x509/verify.go b/libgo/go/crypto/x509/verify.go index 5fd8e371747..ec1981423db 100644 --- a/libgo/go/crypto/x509/verify.go +++ b/libgo/go/crypto/x509/verify.go @@ -116,10 +116,9 @@ func (e UnknownAuthorityError) Error() string { } // SystemRootsError results when we fail to load the system root certificates. -type SystemRootsError struct { -} +type SystemRootsError struct{} -func (e SystemRootsError) Error() string { +func (SystemRootsError) Error() string { return "x509: failed to load system roots and no roots provided" } @@ -206,6 +205,9 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V // needed. 
If successful, it returns one or more chains where the first // element of the chain is c and the last element is from opts.Roots. // +// If opts.Roots is nil and system roots are unavailable the returned error +// will be of type SystemRootsError. +// // WARNING: this doesn't do any revocation checking. func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) { // Use Windows's own verification and chain building. diff --git a/libgo/go/crypto/x509/x509.go b/libgo/go/crypto/x509/x509.go index c347fb384dc..7a37b98e317 100644 --- a/libgo/go/crypto/x509/x509.go +++ b/libgo/go/crypto/x509/x509.go @@ -494,6 +494,11 @@ type Certificate struct { BasicConstraintsValid bool // if true then the next two fields are valid. IsCA bool MaxPathLen int + // MaxPathLenZero indicates that BasicConstraintsValid==true and + // MaxPathLen==0 should be interpreted as an actual maximum path length + // of zero. Otherwise, that combination is interpreted as MaxPathLen + // not being set. + MaxPathLenZero bool SubjectKeyId []byte AuthorityKeyId []byte @@ -913,6 +918,7 @@ func parseCertificate(in *certificate) (*Certificate, error) { out.BasicConstraintsValid = true out.IsCA = constraints.IsCA out.MaxPathLen = constraints.MaxPathLen + out.MaxPathLenZero = out.MaxPathLen == 0 continue } case 17: @@ -1227,8 +1233,15 @@ func buildExtensions(template *Certificate) (ret []pkix.Extension, err error) { } if template.BasicConstraintsValid && !oidInExtensions(oidExtensionBasicConstraints, template.ExtraExtensions) { + // Leaving MaxPathLen as zero indicates that no maximum path + // length is desired, unless MaxPathLenZero is set. A value of + // -1 causes encoding/asn1 to omit the value as desired. + maxPathLen := template.MaxPathLen + if maxPathLen == 0 && !template.MaxPathLenZero { + maxPathLen = -1 + } ret[n].Id = oidExtensionBasicConstraints - ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, template.MaxPathLen}) + ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, maxPathLen}) ret[n].Critical = true if err != nil { return @@ -1328,7 +1341,7 @@ func buildExtensions(template *Certificate) (ret []pkix.Extension, err error) { dp := distributionPoint{ DistributionPoint: distributionPointName{ - FullName: asn1.RawValue{Tag: 0, Class: 2, Bytes: rawFullName}, + FullName: asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: rawFullName}, }, } crlDp = append(crlDp, dp) @@ -1657,7 +1670,7 @@ var oidExtensionRequest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 14} // CreateCertificateRequest creates a new certificate based on a template. The // following members of template are used: Subject, Attributes, -// SignatureAlgorithm, Extension, DNSNames, EmailAddresses, and IPAddresses. +// SignatureAlgorithm, Extensions, DNSNames, EmailAddresses, and IPAddresses. // The private key is the private key of the signer. // // The returned slice is the certificate request in DER encoding. diff --git a/libgo/go/crypto/x509/x509_test.go b/libgo/go/crypto/x509/x509_test.go index 41d186b5b64..4f1f0c2cc69 100644 --- a/libgo/go/crypto/x509/x509_test.go +++ b/libgo/go/crypto/x509/x509_test.go @@ -707,6 +707,17 @@ func TestParseDERCRL(t *testing.T) { // Can't check the signature here without a package cycle. 
} +func TestCRLWithoutExpiry(t *testing.T) { + derBytes := fromBase64("MIHYMIGZMAkGByqGSM44BAMwEjEQMA4GA1UEAxMHQ2FybERTUxcNOTkwODI3MDcwMDAwWjBpMBMCAgDIFw05OTA4MjIwNzAwMDBaMBMCAgDJFw05OTA4MjIwNzAwMDBaMBMCAgDTFw05OTA4MjIwNzAwMDBaMBMCAgDSFw05OTA4MjIwNzAwMDBaMBMCAgDUFw05OTA4MjQwNzAwMDBaMAkGByqGSM44BAMDLwAwLAIUfmVSdjP+NHMX0feW+aDU2G1cfT0CFAJ6W7fVWxjBz4fvftok8yqDnDWh") + certList, err := ParseDERCRL(derBytes) + if err != nil { + t.Fatal(err) + } + if !certList.TBSCertList.NextUpdate.IsZero() { + t.Errorf("NextUpdate is not the zero value") + } +} + func TestParsePEMCRL(t *testing.T) { pemBytes := fromBase64(pemCRLBase64) certList, err := ParseCRL(pemBytes) @@ -729,8 +740,9 @@ func TestParsePEMCRL(t *testing.T) { func TestImports(t *testing.T) { t.Skip("gccgo does not have a go command") - if runtime.GOOS == "nacl" { - t.Skip("skipping on nacl") + switch runtime.GOOS { + case "android", "nacl": + t.Skipf("skipping on %s", runtime.GOOS) } if err := exec.Command("go", "run", "x509_test_import.go").Run(); err != nil { @@ -851,7 +863,7 @@ func TestCertificateRequestOverrides(t *testing.T) { // An explicit extension should override the DNSNames from the // template. ExtraExtensions: []pkix.Extension{ - pkix.Extension{ + { Id: oidExtensionSubjectAltName, Value: sanContents, }, @@ -869,11 +881,11 @@ func TestCertificateRequestOverrides(t *testing.T) { // with two extension attributes. template.Attributes = []pkix.AttributeTypeAndValueSET{ - pkix.AttributeTypeAndValueSET{ + { Type: oidExtensionRequest, Value: [][]pkix.AttributeTypeAndValue{ - []pkix.AttributeTypeAndValue{ - pkix.AttributeTypeAndValue{ + { + { Type: oidExtensionAuthorityInfoAccess, Value: []byte("foo"), }, @@ -942,6 +954,69 @@ func TestParseCertificateRequest(t *testing.T) { } } +func TestMaxPathLen(t *testing.T) { + block, _ := pem.Decode([]byte(pemPrivateKey)) + rsaPriv, err := ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + t.Fatalf("Failed to parse private key: %s", err) + } + + template := &Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: "Σ Acme Co", + }, + NotBefore: time.Unix(1000, 0), + NotAfter: time.Unix(100000, 0), + + BasicConstraintsValid: true, + IsCA: true, + } + + serialiseAndParse := func(template *Certificate) *Certificate { + derBytes, err := CreateCertificate(rand.Reader, template, template, &rsaPriv.PublicKey, rsaPriv) + if err != nil { + t.Fatalf("failed to create certificate: %s", err) + return nil + } + + cert, err := ParseCertificate(derBytes) + if err != nil { + t.Fatalf("failed to parse certificate: %s", err) + return nil + } + + return cert + } + + cert1 := serialiseAndParse(template) + if m := cert1.MaxPathLen; m != -1 { + t.Errorf("Omitting MaxPathLen didn't turn into -1, got %d", m) + } + if cert1.MaxPathLenZero { + t.Errorf("Omitting MaxPathLen resulted in MaxPathLenZero") + } + + template.MaxPathLen = 1 + cert2 := serialiseAndParse(template) + if m := cert2.MaxPathLen; m != 1 { + t.Errorf("Setting MaxPathLen didn't work. 
Got %d but set 1", m) + } + if cert2.MaxPathLenZero { + t.Errorf("Setting MaxPathLen resulted in MaxPathLenZero") + } + + template.MaxPathLen = 0 + template.MaxPathLenZero = true + cert3 := serialiseAndParse(template) + if m := cert3.MaxPathLen; m != 0 { + t.Errorf("Setting MaxPathLenZero didn't work, got %d", m) + } + if !cert3.MaxPathLenZero { + t.Errorf("Setting MaxPathLen to zero didn't result in MaxPathLenZero") + } +} + // This CSR was generated with OpenSSL: // openssl req -out CSR.csr -new -newkey rsa:2048 -nodes -keyout privateKey.key -config openssl.cnf // diff --git a/libgo/go/database/sql/convert_test.go b/libgo/go/database/sql/convert_test.go index 6e248301283..98af9fb64c5 100644 --- a/libgo/go/database/sql/convert_test.go +++ b/libgo/go/database/sql/convert_test.go @@ -283,6 +283,26 @@ func TestValueConverters(t *testing.T) { // Tests that assigning to RawBytes doesn't allocate (and also works). func TestRawBytesAllocs(t *testing.T) { + var tests = []struct { + name string + in interface{} + want string + }{ + {"uint64", uint64(12345678), "12345678"}, + {"uint32", uint32(1234), "1234"}, + {"uint16", uint16(12), "12"}, + {"uint8", uint8(1), "1"}, + {"uint", uint(123), "123"}, + {"int", int(123), "123"}, + {"int8", int8(1), "1"}, + {"int16", int16(12), "12"}, + {"int32", int32(1234), "1234"}, + {"int64", int64(12345678), "12345678"}, + {"float32", float32(1.5), "1.5"}, + {"float64", float64(64), "64"}, + {"bool", false, "false"}, + } + buf := make(RawBytes, 10) test := func(name string, in interface{}, want string) { if err := convertAssign(&buf, in); err != nil { @@ -301,20 +321,11 @@ func TestRawBytesAllocs(t *testing.T) { t.Fatalf("%s: got %q (len %d); want %q (len %d)", name, buf, len(buf), want, len(want)) } } + n := testing.AllocsPerRun(100, func() { - test("uint64", uint64(12345678), "12345678") - test("uint32", uint32(1234), "1234") - test("uint16", uint16(12), "12") - test("uint8", uint8(1), "1") - test("uint", uint(123), "123") - test("int", int(123), "123") - test("int8", int8(1), "1") - test("int16", int16(12), "12") - test("int32", int32(1234), "1234") - test("int64", int64(12345678), "12345678") - test("float32", float32(1.5), "1.5") - test("float64", float64(64), "64") - test("bool", false, "false") + for _, tt := range tests { + test(tt.name, tt.in, tt.want) + } }) // The numbers below are only valid for 64-bit interface word sizes, diff --git a/libgo/go/database/sql/fakedb_test.go b/libgo/go/database/sql/fakedb_test.go index c7db0dd77b3..a993fd46ede 100644 --- a/libgo/go/database/sql/fakedb_test.go +++ b/libgo/go/database/sql/fakedb_test.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "log" + "sort" "strconv" "strings" "sync" @@ -126,6 +127,29 @@ func init() { Register("test", fdriver) } +func contains(list []string, y string) bool { + for _, x := range list { + if x == y { + return true + } + } + return false +} + +type Dummy struct { + driver.Driver +} + +func TestDrivers(t *testing.T) { + unregisterAllDrivers() + Register("test", fdriver) + Register("invalid", Dummy{}) + all := Drivers() + if len(all) < 2 || !sort.StringsAreSorted(all) || !contains(all, "test") || !contains(all, "invalid") { + t.Fatalf("Drivers = %v, want sorted list with at least [invalid, test]", all) + } +} + // Supports dsn forms: // <dbname> // <dbname>;<opts> (only currently supported option is `badConn`, diff --git a/libgo/go/database/sql/sql.go b/libgo/go/database/sql/sql.go index 765b80c60a2..6e6f246aeec 100644 --- a/libgo/go/database/sql/sql.go +++ b/libgo/go/database/sql/sql.go @@ -13,12 
+13,12 @@ package sql import ( - "container/list" "database/sql/driver" "errors" "fmt" "io" "runtime" + "sort" "sync" ) @@ -37,6 +37,21 @@ func Register(name string, driver driver.Driver) { drivers[name] = driver } +func unregisterAllDrivers() { + // For tests. + drivers = make(map[string]driver.Driver) +} + +// Drivers returns a sorted list of the names of the registered drivers. +func Drivers() []string { + var list []string + for name := range drivers { + list = append(list, name) + } + sort.Strings(list) + return list +} + // RawBytes is a byte slice that holds a reference to memory owned by // the database itself. After a Scan into a RawBytes, the slice is only // valid until the next call to Next, Scan, or Close. @@ -198,8 +213,8 @@ type DB struct { dsn string mu sync.Mutex // protects following fields - freeConn *list.List // of *driverConn - connRequests *list.List // of connRequest + freeConn []*driverConn + connRequests []chan connRequest numOpen int pendingOpens int // Used to signal the need for new connections @@ -232,9 +247,6 @@ type driverConn struct { inUse bool onPut []func() // code (with db.mu held) run when conn is next returned dbmuClosed bool // same as closed, but guarded by db.mu, for connIfFree - // This is the Element returned by db.freeConn.PushFront(conn). - // It's used by connIfFree to remove the conn from the freeConn list. - listElem *list.Element } func (dc *driverConn) releaseConn(err error) { @@ -437,8 +449,6 @@ func Open(driverName, dataSourceName string) (*DB, error) { openerCh: make(chan struct{}, connectionRequestQueueSize), lastPut: make(map[*driverConn]string), } - db.freeConn = list.New() - db.connRequests = list.New() go db.connectionOpener() return db, nil } @@ -469,17 +479,13 @@ func (db *DB) Close() error { } close(db.openerCh) var err error - fns := make([]func() error, 0, db.freeConn.Len()) - for db.freeConn.Front() != nil { - dc := db.freeConn.Front().Value.(*driverConn) - dc.listElem = nil + fns := make([]func() error, 0, len(db.freeConn)) + for _, dc := range db.freeConn { fns = append(fns, dc.closeDBLocked()) - db.freeConn.Remove(db.freeConn.Front()) } + db.freeConn = nil db.closed = true - for db.connRequests.Front() != nil { - req := db.connRequests.Front().Value.(connRequest) - db.connRequests.Remove(db.connRequests.Front()) + for _, req := range db.connRequests { close(req) } db.mu.Unlock() @@ -527,11 +533,11 @@ func (db *DB) SetMaxIdleConns(n int) { db.maxIdle = db.maxOpen } var closing []*driverConn - for db.freeConn.Len() > db.maxIdleConnsLocked() { - dc := db.freeConn.Back().Value.(*driverConn) - dc.listElem = nil - db.freeConn.Remove(db.freeConn.Back()) - closing = append(closing, dc) + idleCount := len(db.freeConn) + maxIdle := db.maxIdleConnsLocked() + if idleCount > maxIdle { + closing = db.freeConn[maxIdle:] + db.freeConn = db.freeConn[:maxIdle] } db.mu.Unlock() for _, c := range closing { @@ -564,7 +570,7 @@ func (db *DB) SetMaxOpenConns(n int) { // If there are connRequests and the connection limit hasn't been reached, // then tell the connectionOpener to open new connections. func (db *DB) maybeOpenNewConnections() { - numRequests := db.connRequests.Len() - db.pendingOpens + numRequests := len(db.connRequests) - db.pendingOpens if db.maxOpen > 0 { numCanOpen := db.maxOpen - (db.numOpen + db.pendingOpens) if numRequests > numCanOpen { @@ -580,7 +586,7 @@ func (db *DB) maybeOpenNewConnections() { // Runs in a separate goroutine, opens new connections when requested. 
func (db *DB) connectionOpener() { - for _ = range db.openerCh { + for range db.openerCh { db.openNewConnection() } } @@ -616,7 +622,10 @@ func (db *DB) openNewConnection() { // connRequest represents one request for a new connection // When there are no idle connections available, DB.conn will create // a new connRequest and put it on the db.connRequests list. -type connRequest chan<- interface{} // takes either a *driverConn or an error +type connRequest struct { + conn *driverConn + err error +} var errDBClosed = errors.New("sql: database is closed") @@ -630,32 +639,21 @@ func (db *DB) conn() (*driverConn, error) { // If db.maxOpen > 0 and the number of open connections is over the limit // and there are no free connection, make a request and wait. - if db.maxOpen > 0 && db.numOpen >= db.maxOpen && db.freeConn.Len() == 0 { + if db.maxOpen > 0 && db.numOpen >= db.maxOpen && len(db.freeConn) == 0 { // Make the connRequest channel. It's buffered so that the // connectionOpener doesn't block while waiting for the req to be read. - ch := make(chan interface{}, 1) - req := connRequest(ch) - db.connRequests.PushBack(req) + req := make(chan connRequest, 1) + db.connRequests = append(db.connRequests, req) db.maybeOpenNewConnections() db.mu.Unlock() - ret, ok := <-ch - if !ok { - return nil, errDBClosed - } - switch ret.(type) { - case *driverConn: - return ret.(*driverConn), nil - case error: - return nil, ret.(error) - default: - panic("sql: Unexpected type passed through connRequest.ch") - } + ret := <-req + return ret.conn, ret.err } - if f := db.freeConn.Front(); f != nil { - conn := f.Value.(*driverConn) - conn.listElem = nil - db.freeConn.Remove(f) + if c := len(db.freeConn); c > 0 { + conn := db.freeConn[0] + copy(db.freeConn, db.freeConn[1:]) + db.freeConn = db.freeConn[:c-1] conn.inUse = true db.mu.Unlock() return conn, nil @@ -702,9 +700,15 @@ func (db *DB) connIfFree(wanted *driverConn) (*driverConn, error) { if wanted.inUse { return nil, errConnBusy } - if wanted.listElem != nil { - db.freeConn.Remove(wanted.listElem) - wanted.listElem = nil + idx := -1 + for ii, v := range db.freeConn { + if v == wanted { + idx = ii + break + } + } + if idx >= 0 { + db.freeConn = append(db.freeConn[:idx], db.freeConn[idx+1:]...) wanted.inUse = true return wanted, nil } @@ -793,18 +797,23 @@ func (db *DB) putConn(dc *driverConn, err error) { // If a connRequest was fulfilled or the *driverConn was placed in the // freeConn list, then true is returned, otherwise false is returned. func (db *DB) putConnDBLocked(dc *driverConn, err error) bool { - if db.connRequests.Len() > 0 { - req := db.connRequests.Front().Value.(connRequest) - db.connRequests.Remove(db.connRequests.Front()) - if err != nil { - req <- err - } else { + if c := len(db.connRequests); c > 0 { + req := db.connRequests[0] + // This copy is O(n) but in practice faster than a linked list. + // TODO: consider compacting it down less often and + // moving the base instead? + copy(db.connRequests, db.connRequests[1:]) + db.connRequests = db.connRequests[:c-1] + if err == nil { dc.inUse = true - req <- dc + } + req <- connRequest{ + conn: dc, + err: err, } return true - } else if err == nil && !db.closed && db.maxIdleConnsLocked() > db.freeConn.Len() { - dc.listElem = db.freeConn.PushFront(dc) + } else if err == nil && !db.closed && db.maxIdleConnsLocked() > len(db.freeConn) { + db.freeConn = append(db.freeConn, dc) return true } return false @@ -1050,6 +1059,13 @@ type Tx struct { // or Rollback. 
once done, all operations fail with // ErrTxDone. done bool + + // All Stmts prepared for this transaction. These will be closed after the + // transaction has been committed or rolled back. + stmts struct { + sync.Mutex + v []*Stmt + } } var ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back") @@ -1071,6 +1087,15 @@ func (tx *Tx) grabConn() (*driverConn, error) { return tx.dc, nil } +// Closes all Stmts prepared for this transaction. +func (tx *Tx) closePrepared() { + tx.stmts.Lock() + for _, stmt := range tx.stmts.v { + stmt.Close() + } + tx.stmts.Unlock() +} + // Commit commits the transaction. func (tx *Tx) Commit() error { if tx.done { @@ -1078,8 +1103,12 @@ func (tx *Tx) Commit() error { } defer tx.close() tx.dc.Lock() - defer tx.dc.Unlock() - return tx.txi.Commit() + err := tx.txi.Commit() + tx.dc.Unlock() + if err != driver.ErrBadConn { + tx.closePrepared() + } + return err } // Rollback aborts the transaction. @@ -1089,8 +1118,12 @@ func (tx *Tx) Rollback() error { } defer tx.close() tx.dc.Lock() - defer tx.dc.Unlock() - return tx.txi.Rollback() + err := tx.txi.Rollback() + tx.dc.Unlock() + if err != driver.ErrBadConn { + tx.closePrepared() + } + return err } // Prepare creates a prepared statement for use within a transaction. @@ -1134,6 +1167,9 @@ func (tx *Tx) Prepare(query string) (*Stmt, error) { }, query: query, } + tx.stmts.Lock() + tx.stmts.v = append(tx.stmts.v, stmt) + tx.stmts.Unlock() return stmt, nil } @@ -1162,7 +1198,7 @@ func (tx *Tx) Stmt(stmt *Stmt) *Stmt { dc.Lock() si, err := dc.ci.Prepare(stmt.query) dc.Unlock() - return &Stmt{ + txs := &Stmt{ db: tx.db, tx: tx, txsi: &driverStmt{ @@ -1172,6 +1208,10 @@ func (tx *Tx) Stmt(stmt *Stmt) *Stmt { query: stmt.query, stickyErr: err, } + tx.stmts.Lock() + tx.stmts.v = append(tx.stmts.v, txs) + tx.stmts.Unlock() + return txs } // Exec executes a query that doesn't return rows. @@ -1333,15 +1373,12 @@ func (s *Stmt) connStmt() (ci *driverConn, releaseConn func(error), si driver.St return ci, releaseConn, s.txsi.si, nil } - var cs connStmt - match := false for i := 0; i < len(s.css); i++ { v := s.css[i] _, err := s.db.connIfFree(v.dc) if err == nil { - match = true - cs = v - break + s.mu.Unlock() + return v.dc, v.dc.releaseConn, v.si, nil } if err == errConnClosed { // Lazily remove dead conn from our freelist. @@ -1353,28 +1390,41 @@ func (s *Stmt) connStmt() (ci *driverConn, releaseConn func(error), si driver.St } s.mu.Unlock() - // Make a new conn if all are busy. - // TODO(bradfitz): or wait for one? make configurable later? - if !match { - dc, err := s.db.conn() - if err != nil { - return nil, nil, nil, err - } - dc.Lock() - si, err := dc.prepareLocked(s.query) - dc.Unlock() - if err != nil { - s.db.putConn(dc, err) - return nil, nil, nil, err + // If all connections are busy, either wait for one to become available (if + // we've already hit the maximum number of open connections) or create a + // new one. + // + // TODO(bradfitz): or always wait for one? make configurable later? + dc, err := s.db.conn() + if err != nil { + return nil, nil, nil, err + } + + // Do another pass over the list to see whether this statement has + // already been prepared on the connection assigned to us. 
+ s.mu.Lock() + for _, v := range s.css { + if v.dc == dc { + s.mu.Unlock() + return dc, dc.releaseConn, v.si, nil } - s.mu.Lock() - cs = connStmt{dc, si} - s.css = append(s.css, cs) - s.mu.Unlock() } + s.mu.Unlock() + + // No luck; we need to prepare the statement on this connection + dc.Lock() + si, err = dc.prepareLocked(s.query) + dc.Unlock() + if err != nil { + s.db.putConn(dc, err) + return nil, nil, nil, err + } + s.mu.Lock() + cs := connStmt{dc, si} + s.css = append(s.css, cs) + s.mu.Unlock() - conn := cs.dc - return conn, conn.releaseConn, cs.si, nil + return dc, dc.releaseConn, si, nil } // Query executes a prepared query statement with the given arguments diff --git a/libgo/go/database/sql/sql_test.go b/libgo/go/database/sql/sql_test.go index 7971f149174..34efdf254c6 100644 --- a/libgo/go/database/sql/sql_test.go +++ b/libgo/go/database/sql/sql_test.go @@ -24,7 +24,14 @@ func init() { } freedFrom := make(map[dbConn]string) putConnHook = func(db *DB, c *driverConn) { - if c.listElem != nil { + idx := -1 + for i, v := range db.freeConn { + if v == c { + idx = i + break + } + } + if idx >= 0 { // print before panic, as panic may get lost due to conflicting panic // (all goroutines asleep) elsewhere, since we might not unlock // the mutex in freeConn here. @@ -79,15 +86,14 @@ func closeDB(t testing.TB, db *DB) { t.Errorf("Error closing fakeConn: %v", err) } }) - for node, i := db.freeConn.Front(), 0; node != nil; node, i = node.Next(), i+1 { - dc := node.Value.(*driverConn) + for i, dc := range db.freeConn { if n := len(dc.openStmt); n > 0 { // Just a sanity check. This is legal in // general, but if we make the tests clean up // their statements first, then we can safely // verify this is always zero here, and any // other value is a leak. - t.Errorf("while closing db, freeConn %d/%d had %d open stmts; want 0", i, db.freeConn.Len(), n) + t.Errorf("while closing db, freeConn %d/%d had %d open stmts; want 0", i, len(db.freeConn), n) } } err := db.Close() @@ -105,10 +111,10 @@ func closeDB(t testing.TB, db *DB) { // numPrepares assumes that db has exactly 1 idle conn and returns // its count of calls to Prepare func numPrepares(t *testing.T, db *DB) int { - if n := db.freeConn.Len(); n != 1 { + if n := len(db.freeConn); n != 1 { t.Fatalf("free conns = %d; want 1", n) } - return (db.freeConn.Front().Value.(*driverConn)).ci.(*fakeConn).numPrepare + return db.freeConn[0].ci.(*fakeConn).numPrepare } func (db *DB) numDeps() int { @@ -133,7 +139,7 @@ func (db *DB) numDepsPollUntil(want int, d time.Duration) int { func (db *DB) numFreeConns() int { db.mu.Lock() defer db.mu.Unlock() - return db.freeConn.Len() + return len(db.freeConn) } func (db *DB) dumpDeps(t *testing.T) { @@ -435,6 +441,33 @@ func TestExec(t *testing.T) { } } +func TestTxPrepare(t *testing.T) { + db := newTestDB(t, "") + defer closeDB(t, db) + exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool") + tx, err := db.Begin() + if err != nil { + t.Fatalf("Begin = %v", err) + } + stmt, err := tx.Prepare("INSERT|t1|name=?,age=?") + if err != nil { + t.Fatalf("Stmt, err = %v, %v", stmt, err) + } + defer stmt.Close() + _, err = stmt.Exec("Bobby", 7) + if err != nil { + t.Fatalf("Exec = %v", err) + } + err = tx.Commit() + if err != nil { + t.Fatalf("Commit = %v", err) + } + // Commit() should have closed the statement + if !stmt.closed { + t.Fatal("Stmt not closed after Commit") + } +} + func TestTxStmt(t *testing.T) { db := newTestDB(t, "") defer closeDB(t, db) @@ -458,6 +491,10 @@ func TestTxStmt(t *testing.T) { if err != nil { 
t.Fatalf("Commit = %v", err) } + // Commit() should have closed the statement + if !txs.closed { + t.Fatal("Stmt not closed after Commit") + } } // Issue: http://golang.org/issue/2784 @@ -650,10 +687,10 @@ func TestQueryRowClosingStmt(t *testing.T) { if err != nil { t.Fatal(err) } - if db.freeConn.Len() != 1 { + if len(db.freeConn) != 1 { t.Fatalf("expected 1 free conn") } - fakeConn := (db.freeConn.Front().Value.(*driverConn)).ci.(*fakeConn) + fakeConn := db.freeConn[0].ci.(*fakeConn) if made, closed := fakeConn.stmtsMade, fakeConn.stmtsClosed; made != closed { t.Errorf("statement close mismatch: made %d, closed %d", made, closed) } @@ -878,13 +915,13 @@ func TestMaxIdleConns(t *testing.T) { t.Fatal(err) } tx.Commit() - if got := db.freeConn.Len(); got != 1 { + if got := len(db.freeConn); got != 1 { t.Errorf("freeConns = %d; want 1", got) } db.SetMaxIdleConns(0) - if got := db.freeConn.Len(); got != 0 { + if got := len(db.freeConn); got != 0 { t.Errorf("freeConns after set to zero = %d; want 0", got) } @@ -893,7 +930,7 @@ func TestMaxIdleConns(t *testing.T) { t.Fatal(err) } tx.Commit() - if got := db.freeConn.Len(); got != 0 { + if got := len(db.freeConn); got != 0 { t.Errorf("freeConns = %d; want 0", got) } } @@ -1180,10 +1217,10 @@ func TestCloseConnBeforeStmts(t *testing.T) { t.Fatal(err) } - if db.freeConn.Len() != 1 { - t.Fatalf("expected 1 freeConn; got %d", db.freeConn.Len()) + if len(db.freeConn) != 1 { + t.Fatalf("expected 1 freeConn; got %d", len(db.freeConn)) } - dc := db.freeConn.Front().Value.(*driverConn) + dc := db.freeConn[0] if dc.closed { t.Errorf("conn shouldn't be closed") } @@ -1342,6 +1379,11 @@ func TestErrBadConnReconnect(t *testing.T) { return nil }) + // Provide a way to force a re-prepare of a statement on next execution + forcePrepare := func(stmt *Stmt) { + stmt.css = nil + } + // stmt.Exec stmt1, err := db.Prepare("INSERT|t1|name=?,age=?,dead=?") if err != nil { @@ -1349,9 +1391,7 @@ func TestErrBadConnReconnect(t *testing.T) { } defer stmt1.Close() // make sure we must prepare the stmt first - for _, cs := range stmt1.css { - cs.dc.inUse = true - } + forcePrepare(stmt1) stmtExec := func() error { _, err := stmt1.Exec("Gopher", 3, false) @@ -1367,9 +1407,7 @@ func TestErrBadConnReconnect(t *testing.T) { } defer stmt2.Close() // make sure we must prepare the stmt first - for _, cs := range stmt2.css { - cs.dc.inUse = true - } + forcePrepare(stmt2) stmtQuery := func() error { rows, err := stmt2.Query() @@ -1708,7 +1746,7 @@ func doConcurrentTest(t testing.TB, ct concurrentTest) { for i := 0; i < maxProcs*2; i++ { go func() { - for _ = range reqs { + for range reqs { err := ct.test(t) if err != nil { wg.Done() @@ -1750,7 +1788,7 @@ func manyConcurrentQueries(t testing.TB) { for i := 0; i < maxProcs*2; i++ { go func() { - for _ = range reqs { + for range reqs { rows, err := stmt.Query() if err != nil { t.Errorf("error on query: %v", err) diff --git a/libgo/go/debug/dwarf/type.go b/libgo/go/debug/dwarf/type.go index 68866d0b7bf..6986b19e722 100644 --- a/libgo/go/debug/dwarf/type.go +++ b/libgo/go/debug/dwarf/type.go @@ -88,6 +88,11 @@ type AddrType struct { BasicType } +// An UnspecifiedType represents an implicit, unknown, ambiguous or nonexistent type. +type UnspecifiedType struct { + BasicType +} + // qualifiers // A QualType represents a type that has the C/C++ "const", "restrict", or "volatile" qualifier. 
@@ -113,7 +118,12 @@ func (t *ArrayType) String() string { return "[" + strconv.FormatInt(t.Count, 10) + "]" + t.Type.String() } -func (t *ArrayType) Size() int64 { return t.Count * t.Type.Size() } +func (t *ArrayType) Size() int64 { + if t.Count == -1 { + return 0 + } + return t.Count * t.Type.Size() +} // A VoidType represents the C void type. type VoidType struct { @@ -364,32 +374,36 @@ func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Off t.StrideBitSize, _ = e.Val(AttrStrideSize).(int64) // Accumulate dimensions, - ndim := 0 + var dims []int64 for kid := next(); kid != nil; kid = next() { // TODO(rsc): Can also be TagEnumerationType // but haven't seen that in the wild yet. switch kid.Tag { case TagSubrangeType: - max, ok := kid.Val(AttrUpperBound).(int64) + count, ok := kid.Val(AttrCount).(int64) if !ok { - max = -2 // Count == -1, as in x[]. - } - if ndim == 0 { - t.Count = max + 1 - } else { - // Multidimensional array. - // Create new array type underneath this one. - t.Type = &ArrayType{Type: t.Type, Count: max + 1} + // Old binaries may have an upper bound instead. + count, ok = kid.Val(AttrUpperBound).(int64) + if ok { + count++ // Length is one more than upper bound. + } else if len(dims) == 0 { + count = -1 // As in x[]. + } } - ndim++ + dims = append(dims, count) case TagEnumerationType: err = DecodeError{name, kid.Offset, "cannot handle enumeration type as array bound"} goto Error } } - if ndim == 0 { + if len(dims) == 0 { // LLVM generates this for x[]. - t.Count = -1 + dims = []int64{-1} + } + + t.Count = dims[0] + for i := len(dims) - 1; i >= 1; i-- { + t.Type = &ArrayType{Type: t.Type, Count: dims[i]} } case TagBaseType: @@ -417,6 +431,17 @@ func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Off typ = new(BoolType) case encComplexFloat: typ = new(ComplexType) + if name == "complex" { + // clang writes out 'complex' instead of 'complex float' or 'complex double'. + // clang also writes out a byte size that we can use to distinguish. + // See issue 8694. + switch byteSize, _ := e.Val(AttrByteSize).(int64); byteSize { + case 8: + name = "complex float" + case 16: + name = "complex double" + } + } case encFloat: typ = new(FloatType) case encSigned: @@ -465,7 +490,7 @@ func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Off t.StructName, _ = e.Val(AttrName).(string) t.Incomplete = e.Val(AttrDeclaration) != nil t.Field = make([]*StructField, 0, 8) - var lastFieldType Type + var lastFieldType *Type var lastFieldBitOffset int64 for kid := next(); kid != nil; kid = next() { if kid.Tag == TagMember { @@ -507,7 +532,7 @@ func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Off // (DWARF writes out 0-length arrays as if they were 1-length arrays.) 
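The array-dimension handling above collects one entry per TagSubrangeType into dims and then nests ArrayTypes from the inside out. A small stand-alone illustration of that nesting (nest is an invented helper, not part of the package):

package main

import (
    "debug/dwarf"
    "fmt"
)

// nest: dims[0] becomes the outermost Count and the remaining dimensions wrap
// the element type from the innermost outwards, matching the loop over dims.
func nest(elem dwarf.Type, dims []int64) *dwarf.ArrayType {
    t := &dwarf.ArrayType{Type: elem, Count: dims[0]}
    for i := len(dims) - 1; i >= 1; i-- {
        t.Type = &dwarf.ArrayType{Type: t.Type, Count: dims[i]}
    }
    return t
}

func main() {
    elem := &dwarf.IntType{BasicType: dwarf.BasicType{CommonType: dwarf.CommonType{Name: "int"}}}
    fmt.Println(nest(elem, []int64{2, 3, 4})) // [2][3][4]int
}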
zeroArray(lastFieldType) } - lastFieldType = f.Type + lastFieldType = &f.Type lastFieldBitOffset = bito } } @@ -624,6 +649,15 @@ func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Off typeCache[off] = t t.Name, _ = e.Val(AttrName).(string) t.Type = typeOf(e) + + case TagUnspecifiedType: + // Unspecified type (DWARF v3 §5.2) + // Attributes: + // AttrName: name + t := new(UnspecifiedType) + typ = t + typeCache[off] = t + t.Name, _ = e.Val(AttrName).(string) } if err != nil { @@ -647,13 +681,16 @@ Error: return nil, err } -func zeroArray(t Type) { - for { - at, ok := t.(*ArrayType) - if !ok { - break - } - at.Count = 0 - t = at.Type +func zeroArray(t *Type) { + if t == nil { + return + } + at, ok := (*t).(*ArrayType) + if !ok || at.Type.Size() == 0 { + return } + // Make a copy to avoid invalidating typeCache. + tt := *at + tt.Count = 0 + *t = &tt } diff --git a/libgo/go/debug/elf/elf.go b/libgo/go/debug/elf/elf.go index a7986a57535..a9466bbdcd6 100644 --- a/libgo/go/debug/elf/elf.go +++ b/libgo/go/debug/elf/elf.go @@ -11,6 +11,7 @@ * $FreeBSD: src/sys/i386/include/elf.h,v 1.16 2004/08/02 19:12:17 dfr Exp $ * $FreeBSD: src/sys/powerpc/include/elf.h,v 1.7 2004/11/02 09:47:01 ssouhlal Exp $ * $FreeBSD: src/sys/sparc64/include/elf.h,v 1.12 2003/09/25 01:10:26 peter Exp $ + * "ELF for the ARM® 64-bit Architecture (AArch64)" (ARM IHI 0056B) * * Copyright (c) 1996-1998 John D. Polstra. All rights reserved. * Copyright (c) 2001 David E. O'Brien @@ -192,49 +193,50 @@ func (i Type) GoString() string { return stringName(uint32(i), typeStrings, true type Machine uint16 const ( - EM_NONE Machine = 0 /* Unknown machine. */ - EM_M32 Machine = 1 /* AT&T WE32100. */ - EM_SPARC Machine = 2 /* Sun SPARC. */ - EM_386 Machine = 3 /* Intel i386. */ - EM_68K Machine = 4 /* Motorola 68000. */ - EM_88K Machine = 5 /* Motorola 88000. */ - EM_860 Machine = 7 /* Intel i860. */ - EM_MIPS Machine = 8 /* MIPS R3000 Big-Endian only. */ - EM_S370 Machine = 9 /* IBM System/370. */ - EM_MIPS_RS3_LE Machine = 10 /* MIPS R3000 Little-Endian. */ - EM_PARISC Machine = 15 /* HP PA-RISC. */ - EM_VPP500 Machine = 17 /* Fujitsu VPP500. */ - EM_SPARC32PLUS Machine = 18 /* SPARC v8plus. */ - EM_960 Machine = 19 /* Intel 80960. */ - EM_PPC Machine = 20 /* PowerPC 32-bit. */ - EM_PPC64 Machine = 21 /* PowerPC 64-bit. */ - EM_S390 Machine = 22 /* IBM System/390. */ - EM_V800 Machine = 36 /* NEC V800. */ - EM_FR20 Machine = 37 /* Fujitsu FR20. */ - EM_RH32 Machine = 38 /* TRW RH-32. */ - EM_RCE Machine = 39 /* Motorola RCE. */ - EM_ARM Machine = 40 /* ARM. */ - EM_SH Machine = 42 /* Hitachi SH. */ - EM_SPARCV9 Machine = 43 /* SPARC v9 64-bit. */ - EM_TRICORE Machine = 44 /* Siemens TriCore embedded processor. */ - EM_ARC Machine = 45 /* Argonaut RISC Core. */ - EM_H8_300 Machine = 46 /* Hitachi H8/300. */ - EM_H8_300H Machine = 47 /* Hitachi H8/300H. */ - EM_H8S Machine = 48 /* Hitachi H8S. */ - EM_H8_500 Machine = 49 /* Hitachi H8/500. */ - EM_IA_64 Machine = 50 /* Intel IA-64 Processor. */ - EM_MIPS_X Machine = 51 /* Stanford MIPS-X. */ - EM_COLDFIRE Machine = 52 /* Motorola ColdFire. */ - EM_68HC12 Machine = 53 /* Motorola M68HC12. */ - EM_MMA Machine = 54 /* Fujitsu MMA. */ - EM_PCP Machine = 55 /* Siemens PCP. */ - EM_NCPU Machine = 56 /* Sony nCPU. */ - EM_NDR1 Machine = 57 /* Denso NDR1 microprocessor. */ - EM_STARCORE Machine = 58 /* Motorola Star*Core processor. */ - EM_ME16 Machine = 59 /* Toyota ME16 processor. */ - EM_ST100 Machine = 60 /* STMicroelectronics ST100 processor. 
*/ - EM_TINYJ Machine = 61 /* Advanced Logic Corp. TinyJ processor. */ - EM_X86_64 Machine = 62 /* Advanced Micro Devices x86-64 */ + EM_NONE Machine = 0 /* Unknown machine. */ + EM_M32 Machine = 1 /* AT&T WE32100. */ + EM_SPARC Machine = 2 /* Sun SPARC. */ + EM_386 Machine = 3 /* Intel i386. */ + EM_68K Machine = 4 /* Motorola 68000. */ + EM_88K Machine = 5 /* Motorola 88000. */ + EM_860 Machine = 7 /* Intel i860. */ + EM_MIPS Machine = 8 /* MIPS R3000 Big-Endian only. */ + EM_S370 Machine = 9 /* IBM System/370. */ + EM_MIPS_RS3_LE Machine = 10 /* MIPS R3000 Little-Endian. */ + EM_PARISC Machine = 15 /* HP PA-RISC. */ + EM_VPP500 Machine = 17 /* Fujitsu VPP500. */ + EM_SPARC32PLUS Machine = 18 /* SPARC v8plus. */ + EM_960 Machine = 19 /* Intel 80960. */ + EM_PPC Machine = 20 /* PowerPC 32-bit. */ + EM_PPC64 Machine = 21 /* PowerPC 64-bit. */ + EM_S390 Machine = 22 /* IBM System/390. */ + EM_V800 Machine = 36 /* NEC V800. */ + EM_FR20 Machine = 37 /* Fujitsu FR20. */ + EM_RH32 Machine = 38 /* TRW RH-32. */ + EM_RCE Machine = 39 /* Motorola RCE. */ + EM_ARM Machine = 40 /* ARM. */ + EM_SH Machine = 42 /* Hitachi SH. */ + EM_SPARCV9 Machine = 43 /* SPARC v9 64-bit. */ + EM_TRICORE Machine = 44 /* Siemens TriCore embedded processor. */ + EM_ARC Machine = 45 /* Argonaut RISC Core. */ + EM_H8_300 Machine = 46 /* Hitachi H8/300. */ + EM_H8_300H Machine = 47 /* Hitachi H8/300H. */ + EM_H8S Machine = 48 /* Hitachi H8S. */ + EM_H8_500 Machine = 49 /* Hitachi H8/500. */ + EM_IA_64 Machine = 50 /* Intel IA-64 Processor. */ + EM_MIPS_X Machine = 51 /* Stanford MIPS-X. */ + EM_COLDFIRE Machine = 52 /* Motorola ColdFire. */ + EM_68HC12 Machine = 53 /* Motorola M68HC12. */ + EM_MMA Machine = 54 /* Fujitsu MMA. */ + EM_PCP Machine = 55 /* Siemens PCP. */ + EM_NCPU Machine = 56 /* Sony nCPU. */ + EM_NDR1 Machine = 57 /* Denso NDR1 microprocessor. */ + EM_STARCORE Machine = 58 /* Motorola Star*Core processor. */ + EM_ME16 Machine = 59 /* Toyota ME16 processor. */ + EM_ST100 Machine = 60 /* STMicroelectronics ST100 processor. */ + EM_TINYJ Machine = 61 /* Advanced Logic Corp. TinyJ processor. */ + EM_X86_64 Machine = 62 /* Advanced Micro Devices x86-64 */ + EM_AARCH64 Machine = 183 /* ARM 64-bit Architecture (AArch64) */ /* Non-standard or deprecated. */ EM_486 Machine = 6 /* Intel i486. 
*/ @@ -774,6 +776,256 @@ var rx86_64Strings = []intName{ func (i R_X86_64) String() string { return stringName(uint32(i), rx86_64Strings, false) } func (i R_X86_64) GoString() string { return stringName(uint32(i), rx86_64Strings, true) } +// Relocation types for AArch64 (aka arm64) +type R_AARCH64 int + +const ( + R_AARCH64_NONE R_AARCH64 = 0 + R_AARCH64_P32_ABS32 R_AARCH64 = 1 + R_AARCH64_P32_ABS16 R_AARCH64 = 2 + R_AARCH64_P32_PREL32 R_AARCH64 = 3 + R_AARCH64_P32_PREL16 R_AARCH64 = 4 + R_AARCH64_P32_MOVW_UABS_G0 R_AARCH64 = 5 + R_AARCH64_P32_MOVW_UABS_G0_NC R_AARCH64 = 6 + R_AARCH64_P32_MOVW_UABS_G1 R_AARCH64 = 7 + R_AARCH64_P32_MOVW_SABS_G0 R_AARCH64 = 8 + R_AARCH64_P32_LD_PREL_LO19 R_AARCH64 = 9 + R_AARCH64_P32_ADR_PREL_LO21 R_AARCH64 = 10 + R_AARCH64_P32_ADR_PREL_PG_HI21 R_AARCH64 = 11 + R_AARCH64_P32_ADD_ABS_LO12_NC R_AARCH64 = 12 + R_AARCH64_P32_LDST8_ABS_LO12_NC R_AARCH64 = 13 + R_AARCH64_P32_LDST16_ABS_LO12_NC R_AARCH64 = 14 + R_AARCH64_P32_LDST32_ABS_LO12_NC R_AARCH64 = 15 + R_AARCH64_P32_LDST64_ABS_LO12_NC R_AARCH64 = 16 + R_AARCH64_P32_LDST128_ABS_LO12_NC R_AARCH64 = 17 + R_AARCH64_P32_TSTBR14 R_AARCH64 = 18 + R_AARCH64_P32_CONDBR19 R_AARCH64 = 19 + R_AARCH64_P32_JUMP26 R_AARCH64 = 20 + R_AARCH64_P32_CALL26 R_AARCH64 = 21 + R_AARCH64_P32_GOT_LD_PREL19 R_AARCH64 = 25 + R_AARCH64_P32_ADR_GOT_PAGE R_AARCH64 = 26 + R_AARCH64_P32_LD32_GOT_LO12_NC R_AARCH64 = 27 + R_AARCH64_P32_TLSGD_ADR_PAGE21 R_AARCH64 = 81 + R_AARCH64_P32_TLSGD_ADD_LO12_NC R_AARCH64 = 82 + R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21 R_AARCH64 = 103 + R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC R_AARCH64 = 104 + R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19 R_AARCH64 = 105 + R_AARCH64_P32_TLSLE_MOVW_TPREL_G1 R_AARCH64 = 106 + R_AARCH64_P32_TLSLE_MOVW_TPREL_G0 R_AARCH64 = 107 + R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC R_AARCH64 = 108 + R_AARCH64_P32_TLSLE_ADD_TPREL_HI12 R_AARCH64 = 109 + R_AARCH64_P32_TLSLE_ADD_TPREL_LO12 R_AARCH64 = 110 + R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC R_AARCH64 = 111 + R_AARCH64_P32_TLSDESC_LD_PREL19 R_AARCH64 = 122 + R_AARCH64_P32_TLSDESC_ADR_PREL21 R_AARCH64 = 123 + R_AARCH64_P32_TLSDESC_ADR_PAGE21 R_AARCH64 = 124 + R_AARCH64_P32_TLSDESC_LD32_LO12_NC R_AARCH64 = 125 + R_AARCH64_P32_TLSDESC_ADD_LO12_NC R_AARCH64 = 126 + R_AARCH64_P32_TLSDESC_CALL R_AARCH64 = 127 + R_AARCH64_P32_COPY R_AARCH64 = 180 + R_AARCH64_P32_GLOB_DAT R_AARCH64 = 181 + R_AARCH64_P32_JUMP_SLOT R_AARCH64 = 182 + R_AARCH64_P32_RELATIVE R_AARCH64 = 183 + R_AARCH64_P32_TLS_DTPMOD R_AARCH64 = 184 + R_AARCH64_P32_TLS_DTPREL R_AARCH64 = 185 + R_AARCH64_P32_TLS_TPREL R_AARCH64 = 186 + R_AARCH64_P32_TLSDESC R_AARCH64 = 187 + R_AARCH64_P32_IRELATIVE R_AARCH64 = 188 + R_AARCH64_NULL R_AARCH64 = 256 + R_AARCH64_ABS64 R_AARCH64 = 257 + R_AARCH64_ABS32 R_AARCH64 = 258 + R_AARCH64_ABS16 R_AARCH64 = 259 + R_AARCH64_PREL64 R_AARCH64 = 260 + R_AARCH64_PREL32 R_AARCH64 = 261 + R_AARCH64_PREL16 R_AARCH64 = 262 + R_AARCH64_MOVW_UABS_G0 R_AARCH64 = 263 + R_AARCH64_MOVW_UABS_G0_NC R_AARCH64 = 264 + R_AARCH64_MOVW_UABS_G1 R_AARCH64 = 265 + R_AARCH64_MOVW_UABS_G1_NC R_AARCH64 = 266 + R_AARCH64_MOVW_UABS_G2 R_AARCH64 = 267 + R_AARCH64_MOVW_UABS_G2_NC R_AARCH64 = 268 + R_AARCH64_MOVW_UABS_G3 R_AARCH64 = 269 + R_AARCH64_MOVW_SABS_G0 R_AARCH64 = 270 + R_AARCH64_MOVW_SABS_G1 R_AARCH64 = 271 + R_AARCH64_MOVW_SABS_G2 R_AARCH64 = 272 + R_AARCH64_LD_PREL_LO19 R_AARCH64 = 273 + R_AARCH64_ADR_PREL_LO21 R_AARCH64 = 274 + R_AARCH64_ADR_PREL_PG_HI21 R_AARCH64 = 275 + R_AARCH64_ADR_PREL_PG_HI21_NC R_AARCH64 = 276 + R_AARCH64_ADD_ABS_LO12_NC R_AARCH64 = 277 + 
R_AARCH64_LDST8_ABS_LO12_NC R_AARCH64 = 278 + R_AARCH64_TSTBR14 R_AARCH64 = 279 + R_AARCH64_CONDBR19 R_AARCH64 = 280 + R_AARCH64_JUMP26 R_AARCH64 = 282 + R_AARCH64_CALL26 R_AARCH64 = 283 + R_AARCH64_LDST16_ABS_LO12_NC R_AARCH64 = 284 + R_AARCH64_LDST32_ABS_LO12_NC R_AARCH64 = 285 + R_AARCH64_LDST64_ABS_LO12_NC R_AARCH64 = 286 + R_AARCH64_LDST128_ABS_LO12_NC R_AARCH64 = 299 + R_AARCH64_GOT_LD_PREL19 R_AARCH64 = 309 + R_AARCH64_ADR_GOT_PAGE R_AARCH64 = 311 + R_AARCH64_LD64_GOT_LO12_NC R_AARCH64 = 312 + R_AARCH64_TLSGD_ADR_PAGE21 R_AARCH64 = 513 + R_AARCH64_TLSGD_ADD_LO12_NC R_AARCH64 = 514 + R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 R_AARCH64 = 539 + R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC R_AARCH64 = 540 + R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 R_AARCH64 = 541 + R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC R_AARCH64 = 542 + R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 R_AARCH64 = 543 + R_AARCH64_TLSLE_MOVW_TPREL_G2 R_AARCH64 = 544 + R_AARCH64_TLSLE_MOVW_TPREL_G1 R_AARCH64 = 545 + R_AARCH64_TLSLE_MOVW_TPREL_G1_NC R_AARCH64 = 546 + R_AARCH64_TLSLE_MOVW_TPREL_G0 R_AARCH64 = 547 + R_AARCH64_TLSLE_MOVW_TPREL_G0_NC R_AARCH64 = 548 + R_AARCH64_TLSLE_ADD_TPREL_HI12 R_AARCH64 = 549 + R_AARCH64_TLSLE_ADD_TPREL_LO12 R_AARCH64 = 550 + R_AARCH64_TLSLE_ADD_TPREL_LO12_NC R_AARCH64 = 551 + R_AARCH64_TLSDESC_LD_PREL19 R_AARCH64 = 560 + R_AARCH64_TLSDESC_ADR_PREL21 R_AARCH64 = 561 + R_AARCH64_TLSDESC_ADR_PAGE21 R_AARCH64 = 562 + R_AARCH64_TLSDESC_LD64_LO12_NC R_AARCH64 = 563 + R_AARCH64_TLSDESC_ADD_LO12_NC R_AARCH64 = 564 + R_AARCH64_TLSDESC_OFF_G1 R_AARCH64 = 565 + R_AARCH64_TLSDESC_OFF_G0_NC R_AARCH64 = 566 + R_AARCH64_TLSDESC_LDR R_AARCH64 = 567 + R_AARCH64_TLSDESC_ADD R_AARCH64 = 568 + R_AARCH64_TLSDESC_CALL R_AARCH64 = 569 + R_AARCH64_COPY R_AARCH64 = 1024 + R_AARCH64_GLOB_DAT R_AARCH64 = 1025 + R_AARCH64_JUMP_SLOT R_AARCH64 = 1026 + R_AARCH64_RELATIVE R_AARCH64 = 1027 + R_AARCH64_TLS_DTPMOD64 R_AARCH64 = 1028 + R_AARCH64_TLS_DTPREL64 R_AARCH64 = 1029 + R_AARCH64_TLS_TPREL64 R_AARCH64 = 1030 + R_AARCH64_TLSDESC R_AARCH64 = 1031 + R_AARCH64_IRELATIVE R_AARCH64 = 1032 +) + +var raarch64Strings = []intName{ + {0, "R_AARCH64_NONE"}, + {1, "R_AARCH64_P32_ABS32"}, + {2, "R_AARCH64_P32_ABS16"}, + {3, "R_AARCH64_P32_PREL32"}, + {4, "R_AARCH64_P32_PREL16"}, + {5, "R_AARCH64_P32_MOVW_UABS_G0"}, + {6, "R_AARCH64_P32_MOVW_UABS_G0_NC"}, + {7, "R_AARCH64_P32_MOVW_UABS_G1"}, + {8, "R_AARCH64_P32_MOVW_SABS_G0"}, + {9, "R_AARCH64_P32_LD_PREL_LO19"}, + {10, "R_AARCH64_P32_ADR_PREL_LO21"}, + {11, "R_AARCH64_P32_ADR_PREL_PG_HI21"}, + {12, "R_AARCH64_P32_ADD_ABS_LO12_NC"}, + {13, "R_AARCH64_P32_LDST8_ABS_LO12_NC"}, + {14, "R_AARCH64_P32_LDST16_ABS_LO12_NC"}, + {15, "R_AARCH64_P32_LDST32_ABS_LO12_NC"}, + {16, "R_AARCH64_P32_LDST64_ABS_LO12_NC"}, + {17, "R_AARCH64_P32_LDST128_ABS_LO12_NC"}, + {18, "R_AARCH64_P32_TSTBR14"}, + {19, "R_AARCH64_P32_CONDBR19"}, + {20, "R_AARCH64_P32_JUMP26"}, + {21, "R_AARCH64_P32_CALL26"}, + {25, "R_AARCH64_P32_GOT_LD_PREL19"}, + {26, "R_AARCH64_P32_ADR_GOT_PAGE"}, + {27, "R_AARCH64_P32_LD32_GOT_LO12_NC"}, + {81, "R_AARCH64_P32_TLSGD_ADR_PAGE21"}, + {82, "R_AARCH64_P32_TLSGD_ADD_LO12_NC"}, + {103, "R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21"}, + {104, "R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC"}, + {105, "R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19"}, + {106, "R_AARCH64_P32_TLSLE_MOVW_TPREL_G1"}, + {107, "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0"}, + {108, "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC"}, + {109, "R_AARCH64_P32_TLSLE_ADD_TPREL_HI12"}, + {110, "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12"}, + {111, "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC"}, + 
{122, "R_AARCH64_P32_TLSDESC_LD_PREL19"}, + {123, "R_AARCH64_P32_TLSDESC_ADR_PREL21"}, + {124, "R_AARCH64_P32_TLSDESC_ADR_PAGE21"}, + {125, "R_AARCH64_P32_TLSDESC_LD32_LO12_NC"}, + {126, "R_AARCH64_P32_TLSDESC_ADD_LO12_NC"}, + {127, "R_AARCH64_P32_TLSDESC_CALL"}, + {180, "R_AARCH64_P32_COPY"}, + {181, "R_AARCH64_P32_GLOB_DAT"}, + {182, "R_AARCH64_P32_JUMP_SLOT"}, + {183, "R_AARCH64_P32_RELATIVE"}, + {184, "R_AARCH64_P32_TLS_DTPMOD"}, + {185, "R_AARCH64_P32_TLS_DTPREL"}, + {186, "R_AARCH64_P32_TLS_TPREL"}, + {187, "R_AARCH64_P32_TLSDESC"}, + {188, "R_AARCH64_P32_IRELATIVE"}, + {256, "R_AARCH64_NULL"}, + {257, "R_AARCH64_ABS64"}, + {258, "R_AARCH64_ABS32"}, + {259, "R_AARCH64_ABS16"}, + {260, "R_AARCH64_PREL64"}, + {261, "R_AARCH64_PREL32"}, + {262, "R_AARCH64_PREL16"}, + {263, "R_AARCH64_MOVW_UABS_G0"}, + {264, "R_AARCH64_MOVW_UABS_G0_NC"}, + {265, "R_AARCH64_MOVW_UABS_G1"}, + {266, "R_AARCH64_MOVW_UABS_G1_NC"}, + {267, "R_AARCH64_MOVW_UABS_G2"}, + {268, "R_AARCH64_MOVW_UABS_G2_NC"}, + {269, "R_AARCH64_MOVW_UABS_G3"}, + {270, "R_AARCH64_MOVW_SABS_G0"}, + {271, "R_AARCH64_MOVW_SABS_G1"}, + {272, "R_AARCH64_MOVW_SABS_G2"}, + {273, "R_AARCH64_LD_PREL_LO19"}, + {274, "R_AARCH64_ADR_PREL_LO21"}, + {275, "R_AARCH64_ADR_PREL_PG_HI21"}, + {276, "R_AARCH64_ADR_PREL_PG_HI21_NC"}, + {277, "R_AARCH64_ADD_ABS_LO12_NC"}, + {278, "R_AARCH64_LDST8_ABS_LO12_NC"}, + {279, "R_AARCH64_TSTBR14"}, + {280, "R_AARCH64_CONDBR19"}, + {282, "R_AARCH64_JUMP26"}, + {283, "R_AARCH64_CALL26"}, + {284, "R_AARCH64_LDST16_ABS_LO12_NC"}, + {285, "R_AARCH64_LDST32_ABS_LO12_NC"}, + {286, "R_AARCH64_LDST64_ABS_LO12_NC"}, + {299, "R_AARCH64_LDST128_ABS_LO12_NC"}, + {309, "R_AARCH64_GOT_LD_PREL19"}, + {311, "R_AARCH64_ADR_GOT_PAGE"}, + {312, "R_AARCH64_LD64_GOT_LO12_NC"}, + {513, "R_AARCH64_TLSGD_ADR_PAGE21"}, + {514, "R_AARCH64_TLSGD_ADD_LO12_NC"}, + {539, "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1"}, + {540, "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC"}, + {541, "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21"}, + {542, "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC"}, + {543, "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19"}, + {544, "R_AARCH64_TLSLE_MOVW_TPREL_G2"}, + {545, "R_AARCH64_TLSLE_MOVW_TPREL_G1"}, + {546, "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC"}, + {547, "R_AARCH64_TLSLE_MOVW_TPREL_G0"}, + {548, "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC"}, + {549, "R_AARCH64_TLSLE_ADD_TPREL_HI12"}, + {550, "R_AARCH64_TLSLE_ADD_TPREL_LO12"}, + {551, "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC"}, + {560, "R_AARCH64_TLSDESC_LD_PREL19"}, + {561, "R_AARCH64_TLSDESC_ADR_PREL21"}, + {562, "R_AARCH64_TLSDESC_ADR_PAGE21"}, + {563, "R_AARCH64_TLSDESC_LD64_LO12_NC"}, + {564, "R_AARCH64_TLSDESC_ADD_LO12_NC"}, + {565, "R_AARCH64_TLSDESC_OFF_G1"}, + {566, "R_AARCH64_TLSDESC_OFF_G0_NC"}, + {567, "R_AARCH64_TLSDESC_LDR"}, + {568, "R_AARCH64_TLSDESC_ADD"}, + {569, "R_AARCH64_TLSDESC_CALL"}, + {1024, "R_AARCH64_COPY"}, + {1025, "R_AARCH64_GLOB_DAT"}, + {1026, "R_AARCH64_JUMP_SLOT"}, + {1027, "R_AARCH64_RELATIVE"}, + {1028, "R_AARCH64_TLS_DTPMOD64"}, + {1029, "R_AARCH64_TLS_DTPREL64"}, + {1030, "R_AARCH64_TLS_TPREL64"}, + {1031, "R_AARCH64_TLSDESC"}, + {1032, "R_AARCH64_IRELATIVE"}, +} + +func (i R_AARCH64) String() string { return stringName(uint32(i), raarch64Strings, false) } +func (i R_AARCH64) GoString() string { return stringName(uint32(i), raarch64Strings, true) } + // Relocation types for Alpha. 
type R_ALPHA int diff --git a/libgo/go/debug/elf/file.go b/libgo/go/debug/elf/file.go index 0c8dff506f3..5a418d81021 100644 --- a/libgo/go/debug/elf/file.go +++ b/libgo/go/debug/elf/file.go @@ -405,10 +405,14 @@ func (f *File) getSymbols(typ SectionType) ([]Symbol, []byte, error) { return nil, nil, errors.New("not implemented") } +// ErrNoSymbols is returned by File.Symbols and File.DynamicSymbols +// if there is no such section in the File. +var ErrNoSymbols = errors.New("no symbol section") + func (f *File) getSymbols32(typ SectionType) ([]Symbol, []byte, error) { symtabSection := f.SectionByType(typ) if symtabSection == nil { - return nil, nil, errors.New("no symbol section") + return nil, nil, ErrNoSymbols } data, err := symtabSection.Data() @@ -451,7 +455,7 @@ func (f *File) getSymbols32(typ SectionType) ([]Symbol, []byte, error) { func (f *File) getSymbols64(typ SectionType) ([]Symbol, []byte, error) { symtabSection := f.SectionByType(typ) if symtabSection == nil { - return nil, nil, errors.New("no symbol section") + return nil, nil, ErrNoSymbols } data, err := symtabSection.Data() @@ -525,6 +529,9 @@ func (f *File) applyRelocations(dst []byte, rels []byte) error { if f.Class == ELFCLASS32 && f.Machine == EM_386 { return f.applyRelocations386(dst, rels) } + if f.Class == ELFCLASS64 && f.Machine == EM_AARCH64 { + return f.applyRelocationsARM64(dst, rels) + } if f.Class == ELFCLASS64 && f.Machine == EM_PPC64 { return f.applyRelocationsPPC64(dst, rels) } @@ -563,6 +570,10 @@ func (f *File) applyRelocationsAMD64(dst []byte, rels []byte) error { continue } + // There are relocations, so this must be a normal + // object file, and we only look at section symbols, + // so we assume that the symbol value is 0. + switch t { case R_X86_64_64: if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { @@ -617,6 +628,55 @@ func (f *File) applyRelocations386(dst []byte, rels []byte) error { return nil } +func (f *File) applyRelocationsARM64(dst []byte, rels []byte) error { + // 24 is the size of Rela64. + if len(rels)%24 != 0 { + return errors.New("length of relocation section is not a multiple of 24") + } + + symbols, _, err := f.getSymbols(SHT_SYMTAB) + if err != nil { + return err + } + + b := bytes.NewReader(rels) + var rela Rela64 + + for b.Len() > 0 { + binary.Read(b, f.ByteOrder, &rela) + symNo := rela.Info >> 32 + t := R_AARCH64(rela.Info & 0xffff) + + if symNo == 0 || symNo > uint64(len(symbols)) { + continue + } + sym := &symbols[symNo-1] + if SymType(sym.Info&0xf) != STT_SECTION { + // We don't handle non-section relocations for now. + continue + } + + // There are relocations, so this must be a normal + // object file, and we only look at section symbols, + // so we assume that the symbol value is 0. + + switch t { + case R_AARCH64_ABS64: + if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { + continue + } + f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], uint64(rela.Addend)) + case R_AARCH64_ABS32: + if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { + continue + } + f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], uint32(rela.Addend)) + } + } + + return nil +} + func (f *File) applyRelocationsPPC64(dst []byte, rels []byte) error { // 24 is the size of Rela64. if len(rels)%24 != 0 { @@ -725,7 +785,7 @@ func (f *File) DWARF() (*dwarf.Data, error) { // If there's a relocation table for .debug_info, we have to process it // now otherwise the data in .debug_info is invalid for x86-64 objects. 
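With the exported ErrNoSymbols sentinel above, callers can tell a stripped binary apart from a real read failure. A short sketch (the path is only an example):

package main

import (
    "debug/elf"
    "fmt"
    "log"
)

func main() {
    f, err := elf.Open("/bin/true") // example path; any ELF binary will do
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    syms, err := f.Symbols()
    if err == elf.ErrNoSymbols {
        syms = nil // stripped binary: treat as empty rather than as a failure
    } else if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d symbols\n", len(syms))

    // DynamicSymbols (added a little further down) reports ErrNoSymbols the same way.
    dyn, err := f.DynamicSymbols()
    if err != nil && err != elf.ErrNoSymbols {
        log.Fatal(err)
    }
    fmt.Printf("%d dynamic symbols\n", len(dyn))
}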
rela := f.Section(".rela.debug_info") - if rela != nil && rela.Type == SHT_RELA && (f.Machine == EM_X86_64 || f.Machine == EM_PPC64 || f.Machine == EM_S390) { + if rela != nil && rela.Type == SHT_RELA && (f.Machine == EM_X86_64 || f.Machine == EM_AARCH64 || f.Machine == EM_PPC64 || f.Machine == EM_S390) { data, err := rela.Data() if err != nil { return nil, err @@ -790,7 +850,8 @@ func (f *File) DWARF() (*dwarf.Data, error) { return d, nil } -// Symbols returns the symbol table for f. +// Symbols returns the symbol table for f. The symbols will be listed in the order +// they appear in f. // // For compatibility with Go 1.0, Symbols omits the null symbol at index 0. // After retrieving the symbols as symtab, an externally supplied index x @@ -800,6 +861,17 @@ func (f *File) Symbols() ([]Symbol, error) { return sym, err } +// DynamicSymbols returns the dynamic symbol table for f. The symbols +// will be listed in the order they appear in f. +// +// For compatibility with Symbols, DynamicSymbols omits the null symbol at index 0. +// After retrieving the symbols as symtab, an externally supplied index x +// corresponds to symtab[x-1], not symtab[x]. +func (f *File) DynamicSymbols() ([]Symbol, error) { + sym, _, err := f.getSymbols(SHT_DYNSYM) + return sym, err +} + type ImportedSymbol struct { Name string Version string diff --git a/libgo/go/debug/elf/file_test.go b/libgo/go/debug/elf/file_test.go index db83bad2253..ce10da71d2b 100644 --- a/libgo/go/debug/elf/file_test.go +++ b/libgo/go/debug/elf/file_test.go @@ -166,11 +166,11 @@ func TestOpen(t *testing.T) { } else { f, err = Open(tt.file) } - defer f.Close() if err != nil { t.Errorf("cannot open file %s: %v", tt.file, err) continue } + defer f.Close() if !reflect.DeepEqual(f.FileHeader, tt.hdr) { t.Errorf("open %s:\n\thave %#v\n\twant %#v\n", tt.file, f.FileHeader, tt.hdr) continue @@ -267,6 +267,12 @@ var relocationTests = []relocationTest{ }, }, { + "testdata/go-relocation-test-gcc482-aarch64.obj", + []relocationTestEntry{ + {0, &dwarf.Entry{Offset: 0xb, Tag: dwarf.TagCompileUnit, Children: true, Field: []dwarf.Field{{Attr: dwarf.AttrProducer, Val: "GNU C 4.8.2 -g -fstack-protector"}, {Attr: dwarf.AttrLanguage, Val: int64(1)}, {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc482.c"}, {Attr: dwarf.AttrCompDir, Val: "/tmp"}, {Attr: dwarf.AttrLowpc, Val: uint64(0x0)}, {Attr: dwarf.AttrHighpc, Val: int64(0x24)}, {Attr: dwarf.AttrStmtList, Val: int64(0)}}}}, + }, + }, + { "testdata/go-relocation-test-clang-x86.obj", []relocationTestEntry{ {0, &dwarf.Entry{Offset: 0xb, Tag: dwarf.TagCompileUnit, Children: true, Field: []dwarf.Field{{Attr: dwarf.AttrProducer, Val: "clang version google3-trunk (trunk r209387)"}, {Attr: dwarf.AttrLanguage, Val: int64(12)}, {Attr: dwarf.AttrName, Val: "go-relocation-test-clang.c"}, {Attr: dwarf.AttrStmtList, Val: int64(0)}, {Attr: dwarf.AttrCompDir, Val: "/tmp"}}}}, diff --git a/libgo/go/debug/elf/symbols_test.go b/libgo/go/debug/elf/symbols_test.go new file mode 100644 index 00000000000..1b79520e3cc --- /dev/null +++ b/libgo/go/debug/elf/symbols_test.go @@ -0,0 +1,834 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package elf + +import ( + "io" + "path" + "reflect" + "testing" +) + +// TODO: remove duplicate code +func TestSymbols(t *testing.T) { + do := func(file string, ts []Symbol, getfunc func(*File) ([]Symbol, error)) { + var f *File + var err error + if path.Ext(file) == ".gz" { + var r io.ReaderAt + if r, err = decompress(file); err == nil { + f, err = NewFile(r) + } + } else { + f, err = Open(file) + } + if err != nil { + t.Errorf("TestSymbols: cannot open file %s: %v", file, err) + return + } + defer f.Close() + fs, err := getfunc(f) + if err != nil && err != ErrNoSymbols { + t.Error(err) + return + } else if err == ErrNoSymbols { + fs = []Symbol{} + } + if !reflect.DeepEqual(ts, fs) { + t.Errorf("%s: Symbols = %v, want %v", file, ts, fs) + } + } + for file, ts := range symbolsGolden { + do(file, ts, (*File).Symbols) + } + for file, ts := range dynamicSymbolsGolden { + do(file, ts, (*File).DynamicSymbols) + } +} + +// golden symbol table data generated by testdata/getgoldsym.c + +var symbolsGolden = map[string][]Symbol{ + "testdata/gcc-amd64-linux-exec": { + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x1, + Value: 0x400200, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x2, + Value: 0x40021C, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x3, + Value: 0x400240, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x4, + Value: 0x400268, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x5, + Value: 0x400288, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x6, + Value: 0x4002E8, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x7, + Value: 0x400326, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x8, + Value: 0x400330, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x9, + Value: 0x400350, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0xA, + Value: 0x400368, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0xB, + Value: 0x400398, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0xC, + Value: 0x4003B0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0xD, + Value: 0x4003E0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0xE, + Value: 0x400594, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0xF, + Value: 0x4005A4, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x10, + Value: 0x4005B8, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x11, + Value: 0x4005E0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x12, + Value: 0x600688, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x13, + Value: 0x600698, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x14, + Value: 0x6006A8, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x15, + Value: 0x6006B0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x16, + Value: 0x600850, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x17, + Value: 0x600858, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x18, + Value: 0x600880, + Size: 0x0, + }, + Symbol{ + Name: "", 
+ Info: 0x3, + Other: 0x0, + Section: 0x19, + Value: 0x600898, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x1A, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x1B, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x1C, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x1D, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x1E, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x1F, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x20, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x21, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "init.c", + Info: 0x4, + Other: 0x0, + Section: 0xFFF1, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "initfini.c", + Info: 0x4, + Other: 0x0, + Section: 0xFFF1, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "call_gmon_start", + Info: 0x2, + Other: 0x0, + Section: 0xD, + Value: 0x40040C, + Size: 0x0, + }, + Symbol{ + Name: "crtstuff.c", + Info: 0x4, + Other: 0x0, + Section: 0xFFF1, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "__CTOR_LIST__", + Info: 0x1, + Other: 0x0, + Section: 0x12, + Value: 0x600688, + Size: 0x0, + }, + Symbol{ + Name: "__DTOR_LIST__", + Info: 0x1, + Other: 0x0, + Section: 0x13, + Value: 0x600698, + Size: 0x0, + }, + Symbol{ + Name: "__JCR_LIST__", + Info: 0x1, + Other: 0x0, + Section: 0x14, + Value: 0x6006A8, + Size: 0x0, + }, + Symbol{ + Name: "__do_global_dtors_aux", + Info: 0x2, + Other: 0x0, + Section: 0xD, + Value: 0x400430, + Size: 0x0, + }, + Symbol{ + Name: "completed.6183", + Info: 0x1, + Other: 0x0, + Section: 0x19, + Value: 0x600898, + Size: 0x1, + }, + Symbol{ + Name: "p.6181", + Info: 0x1, + Other: 0x0, + Section: 0x18, + Value: 0x600890, + Size: 0x0, + }, + Symbol{ + Name: "frame_dummy", + Info: 0x2, + Other: 0x0, + Section: 0xD, + Value: 0x400470, + Size: 0x0, + }, + Symbol{ + Name: "crtstuff.c", + Info: 0x4, + Other: 0x0, + Section: 0xFFF1, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "__CTOR_END__", + Info: 0x1, + Other: 0x0, + Section: 0x12, + Value: 0x600690, + Size: 0x0, + }, + Symbol{ + Name: "__DTOR_END__", + Info: 0x1, + Other: 0x0, + Section: 0x13, + Value: 0x6006A0, + Size: 0x0, + }, + Symbol{ + Name: "__FRAME_END__", + Info: 0x1, + Other: 0x0, + Section: 0x11, + Value: 0x400680, + Size: 0x0, + }, + Symbol{ + Name: "__JCR_END__", + Info: 0x1, + Other: 0x0, + Section: 0x14, + Value: 0x6006A8, + Size: 0x0, + }, + Symbol{ + Name: "__do_global_ctors_aux", + Info: 0x2, + Other: 0x0, + Section: 0xD, + Value: 0x400560, + Size: 0x0, + }, + Symbol{ + Name: "initfini.c", + Info: 0x4, + Other: 0x0, + Section: 0xFFF1, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "hello.c", + Info: 0x4, + Other: 0x0, + Section: 0xFFF1, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "_GLOBAL_OFFSET_TABLE_", + Info: 0x1, + Other: 0x2, + Section: 0x17, + Value: 0x600858, + Size: 0x0, + }, + Symbol{ + Name: "__init_array_end", + Info: 0x0, + Other: 0x2, + Section: 0x12, + Value: 0x600684, + Size: 0x0, + }, + Symbol{ + Name: "__init_array_start", + Info: 0x0, + Other: 0x2, + Section: 0x12, + Value: 0x600684, + Size: 0x0, + }, + Symbol{ + Name: "_DYNAMIC", + Info: 0x1, + Other: 0x2, + Section: 0x15, + Value: 0x6006B0, + Size: 0x0, + }, + Symbol{ + Name: "data_start", + Info: 0x20, + 
Other: 0x0, + Section: 0x18, + Value: 0x600880, + Size: 0x0, + }, + Symbol{ + Name: "__libc_csu_fini", + Info: 0x12, + Other: 0x0, + Section: 0xD, + Value: 0x4004C0, + Size: 0x2, + }, + Symbol{ + Name: "_start", + Info: 0x12, + Other: 0x0, + Section: 0xD, + Value: 0x4003E0, + Size: 0x0, + }, + Symbol{ + Name: "__gmon_start__", + Info: 0x20, + Other: 0x0, + Section: 0x0, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "_Jv_RegisterClasses", + Info: 0x20, + Other: 0x0, + Section: 0x0, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "puts@@GLIBC_2.2.5", + Info: 0x12, + Other: 0x0, + Section: 0x0, + Value: 0x0, + Size: 0x18C, + }, + Symbol{ + Name: "_fini", + Info: 0x12, + Other: 0x0, + Section: 0xE, + Value: 0x400594, + Size: 0x0, + }, + Symbol{ + Name: "__libc_start_main@@GLIBC_2.2.5", + Info: 0x12, + Other: 0x0, + Section: 0x0, + Value: 0x0, + Size: 0x1C2, + }, + Symbol{ + Name: "_IO_stdin_used", + Info: 0x11, + Other: 0x0, + Section: 0xF, + Value: 0x4005A4, + Size: 0x4, + }, + Symbol{ + Name: "__data_start", + Info: 0x10, + Other: 0x0, + Section: 0x18, + Value: 0x600880, + Size: 0x0, + }, + Symbol{ + Name: "__dso_handle", + Info: 0x11, + Other: 0x2, + Section: 0x18, + Value: 0x600888, + Size: 0x0, + }, + Symbol{ + Name: "__libc_csu_init", + Info: 0x12, + Other: 0x0, + Section: 0xD, + Value: 0x4004D0, + Size: 0x89, + }, + Symbol{ + Name: "__bss_start", + Info: 0x10, + Other: 0x0, + Section: 0xFFF1, + Value: 0x600898, + Size: 0x0, + }, + Symbol{ + Name: "_end", + Info: 0x10, + Other: 0x0, + Section: 0xFFF1, + Value: 0x6008A0, + Size: 0x0, + }, + Symbol{ + Name: "_edata", + Info: 0x10, + Other: 0x0, + Section: 0xFFF1, + Value: 0x600898, + Size: 0x0, + }, + Symbol{ + Name: "main", + Info: 0x12, + Other: 0x0, + Section: 0xD, + Value: 0x400498, + Size: 0x1B, + }, + Symbol{ + Name: "_init", + Info: 0x12, + Other: 0x0, + Section: 0xB, + Value: 0x400398, + Size: 0x0, + }, + }, + "testdata/go-relocation-test-clang-x86.obj": { + Symbol{ + Name: "go-relocation-test-clang.c", + Info: 0x4, + Other: 0x0, + Section: 0xFFF1, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: ".Linfo_string0", + Info: 0x0, + Other: 0x0, + Section: 0xC, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: ".Linfo_string1", + Info: 0x0, + Other: 0x0, + Section: 0xC, + Value: 0x2C, + Size: 0x0, + }, + Symbol{ + Name: ".Linfo_string2", + Info: 0x0, + Other: 0x0, + Section: 0xC, + Value: 0x47, + Size: 0x0, + }, + Symbol{ + Name: ".Linfo_string3", + Info: 0x0, + Other: 0x0, + Section: 0xC, + Value: 0x4C, + Size: 0x0, + }, + Symbol{ + Name: ".Linfo_string4", + Info: 0x0, + Other: 0x0, + Section: 0xC, + Value: 0x4E, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x1, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x2, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x3, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x4, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x6, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x7, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x8, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0xA, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0xC, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + 
Other: 0x0, + Section: 0xD, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0xE, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0xF, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "", + Info: 0x3, + Other: 0x0, + Section: 0x10, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "v", + Info: 0x11, + Other: 0x0, + Section: 0xFFF2, + Value: 0x4, + Size: 0x4, + }, + }, + "testdata/hello-world-core.gz": {}, +} + +var dynamicSymbolsGolden = map[string][]Symbol{ + "testdata/gcc-amd64-linux-exec": { + Symbol{ + Name: "__gmon_start__", + Info: 0x20, + Other: 0x0, + Section: 0x0, + Value: 0x0, + Size: 0x0, + }, + Symbol{ + Name: "puts", + Info: 0x12, + Other: 0x0, + Section: 0x0, + Value: 0x0, + Size: 0x18C, + }, + Symbol{ + Name: "__libc_start_main", + Info: 0x12, + Other: 0x0, + Section: 0x0, + Value: 0x0, + Size: 0x1C2, + }, + }, + "testdata/go-relocation-test-clang-x86.obj": {}, + "testdata/hello-world-core.gz": {}, +} diff --git a/libgo/go/debug/elf/testdata/go-relocation-test-gcc482-aarch64.obj b/libgo/go/debug/elf/testdata/go-relocation-test-gcc482-aarch64.obj Binary files differnew file mode 100644 index 00000000000..849e2644ec7 --- /dev/null +++ b/libgo/go/debug/elf/testdata/go-relocation-test-gcc482-aarch64.obj diff --git a/libgo/go/debug/gosym/symtab.go b/libgo/go/debug/gosym/symtab.go index 3864e3cb4fa..ee18499d111 100644 --- a/libgo/go/debug/gosym/symtab.go +++ b/libgo/go/debug/gosym/symtab.go @@ -402,7 +402,7 @@ func NewTable(symtab []byte, pcln *LineTable) (*Table, error) { if n := len(t.Funcs); n > 0 { t.Funcs[n-1].End = sym.Value } - if sym.Name == "etext" { + if sym.Name == "runtime.etext" || sym.Name == "etext" { continue } diff --git a/libgo/go/debug/pe/file.go b/libgo/go/debug/pe/file.go index ce6f1408fe9..759e5674fd6 100644 --- a/libgo/go/debug/pe/file.go +++ b/libgo/go/debug/pe/file.go @@ -13,7 +13,6 @@ import ( "io" "os" "strconv" - "unsafe" ) // A File represents an open PE file. @@ -125,6 +124,11 @@ func (f *File) Close() error { return err } +var ( + sizeofOptionalHeader32 = uint16(binary.Size(OptionalHeader32{})) + sizeofOptionalHeader64 = uint16(binary.Size(OptionalHeader64{})) +) + // NewFile creates a new File for accessing a PE binary in an underlying reader. 
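The debug/pe change above drops the package's only use of unsafe by computing the optional-header sizes with binary.Size, which measures the encoded form rather than the padded in-memory layout. For a type with padding the two differ; a toy comparison with a made-up header type:

package main

import (
    "encoding/binary"
    "fmt"
    "unsafe"
)

// hdr is a made-up type: its fields encode to 6 bytes, but the in-memory
// struct is padded for alignment.
type hdr struct {
    Magic  uint16
    Length uint32
}

func main() {
    fmt.Println(binary.Size(hdr{}))   // 6: size of the encoded form
    fmt.Println(unsafe.Sizeof(hdr{})) // typically 8: includes padding
}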
func NewFile(r io.ReaderAt) (*File, error) { f := new(File) @@ -205,8 +209,8 @@ func NewFile(r io.ReaderAt) (*File, error) { } var oh32 OptionalHeader32 var oh64 OptionalHeader64 - switch uintptr(f.FileHeader.SizeOfOptionalHeader) { - case unsafe.Sizeof(oh32): + switch f.FileHeader.SizeOfOptionalHeader { + case sizeofOptionalHeader32: if err := binary.Read(sr, binary.LittleEndian, &oh32); err != nil { return nil, err } @@ -214,7 +218,7 @@ func NewFile(r io.ReaderAt) (*File, error) { return nil, fmt.Errorf("pe32 optional header has unexpected Magic of 0x%x", oh32.Magic) } f.OptionalHeader = &oh32 - case unsafe.Sizeof(oh64): + case sizeofOptionalHeader64: if err := binary.Read(sr, binary.LittleEndian, &oh64); err != nil { return nil, err } diff --git a/libgo/go/debug/pe/file_test.go b/libgo/go/debug/pe/file_test.go index ddbb2717441..0d73969bca9 100644 --- a/libgo/go/debug/pe/file_test.go +++ b/libgo/go/debug/pe/file_test.go @@ -125,9 +125,9 @@ var fileTests = []fileTest{ }, { "testdata/gcc-amd64-mingw-exec", - FileHeader{0x8664, 0x9, 0x53472993, 0x0, 0x0, 0xf0, 0x22f}, + FileHeader{0x8664, 0x11, 0x53e4364f, 0x39600, 0x6fc, 0xf0, 0x27}, &OptionalHeader64{ - 0x20b, 0x2, 0x16, 0x6a00, 0x2400, 0x1600, 0x14e0, 0x1000, 0x400000, 0x1000, 0x200, 0x4, 0x0, 0x0, 0x0, 0x5, 0x2, 0x0, 0x11000, 0x400, 0x1841e, 0x3, 0x0, 0x200000, 0x1000, 0x100000, 0x1000, 0x0, 0x10, + 0x20b, 0x2, 0x16, 0x6a00, 0x2400, 0x1600, 0x14e0, 0x1000, 0x400000, 0x1000, 0x200, 0x4, 0x0, 0x0, 0x0, 0x5, 0x2, 0x0, 0x45000, 0x600, 0x46f19, 0x3, 0x0, 0x200000, 0x1000, 0x100000, 0x1000, 0x0, 0x10, [16]DataDirectory{ {0x0, 0x0}, {0xe000, 0x990}, @@ -145,18 +145,25 @@ var fileTests = []fileTest{ {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0}, - }, - }, + }}, []*SectionHeader{ - {".text", 0x6860, 0x1000, 0x6a00, 0x400, 0x0, 0x0, 0x0, 0x0, 0x60500020}, - {".data", 0xe0, 0x8000, 0x200, 0x6e00, 0x0, 0x0, 0x0, 0x0, 0xc0500040}, - {".rdata", 0x6b0, 0x9000, 0x800, 0x7000, 0x0, 0x0, 0x0, 0x0, 0x40600040}, - {".pdata", 0x498, 0xa000, 0x600, 0x7800, 0x0, 0x0, 0x0, 0x0, 0x40300040}, - {".xdata", 0x488, 0xb000, 0x600, 0x7e00, 0x0, 0x0, 0x0, 0x0, 0x40300040}, + {".text", 0x6860, 0x1000, 0x6a00, 0x600, 0x0, 0x0, 0x0, 0x0, 0x60500020}, + {".data", 0xe0, 0x8000, 0x200, 0x7000, 0x0, 0x0, 0x0, 0x0, 0xc0500040}, + {".rdata", 0x6b0, 0x9000, 0x800, 0x7200, 0x0, 0x0, 0x0, 0x0, 0x40600040}, + {".pdata", 0x498, 0xa000, 0x600, 0x7a00, 0x0, 0x0, 0x0, 0x0, 0x40300040}, + {".xdata", 0x488, 0xb000, 0x600, 0x8000, 0x0, 0x0, 0x0, 0x0, 0x40300040}, {".bss", 0x1410, 0xc000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0600080}, - {".idata", 0x990, 0xe000, 0xa00, 0x8400, 0x0, 0x0, 0x0, 0x0, 0xc0300040}, - {".CRT", 0x68, 0xf000, 0x200, 0x8e00, 0x0, 0x0, 0x0, 0x0, 0xc0400040}, - {".tls", 0x48, 0x10000, 0x200, 0x9000, 0x0, 0x0, 0x0, 0x0, 0xc0600040}, + {".idata", 0x990, 0xe000, 0xa00, 0x8600, 0x0, 0x0, 0x0, 0x0, 0xc0300040}, + {".CRT", 0x68, 0xf000, 0x200, 0x9000, 0x0, 0x0, 0x0, 0x0, 0xc0400040}, + {".tls", 0x48, 0x10000, 0x200, 0x9200, 0x0, 0x0, 0x0, 0x0, 0xc0600040}, + {".debug_aranges", 0x600, 0x11000, 0x600, 0x9400, 0x0, 0x0, 0x0, 0x0, 0x42500040}, + {".debug_info", 0x1316e, 0x12000, 0x13200, 0x9a00, 0x0, 0x0, 0x0, 0x0, 0x42100040}, + {".debug_abbrev", 0x2ccb, 0x26000, 0x2e00, 0x1cc00, 0x0, 0x0, 0x0, 0x0, 0x42100040}, + {".debug_line", 0x3c4d, 0x29000, 0x3e00, 0x1fa00, 0x0, 0x0, 0x0, 0x0, 0x42100040}, + {".debug_frame", 0x18b8, 0x2d000, 0x1a00, 0x23800, 0x0, 0x0, 0x0, 0x0, 0x42400040}, + {".debug_str", 0x396, 0x2f000, 0x400, 0x25200, 0x0, 0x0, 0x0, 0x0, 0x42100040}, + {".debug_loc", 0x13240, 
0x30000, 0x13400, 0x25600, 0x0, 0x0, 0x0, 0x0, 0x42100040}, + {".debug_ranges", 0xa70, 0x44000, 0xc00, 0x38a00, 0x0, 0x0, 0x0, 0x0, 0x42100040}, }, []*Symbol{}, }, diff --git a/libgo/go/debug/pe/testdata/gcc-amd64-mingw-exec b/libgo/go/debug/pe/testdata/gcc-amd64-mingw-exec Binary files differindex 78d4e5fed98..ce6feb6b7b6 100644 --- a/libgo/go/debug/pe/testdata/gcc-amd64-mingw-exec +++ b/libgo/go/debug/pe/testdata/gcc-amd64-mingw-exec diff --git a/libgo/go/debug/plan9obj/file.go b/libgo/go/debug/plan9obj/file.go index 60a5857193e..b11ed86f185 100644 --- a/libgo/go/debug/plan9obj/file.go +++ b/libgo/go/debug/plan9obj/file.go @@ -15,10 +15,12 @@ import ( // A FileHeader represents a Plan 9 a.out file header. type FileHeader struct { - Magic uint32 - Bss uint32 - Entry uint64 - PtrSize int + Magic uint32 + Bss uint32 + Entry uint64 + PtrSize int + LoadAddress uint64 + HdrSize uint64 } // A File represents an open Plan 9 a.out file. @@ -148,20 +150,21 @@ func NewFile(r io.ReaderAt) (*File, error) { } f := &File{FileHeader: FileHeader{ - Magic: ph.Magic, - Bss: ph.Bss, - Entry: uint64(ph.Entry), - PtrSize: 4, + Magic: ph.Magic, + Bss: ph.Bss, + Entry: uint64(ph.Entry), + PtrSize: 4, + LoadAddress: 0x1000, + HdrSize: 4 * 8, }} - hdrSize := 4 * 8 - if ph.Magic&Magic64 != 0 { if err := binary.Read(sr, binary.BigEndian, &f.Entry); err != nil { return nil, err } f.PtrSize = 8 - hdrSize += 8 + f.LoadAddress = 0x200000 + f.HdrSize += 8 } var sects = []struct { @@ -177,7 +180,7 @@ func NewFile(r io.ReaderAt) (*File, error) { f.Sections = make([]*Section, 5) - off := uint32(hdrSize) + off := uint32(f.HdrSize) for i, sect := range sects { s := new(Section) diff --git a/libgo/go/debug/plan9obj/file_test.go b/libgo/go/debug/plan9obj/file_test.go index 96186d81565..cfd7a61d1cd 100644 --- a/libgo/go/debug/plan9obj/file_test.go +++ b/libgo/go/debug/plan9obj/file_test.go @@ -18,7 +18,7 @@ type fileTest struct { var fileTests = []fileTest{ { "testdata/386-plan9-exec", - FileHeader{Magic386, 0x324, 0x14, 4}, + FileHeader{Magic386, 0x324, 0x14, 4, 0x1000, 32}, []*SectionHeader{ {"text", 0x4c5f, 0x20}, {"data", 0x94c, 0x4c7f}, @@ -29,7 +29,7 @@ var fileTests = []fileTest{ }, { "testdata/amd64-plan9-exec", - FileHeader{MagicAMD64, 0x618, 0x13, 8}, + FileHeader{MagicAMD64, 0x618, 0x13, 8, 0x200000, 40}, []*SectionHeader{ {"text", 0x4213, 0x28}, {"data", 0xa80, 0x423b}, diff --git a/libgo/go/encoding/ascii85/ascii85.go b/libgo/go/encoding/ascii85/ascii85.go index 60da304b55e..4d7193873a2 100644 --- a/libgo/go/encoding/ascii85/ascii85.go +++ b/libgo/go/encoding/ascii85/ascii85.go @@ -249,7 +249,6 @@ type decoder struct { err error readErr error r io.Reader - end bool // saw end of message buf [1024]byte // leftover input nbuf int out []byte // leftover decoded output diff --git a/libgo/go/encoding/asn1/asn1.go b/libgo/go/encoding/asn1/asn1.go index ec7f91c1bba..8b3d1b34121 100644 --- a/libgo/go/encoding/asn1/asn1.go +++ b/libgo/go/encoding/asn1/asn1.go @@ -640,15 +640,19 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam // when it sees a string, so if we see a different string type on the // wire, we change the universal type to match. 
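The hunk that follows lets parseField fall back to the string type named in the struct tag when the element carries a context-specific tag instead of a universal string tag. Using the same DER bytes the patch adds to the tests below:

package main

import (
    "encoding/asn1"
    "fmt"
)

// msg mirrors the TestContextSpecificTags3 type added to the tests below.
type msg struct {
    S string `asn1:"tag:1,utf8"`
}

func main() {
    der := []byte{0x30, 0x03, 0x81, 0x01, '@'} // SEQUENCE { [1] IMPLICIT "@" }
    var m msg
    if _, err := asn1.Unmarshal(der, &m); err != nil {
        fmt.Println("unmarshal:", err)
        return
    }
    fmt.Printf("%q\n", m.S) // "@"
}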
if universalTag == tagPrintableString { - switch t.tag { - case tagIA5String, tagGeneralString, tagT61String, tagUTF8String: - universalTag = t.tag + if t.class == classUniversal { + switch t.tag { + case tagIA5String, tagGeneralString, tagT61String, tagUTF8String: + universalTag = t.tag + } + } else if params.stringType != 0 { + universalTag = params.stringType } } // Special case for time: UTCTime and GeneralizedTime both map to the // Go type time.Time. - if universalTag == tagUTCTime && t.tag == tagGeneralizedTime { + if universalTag == tagUTCTime && t.tag == tagGeneralizedTime && t.class == classUniversal { universalTag = tagGeneralizedTime } @@ -822,8 +826,19 @@ func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParam return } +// canHaveDefaultValue reports whether k is a Kind that we will set a default +// value for. (A signed integer, essentially.) +func canHaveDefaultValue(k reflect.Kind) bool { + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + } + + return false +} + // setDefaultValue is used to install a default value, from a tag string, into -// a Value. It is successful is the field was optional, even if a default value +// a Value. It is successful if the field was optional, even if a default value // wasn't provided or it failed to install it into the Value. func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { if !params.optional { @@ -833,9 +848,8 @@ func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { if params.defaultValue == nil { return } - switch val := v; val.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val.SetInt(*params.defaultValue) + if canHaveDefaultValue(v.Kind()) { + v.SetInt(*params.defaultValue) } return } diff --git a/libgo/go/encoding/asn1/asn1_test.go b/libgo/go/encoding/asn1/asn1_test.go index b553f78e0a6..4e864d08ac0 100644 --- a/libgo/go/encoding/asn1/asn1_test.go +++ b/libgo/go/encoding/asn1/asn1_test.go @@ -392,6 +392,10 @@ type TestContextSpecificTags2 struct { B int } +type TestContextSpecificTags3 struct { + S string `asn1:"tag:1,utf8"` +} + type TestElementsAfterString struct { S string A, B int @@ -420,6 +424,7 @@ var unmarshalTestData = []struct { {[]byte{0x04, 0x04, 1, 2, 3, 4}, &RawValue{0, 4, false, []byte{1, 2, 3, 4}, []byte{4, 4, 1, 2, 3, 4}}}, {[]byte{0x30, 0x03, 0x81, 0x01, 0x01}, &TestContextSpecificTags{1}}, {[]byte{0x30, 0x08, 0xa1, 0x03, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02}, &TestContextSpecificTags2{1, 2}}, + {[]byte{0x30, 0x03, 0x81, 0x01, '@'}, &TestContextSpecificTags3{"@"}}, {[]byte{0x01, 0x01, 0x00}, newBool(false)}, {[]byte{0x01, 0x01, 0xff}, newBool(true)}, {[]byte{0x30, 0x0b, 0x13, 0x03, 0x66, 0x6f, 0x6f, 0x02, 0x01, 0x22, 0x02, 0x01, 0x33}, &TestElementsAfterString{"foo", 0x22, 0x33}}, @@ -812,3 +817,51 @@ func TestStringSlice(t *testing.T) { } } } + +type explicitTaggedTimeTest struct { + Time time.Time `asn1:"explicit,tag:0"` +} + +var explicitTaggedTimeTestData = []struct { + in []byte + out explicitTaggedTimeTest +}{ + {[]byte{0x30, 0x11, 0xa0, 0xf, 0x17, 0xd, '9', '1', '0', '5', '0', '6', '1', '6', '4', '5', '4', '0', 'Z'}, + explicitTaggedTimeTest{time.Date(1991, 05, 06, 16, 45, 40, 0, time.UTC)}}, + {[]byte{0x30, 0x17, 0xa0, 0xf, 0x18, 0x13, '2', '0', '1', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '+', '0', '6', '0', '7'}, + explicitTaggedTimeTest{time.Date(2010, 01, 02, 03, 04, 05, 0, time.FixedZone("", 6*60*60+7*60))}}, +} + +func 
TestExplicitTaggedTime(t *testing.T) { + // Test that a time.Time will match either tagUTCTime or + // tagGeneralizedTime. + for i, test := range explicitTaggedTimeTestData { + var got explicitTaggedTimeTest + _, err := Unmarshal(test.in, &got) + if err != nil { + t.Errorf("Unmarshal failed at index %d %v", i, err) + } + if !got.Time.Equal(test.out.Time) { + t.Errorf("#%d: got %v, want %v", i, got.Time, test.out.Time) + } + } +} + +type implicitTaggedTimeTest struct { + Time time.Time `asn1:"tag:24"` +} + +func TestImplicitTaggedTime(t *testing.T) { + // An implicitly tagged time value, that happens to have an implicit + // tag equal to a GENERALIZEDTIME, should still be parsed as a UTCTime. + // (There's no "timeType" in fieldParameters to determine what type of + // time should be expected when implicitly tagged.) + der := []byte{0x30, 0x0f, 0x80 | 24, 0xd, '9', '1', '0', '5', '0', '6', '1', '6', '4', '5', '4', '0', 'Z'} + var result implicitTaggedTimeTest + if _, err := Unmarshal(der, &result); err != nil { + t.Fatalf("Error while parsing: %s", err) + } + if expected := time.Date(1991, 05, 06, 16, 45, 40, 0, time.UTC); !result.Time.Equal(expected) { + t.Errorf("Wrong result. Got %v, want %v", result.Time, expected) + } +} diff --git a/libgo/go/encoding/asn1/marshal.go b/libgo/go/encoding/asn1/marshal.go index e26fe59b305..b2f104b4cbe 100644 --- a/libgo/go/encoding/asn1/marshal.go +++ b/libgo/go/encoding/asn1/marshal.go @@ -513,8 +513,22 @@ func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) return } - if params.optional && reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { - return + if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) { + defaultValue := reflect.New(v.Type()).Elem() + defaultValue.SetInt(*params.defaultValue) + + if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) { + return + } + } + + // If no default value is given then the zero value for the type is + // assumed to be the default value. This isn't obviously the correct + // behaviour, but it's what Go has traditionally done. 
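The DEFAULT handling above (canHaveDefaultValue plus the explicit default comparison, with the zero-value fallback completed just below) can be exercised directly; the expected encodings are the ones added to marshal_test.go:

package main

import (
    "encoding/asn1"
    "fmt"
)

// withDefault mirrors the defaultTest type added to marshal_test.go below.
type withDefault struct {
    A int `asn1:"optional,default:1"`
}

func main() {
    for _, v := range []int{0, 1, 2} {
        b, err := asn1.Marshal(withDefault{A: v})
        if err != nil {
            fmt.Println("marshal:", err)
            return
        }
        // Only the value equal to the declared default (1) is omitted:
        // 0 -> 3003020100, 1 -> 3000, 2 -> 3003020102
        fmt.Printf("A=%d -> %x\n", v, b)
    }
}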
+ if params.optional && params.defaultValue == nil { + if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { + return + } } if v.Type() == rawValueType { diff --git a/libgo/go/encoding/asn1/marshal_test.go b/libgo/go/encoding/asn1/marshal_test.go index a15acbed012..5b0115f28c5 100644 --- a/libgo/go/encoding/asn1/marshal_test.go +++ b/libgo/go/encoding/asn1/marshal_test.go @@ -58,6 +58,10 @@ type omitEmptyTest struct { A []string `asn1:"omitempty"` } +type defaultTest struct { + A int `asn1:"optional,default:1"` +} + type testSET []int var PST = time.FixedZone("PST", -8*60*60) @@ -133,6 +137,9 @@ var marshalTests = []marshalTest{ {omitEmptyTest{[]string{}}, "3000"}, {omitEmptyTest{[]string{"1"}}, "30053003130131"}, {"Σ", "0c02cea3"}, + {defaultTest{0}, "3003020100"}, + {defaultTest{1}, "3000"}, + {defaultTest{2}, "3003020102"}, } func TestMarshal(t *testing.T) { diff --git a/libgo/go/encoding/base32/base32.go b/libgo/go/encoding/base32/base32.go index d770de3915f..5a9e86919d8 100644 --- a/libgo/go/encoding/base32/base32.go +++ b/libgo/go/encoding/base32/base32.go @@ -73,45 +73,43 @@ func (enc *Encoding) Encode(dst, src []byte) { } for len(src) > 0 { - dst[0] = 0 - dst[1] = 0 - dst[2] = 0 - dst[3] = 0 - dst[4] = 0 - dst[5] = 0 - dst[6] = 0 - dst[7] = 0 + var b0, b1, b2, b3, b4, b5, b6, b7 byte // Unpack 8x 5-bit source blocks into a 5 byte // destination quantum switch len(src) { default: - dst[7] |= src[4] & 0x1F - dst[6] |= src[4] >> 5 + b7 = src[4] & 0x1F + b6 = src[4] >> 5 fallthrough case 4: - dst[6] |= (src[3] << 3) & 0x1F - dst[5] |= (src[3] >> 2) & 0x1F - dst[4] |= src[3] >> 7 + b6 |= (src[3] << 3) & 0x1F + b5 = (src[3] >> 2) & 0x1F + b4 = src[3] >> 7 fallthrough case 3: - dst[4] |= (src[2] << 1) & 0x1F - dst[3] |= (src[2] >> 4) & 0x1F + b4 |= (src[2] << 1) & 0x1F + b3 = (src[2] >> 4) & 0x1F fallthrough case 2: - dst[3] |= (src[1] << 4) & 0x1F - dst[2] |= (src[1] >> 1) & 0x1F - dst[1] |= (src[1] >> 6) & 0x1F + b3 |= (src[1] << 4) & 0x1F + b2 = (src[1] >> 1) & 0x1F + b1 = (src[1] >> 6) & 0x1F fallthrough case 1: - dst[1] |= (src[0] << 2) & 0x1F - dst[0] |= src[0] >> 3 + b1 |= (src[0] << 2) & 0x1F + b0 = src[0] >> 3 } // Encode 5-bit blocks using the base32 alphabet - for j := 0; j < 8; j++ { - dst[j] = enc.encode[dst[j]] - } + dst[0] = enc.encode[b0] + dst[1] = enc.encode[b1] + dst[2] = enc.encode[b2] + dst[3] = enc.encode[b3] + dst[4] = enc.encode[b4] + dst[5] = enc.encode[b5] + dst[6] = enc.encode[b6] + dst[7] = enc.encode[b7] // Pad the final quantum if len(src) < 5 { @@ -330,7 +328,7 @@ func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { func (enc *Encoding) DecodeString(s string) ([]byte, error) { s = strings.Map(removeNewlinesMapper, s) dbuf := make([]byte, enc.DecodedLen(len(s))) - n, err := enc.Decode(dbuf, []byte(s)) + n, _, err := enc.decode(dbuf, []byte(s)) return dbuf[:n], err } diff --git a/libgo/go/encoding/base32/base32_test.go b/libgo/go/encoding/base32/base32_test.go index f56b996faaf..5a68f06e1c9 100644 --- a/libgo/go/encoding/base32/base32_test.go +++ b/libgo/go/encoding/base32/base32_test.go @@ -284,3 +284,19 @@ LNEBUWIIDFON2CA3DBMJXXE5LNFY== t.Error("Decoded results not equal") } } + +func BenchmarkEncodeToString(b *testing.B) { + data := make([]byte, 8192) + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + StdEncoding.EncodeToString(data) + } +} + +func BenchmarkDecodeString(b *testing.B) { + data := StdEncoding.EncodeToString(make([]byte, 8192)) + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + 
StdEncoding.DecodeString(data) + } +} diff --git a/libgo/go/encoding/base64/base64.go b/libgo/go/encoding/base64/base64.go index e38c26d0ec7..ad3abe66239 100644 --- a/libgo/go/encoding/base64/base64.go +++ b/libgo/go/encoding/base64/base64.go @@ -74,31 +74,29 @@ func (enc *Encoding) Encode(dst, src []byte) { } for len(src) > 0 { - dst[0] = 0 - dst[1] = 0 - dst[2] = 0 - dst[3] = 0 + var b0, b1, b2, b3 byte // Unpack 4x 6-bit source blocks into a 4 byte // destination quantum switch len(src) { default: - dst[3] |= src[2] & 0x3F - dst[2] |= src[2] >> 6 + b3 = src[2] & 0x3F + b2 = src[2] >> 6 fallthrough case 2: - dst[2] |= (src[1] << 2) & 0x3F - dst[1] |= src[1] >> 4 + b2 |= (src[1] << 2) & 0x3F + b1 = src[1] >> 4 fallthrough case 1: - dst[1] |= (src[0] << 4) & 0x3F - dst[0] |= src[0] >> 2 + b1 |= (src[0] << 4) & 0x3F + b0 = src[0] >> 2 } // Encode 6-bit blocks using the base64 alphabet - for j := 0; j < 4; j++ { - dst[j] = enc.encode[dst[j]] - } + dst[0] = enc.encode[b0] + dst[1] = enc.encode[b1] + dst[2] = enc.encode[b2] + dst[3] = enc.encode[b3] // Pad the final quantum if len(src) < 3 { @@ -295,7 +293,7 @@ func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { func (enc *Encoding) DecodeString(s string) ([]byte, error) { s = strings.Map(removeNewlinesMapper, s) dbuf := make([]byte, enc.DecodedLen(len(s))) - n, err := enc.Decode(dbuf, []byte(s)) + n, _, err := enc.decode(dbuf, []byte(s)) return dbuf[:n], err } diff --git a/libgo/go/encoding/base64/base64_test.go b/libgo/go/encoding/base64/base64_test.go index a075194e03e..7d199bfa089 100644 --- a/libgo/go/encoding/base64/base64_test.go +++ b/libgo/go/encoding/base64/base64_test.go @@ -342,3 +342,19 @@ func TestDecoderIssue7733(t *testing.T) { t.Errorf("DecodeString = %q; want abcd", s) } } + +func BenchmarkEncodeToString(b *testing.B) { + data := make([]byte, 8192) + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + StdEncoding.EncodeToString(data) + } +} + +func BenchmarkDecodeString(b *testing.B) { + data := StdEncoding.EncodeToString(make([]byte, 8192)) + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + StdEncoding.DecodeString(data) + } +} diff --git a/libgo/go/encoding/binary/binary.go b/libgo/go/encoding/binary/binary.go index a5694876ac4..466bf97c973 100644 --- a/libgo/go/encoding/binary/binary.go +++ b/libgo/go/encoding/binary/binary.go @@ -10,9 +10,10 @@ // type (int8, uint8, int16, float32, complex64, ...) // or an array or struct containing only fixed-size values. // -// Varints are a method of encoding integers using one or more bytes; -// numbers with smaller absolute value take a smaller number of bytes. -// For a specification, see http://code.google.com/apis/protocolbuffers/docs/encoding.html. +// The varint functions encode and decode single integer values using +// a variable-length encoding; smaller values require fewer bytes. +// For a specification, see +// http://code.google.com/apis/protocolbuffers/docs/encoding.html. // // This package favors simplicity over efficiency. Clients that require // high-performance serialization, especially for large data structures, @@ -199,18 +200,17 @@ func Read(r io.Reader, order ByteOrder, data interface{}) error { } // Fallback to reflect-based decoding. 
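On the reworded varint documentation: a small sketch, assuming only the exported PutUvarint/Uvarint helpers and an arbitrary sample value of 300, showing that smaller values really do take fewer bytes.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)

	// 300 fits in two varint bytes: ac 02.
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n])

	// Decoding returns the value and the number of bytes consumed.
	v, m := binary.Uvarint(buf[:n])
	fmt.Println(v, m) // 300 2
}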
- var v reflect.Value - switch d := reflect.ValueOf(data); d.Kind() { + v := reflect.ValueOf(data) + size := -1 + switch v.Kind() { case reflect.Ptr: - v = d.Elem() + v = v.Elem() + size = dataSize(v) case reflect.Slice: - v = d - default: - return errors.New("binary.Read: invalid type " + d.Type().String()) + size = dataSize(v) } - size, err := dataSize(v) - if err != nil { - return errors.New("binary.Read: " + err.Error()) + if size < 0 { + return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String()) } d := &decoder{order: order, buf: make([]byte, size)} if _, err := io.ReadFull(r, d.buf); err != nil { @@ -323,68 +323,64 @@ func Write(w io.Writer, order ByteOrder, data interface{}) error { // Fallback to reflect-based encoding. v := reflect.Indirect(reflect.ValueOf(data)) - size, err := dataSize(v) - if err != nil { - return errors.New("binary.Write: " + err.Error()) + size := dataSize(v) + if size < 0 { + return errors.New("binary.Write: invalid type " + reflect.TypeOf(data).String()) } buf := make([]byte, size) e := &encoder{order: order, buf: buf} e.value(v) - _, err = w.Write(buf) + _, err := w.Write(buf) return err } // Size returns how many bytes Write would generate to encode the value v, which // must be a fixed-size value or a slice of fixed-size values, or a pointer to such data. +// If v is neither of these, Size returns -1. func Size(v interface{}) int { - n, err := dataSize(reflect.Indirect(reflect.ValueOf(v))) - if err != nil { - return -1 - } - return n + return dataSize(reflect.Indirect(reflect.ValueOf(v))) } // dataSize returns the number of bytes the actual data represented by v occupies in memory. // For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice // it returns the length of the slice times the element size and does not count the memory -// occupied by the header. -func dataSize(v reflect.Value) (int, error) { +// occupied by the header. If the type of v is not acceptable, dataSize returns -1. +func dataSize(v reflect.Value) int { if v.Kind() == reflect.Slice { - elem, err := sizeof(v.Type().Elem()) - if err != nil { - return 0, err + if s := sizeof(v.Type().Elem()); s >= 0 { + return s * v.Len() } - return v.Len() * elem, nil + return -1 } return sizeof(v.Type()) } -func sizeof(t reflect.Type) (int, error) { +// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable. 
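A minimal illustration of the new -1 convention for Size/dataSize, seen through the exported binary.Size function (the sample types here are arbitrary): fixed-size values report their encoded size, anything else yields -1 instead of an error.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	fmt.Println(binary.Size(int32(0)))                       // 4
	fmt.Println(binary.Size(struct{ A int32; B float64 }{})) // 12: field sizes summed, no padding counted
	fmt.Println(binary.Size([]uint16{1, 2, 3}))              // 6: element size times length
	fmt.Println(binary.Size(0))                              // -1: plain int is not a fixed-size type
	fmt.Println(binary.Size("hello"))                        // -1: neither is a string
}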
+func sizeof(t reflect.Type) int { switch t.Kind() { case reflect.Array: - n, err := sizeof(t.Elem()) - if err != nil { - return 0, err + if s := sizeof(t.Elem()); s >= 0 { + return s * t.Len() } - return t.Len() * n, nil case reflect.Struct: sum := 0 for i, n := 0, t.NumField(); i < n; i++ { - s, err := sizeof(t.Field(i).Type) - if err != nil { - return 0, err + s := sizeof(t.Field(i).Type) + if s < 0 { + return -1 } sum += s } - return sum, nil + return sum case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - return int(t.Size()), nil + return int(t.Size()) } - return 0, errors.New("invalid type " + t.String()) + + return -1 } type coder struct { @@ -594,12 +590,11 @@ func (e *encoder) value(v reflect.Value) { } func (d *decoder) skip(v reflect.Value) { - n, _ := dataSize(v) - d.buf = d.buf[n:] + d.buf = d.buf[dataSize(v):] } func (e *encoder) skip(v reflect.Value) { - n, _ := dataSize(v) + n := dataSize(v) for i := range e.buf[0:n] { e.buf[i] = 0 } diff --git a/libgo/go/encoding/binary/binary_test.go b/libgo/go/encoding/binary/binary_test.go index c80c90383af..8ee595fa476 100644 --- a/libgo/go/encoding/binary/binary_test.go +++ b/libgo/go/encoding/binary/binary_test.go @@ -289,6 +289,26 @@ func TestUnexportedRead(t *testing.T) { Read(&buf, LittleEndian, &u2) } +func TestReadErrorMsg(t *testing.T) { + var buf bytes.Buffer + read := func(data interface{}) { + err := Read(&buf, LittleEndian, data) + want := "binary.Read: invalid type " + reflect.TypeOf(data).String() + if err == nil { + t.Errorf("%T: got no error; want %q", data, want) + return + } + if got := err.Error(); got != want { + t.Errorf("%T: got %q; want %q", data, got, want) + } + } + read(0) + s := new(struct{}) + read(&s) + p := &s + read(&p) +} + type byteSliceReader struct { remain []byte } @@ -315,8 +335,7 @@ func BenchmarkReadStruct(b *testing.B) { bsr := &byteSliceReader{} var buf bytes.Buffer Write(&buf, BigEndian, &s) - n, _ := dataSize(reflect.ValueOf(s)) - b.SetBytes(int64(n)) + b.SetBytes(int64(dataSize(reflect.ValueOf(s)))) t := s b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/libgo/go/encoding/csv/writer.go b/libgo/go/encoding/csv/writer.go index 1faecb66480..17e7bb7f5c7 100644 --- a/libgo/go/encoding/csv/writer.go +++ b/libgo/go/encoding/csv/writer.go @@ -115,10 +115,22 @@ func (w *Writer) WriteAll(records [][]string) (err error) { } // fieldNeedsQuotes returns true if our field must be enclosed in quotes. -// Empty fields, files with a Comma, fields with a quote or newline, and +// Fields with a Comma, fields with a quote or newline, and // fields which start with a space must be enclosed in quotes. +// We used to quote empty strings, but we do not anymore (as of Go 1.4). +// The two representations should be equivalent, but Postgres distinguishes +// quoted vs non-quoted empty string during database imports, and it has +// an option to force the quoted behavior for non-quoted CSV but it has +// no option to force the non-quoted behavior for quoted CSV, making +// CSV with quoted empty strings strictly less useful. +// Not quoting the empty string also makes this package match the behavior +// of Microsoft Excel and Google Drive. +// For Postgres, quote the data termating string `\.`. 
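The quoting rule described in the comment above can be seen end to end with a short sketch built on the exported csv.Writer API (the sample records are arbitrary; the expected output matches the new writer_test.go cases below): empty fields stay unquoted, while a bare `\.` is quoted so a Postgres import cannot mistake it for the end-of-data marker.

package main

import (
	"encoding/csv"
	"log"
	"os"
)

func main() {
	w := csv.NewWriter(os.Stdout)
	w.Write([]string{"", "a", ""}) // prints: ,a,
	w.Write([]string{`\.`})        // prints: "\."
	w.Flush()
	if err := w.Error(); err != nil {
		log.Fatal(err)
	}
}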
func (w *Writer) fieldNeedsQuotes(field string) bool { - if len(field) == 0 || strings.IndexRune(field, w.Comma) >= 0 || strings.IndexAny(field, "\"\r\n") >= 0 { + if field == "" { + return false + } + if field == `\.` || strings.IndexRune(field, w.Comma) >= 0 || strings.IndexAny(field, "\"\r\n") >= 0 { return true } diff --git a/libgo/go/encoding/csv/writer_test.go b/libgo/go/encoding/csv/writer_test.go index 22b740c0745..8ddca0abe0c 100644 --- a/libgo/go/encoding/csv/writer_test.go +++ b/libgo/go/encoding/csv/writer_test.go @@ -28,6 +28,17 @@ var writeTests = []struct { {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\r\ndef\"\r\n", UseCRLF: true}, {Input: [][]string{{"abc\rdef"}}, Output: "\"abcdef\"\r\n", UseCRLF: true}, {Input: [][]string{{"abc\rdef"}}, Output: "\"abc\rdef\"\n", UseCRLF: false}, + {Input: [][]string{{""}}, Output: "\n"}, + {Input: [][]string{{"", ""}}, Output: ",\n"}, + {Input: [][]string{{"", "", ""}}, Output: ",,\n"}, + {Input: [][]string{{"", "", "a"}}, Output: ",,a\n"}, + {Input: [][]string{{"", "a", ""}}, Output: ",a,\n"}, + {Input: [][]string{{"", "a", "a"}}, Output: ",a,a\n"}, + {Input: [][]string{{"a", "", ""}}, Output: "a,,\n"}, + {Input: [][]string{{"a", "", "a"}}, Output: "a,,a\n"}, + {Input: [][]string{{"a", "a", ""}}, Output: "a,a,\n"}, + {Input: [][]string{{"a", "a", "a"}}, Output: "a,a,a\n"}, + {Input: [][]string{{`\.`}}, Output: "\"\\.\"\n"}, } func TestWrite(t *testing.T) { diff --git a/libgo/go/encoding/gob/codec_test.go b/libgo/go/encoding/gob/codec_test.go index fa57f3761d0..56a7298fa55 100644 --- a/libgo/go/encoding/gob/codec_test.go +++ b/libgo/go/encoding/gob/codec_test.go @@ -14,7 +14,6 @@ import ( "strings" "testing" "time" - "unsafe" ) var doFuzzTests = flag.Bool("gob.fuzz", false, "run the fuzz tests, which are large and very slow") @@ -51,10 +50,16 @@ func testError(t *testing.T) { return } +func newDecBuffer(data []byte) *decBuffer { + return &decBuffer{ + data: data, + } +} + // Test basic encode/decode routines for unsigned integers func TestUintCodec(t *testing.T) { defer testError(t) - b := new(bytes.Buffer) + b := new(encBuffer) encState := newEncoderState(b) for _, tt := range encodeT { b.Reset() @@ -63,10 +68,10 @@ func TestUintCodec(t *testing.T) { t.Errorf("encodeUint: %#x encode: expected % x got % x", tt.x, tt.b, b.Bytes()) } } - decState := newDecodeState(b) for u := uint64(0); ; u = (u + 1) * 7 { b.Reset() encState.encodeUint(u) + decState := newDecodeState(newDecBuffer(b.Bytes())) v := decState.decodeUint() if u != v { t.Errorf("Encode/Decode: sent %#x received %#x", u, v) @@ -79,10 +84,10 @@ func TestUintCodec(t *testing.T) { func verifyInt(i int64, t *testing.T) { defer testError(t) - var b = new(bytes.Buffer) + var b = new(encBuffer) encState := newEncoderState(b) encState.encodeInt(i) - decState := newDecodeState(b) + decState := newDecodeState(newDecBuffer(b.Bytes())) decState.buf = make([]byte, 8) j := decState.decodeInt() if i != j { @@ -119,14 +124,14 @@ var complexResult = []byte{0x07, 0xFE, 0x31, 0x40, 0xFE, 0x33, 0x40} // The result of encoding "hello" with field number 7 var bytesResult = []byte{0x07, 0x05, 'h', 'e', 'l', 'l', 'o'} -func newDecodeState(buf *bytes.Buffer) *decoderState { +func newDecodeState(buf *decBuffer) *decoderState { d := new(decoderState) d.b = buf d.buf = make([]byte, uint64Size) return d } -func newEncoderState(b *bytes.Buffer) *encoderState { +func newEncoderState(b *encBuffer) *encoderState { b.Reset() state := &encoderState{enc: nil, b: b} state.fieldnum = -1 @@ -136,14 +141,14 @@ func 
newEncoderState(b *bytes.Buffer) *encoderState { // Test instruction execution for encoding. // Do not run the machine yet; instead do individual instructions crafted by hand. func TestScalarEncInstructions(t *testing.T) { - var b = new(bytes.Buffer) + var b = new(encBuffer) // bool { - data := struct{ a bool }{true} - instr := &encInstr{encBool, 6, 0, 0} + var data bool = true + instr := &encInstr{encBool, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(boolResult, b.Bytes()) { t.Errorf("bool enc instructions: expected % x got % x", boolResult, b.Bytes()) } @@ -152,10 +157,10 @@ func TestScalarEncInstructions(t *testing.T) { // int { b.Reset() - data := struct{ a int }{17} - instr := &encInstr{encInt, 6, 0, 0} + var data int = 17 + instr := &encInstr{encInt, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(signedResult, b.Bytes()) { t.Errorf("int enc instructions: expected % x got % x", signedResult, b.Bytes()) } @@ -164,10 +169,10 @@ func TestScalarEncInstructions(t *testing.T) { // uint { b.Reset() - data := struct{ a uint }{17} - instr := &encInstr{encUint, 6, 0, 0} + var data uint = 17 + instr := &encInstr{encUint, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(unsignedResult, b.Bytes()) { t.Errorf("uint enc instructions: expected % x got % x", unsignedResult, b.Bytes()) } @@ -176,10 +181,10 @@ func TestScalarEncInstructions(t *testing.T) { // int8 { b.Reset() - data := struct{ a int8 }{17} - instr := &encInstr{encInt8, 6, 0, 0} + var data int8 = 17 + instr := &encInstr{encInt, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(signedResult, b.Bytes()) { t.Errorf("int8 enc instructions: expected % x got % x", signedResult, b.Bytes()) } @@ -188,10 +193,10 @@ func TestScalarEncInstructions(t *testing.T) { // uint8 { b.Reset() - data := struct{ a uint8 }{17} - instr := &encInstr{encUint8, 6, 0, 0} + var data uint8 = 17 + instr := &encInstr{encUint, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(unsignedResult, b.Bytes()) { t.Errorf("uint8 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) } @@ -200,10 +205,10 @@ func TestScalarEncInstructions(t *testing.T) { // int16 { b.Reset() - data := struct{ a int16 }{17} - instr := &encInstr{encInt16, 6, 0, 0} + var data int16 = 17 + instr := &encInstr{encInt, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(signedResult, b.Bytes()) { t.Errorf("int16 enc instructions: expected % x got % x", signedResult, b.Bytes()) } @@ -212,10 +217,10 @@ func TestScalarEncInstructions(t *testing.T) { // uint16 { b.Reset() - data := struct{ a uint16 }{17} - instr := &encInstr{encUint16, 6, 0, 0} + var data uint16 = 17 + instr := &encInstr{encUint, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(unsignedResult, b.Bytes()) { t.Errorf("uint16 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) } @@ -224,10 +229,10 @@ func 
TestScalarEncInstructions(t *testing.T) { // int32 { b.Reset() - data := struct{ a int32 }{17} - instr := &encInstr{encInt32, 6, 0, 0} + var data int32 = 17 + instr := &encInstr{encInt, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(signedResult, b.Bytes()) { t.Errorf("int32 enc instructions: expected % x got % x", signedResult, b.Bytes()) } @@ -236,10 +241,10 @@ func TestScalarEncInstructions(t *testing.T) { // uint32 { b.Reset() - data := struct{ a uint32 }{17} - instr := &encInstr{encUint32, 6, 0, 0} + var data uint32 = 17 + instr := &encInstr{encUint, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(unsignedResult, b.Bytes()) { t.Errorf("uint32 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) } @@ -248,10 +253,10 @@ func TestScalarEncInstructions(t *testing.T) { // int64 { b.Reset() - data := struct{ a int64 }{17} - instr := &encInstr{encInt64, 6, 0, 0} + var data int64 = 17 + instr := &encInstr{encInt, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(signedResult, b.Bytes()) { t.Errorf("int64 enc instructions: expected % x got % x", signedResult, b.Bytes()) } @@ -260,10 +265,10 @@ func TestScalarEncInstructions(t *testing.T) { // uint64 { b.Reset() - data := struct{ a uint64 }{17} - instr := &encInstr{encUint64, 6, 0, 0} + var data uint64 = 17 + instr := &encInstr{encUint, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(unsignedResult, b.Bytes()) { t.Errorf("uint64 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) } @@ -272,10 +277,10 @@ func TestScalarEncInstructions(t *testing.T) { // float32 { b.Reset() - data := struct{ a float32 }{17} - instr := &encInstr{encFloat32, 6, 0, 0} + var data float32 = 17 + instr := &encInstr{encFloat, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(floatResult, b.Bytes()) { t.Errorf("float32 enc instructions: expected % x got % x", floatResult, b.Bytes()) } @@ -284,10 +289,10 @@ func TestScalarEncInstructions(t *testing.T) { // float64 { b.Reset() - data := struct{ a float64 }{17} - instr := &encInstr{encFloat64, 6, 0, 0} + var data float64 = 17 + instr := &encInstr{encFloat, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(floatResult, b.Bytes()) { t.Errorf("float64 enc instructions: expected % x got % x", floatResult, b.Bytes()) } @@ -296,10 +301,10 @@ func TestScalarEncInstructions(t *testing.T) { // bytes == []uint8 { b.Reset() - data := struct{ a []byte }{[]byte("hello")} - instr := &encInstr{encUint8Array, 6, 0, 0} + data := []byte("hello") + instr := &encInstr{encUint8Array, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(bytesResult, b.Bytes()) { t.Errorf("bytes enc instructions: expected % x got % x", bytesResult, b.Bytes()) } @@ -308,28 +313,28 @@ func TestScalarEncInstructions(t *testing.T) { // string { b.Reset() - data := struct{ a string }{"hello"} - instr := &encInstr{encString, 6, 0, 0} + var 
data string = "hello" + instr := &encInstr{encString, 6, nil, 0} state := newEncoderState(b) - instr.op(instr, state, unsafe.Pointer(&data)) + instr.op(instr, state, reflect.ValueOf(data)) if !bytes.Equal(bytesResult, b.Bytes()) { t.Errorf("string enc instructions: expected % x got % x", bytesResult, b.Bytes()) } } } -func execDec(typ string, instr *decInstr, state *decoderState, t *testing.T, p unsafe.Pointer) { +func execDec(typ string, instr *decInstr, state *decoderState, t *testing.T, value reflect.Value) { defer testError(t) v := int(state.decodeUint()) if v+state.fieldnum != 6 { t.Fatalf("decoding field number %d, got %d", 6, v+state.fieldnum) } - instr.op(instr, state, decIndirect(p, instr.indir)) + instr.op(instr, state, value.Elem()) state.fieldnum = 6 } func newDecodeStateFromData(data []byte) *decoderState { - b := bytes.NewBuffer(data) + b := newDecBuffer(data) state := newDecodeState(b) state.fieldnum = -1 return state @@ -342,234 +347,198 @@ func TestScalarDecInstructions(t *testing.T) { // bool { - var data struct { - a bool - } - instr := &decInstr{decBool, 6, 0, 0, ovfl} + var data bool + instr := &decInstr{decBool, 6, nil, ovfl} state := newDecodeStateFromData(boolResult) - execDec("bool", instr, state, t, unsafe.Pointer(&data)) - if data.a != true { - t.Errorf("bool a = %v not true", data.a) + execDec("bool", instr, state, t, reflect.ValueOf(&data)) + if data != true { + t.Errorf("bool a = %v not true", data) } } // int { - var data struct { - a int - } - instr := &decInstr{decOpTable[reflect.Int], 6, 0, 0, ovfl} + var data int + instr := &decInstr{decOpTable[reflect.Int], 6, nil, ovfl} state := newDecodeStateFromData(signedResult) - execDec("int", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("int a = %v not 17", data.a) + execDec("int", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("int a = %v not 17", data) } } // uint { - var data struct { - a uint - } - instr := &decInstr{decOpTable[reflect.Uint], 6, 0, 0, ovfl} + var data uint + instr := &decInstr{decOpTable[reflect.Uint], 6, nil, ovfl} state := newDecodeStateFromData(unsignedResult) - execDec("uint", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("uint a = %v not 17", data.a) + execDec("uint", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("uint a = %v not 17", data) } } // int8 { - var data struct { - a int8 - } - instr := &decInstr{decInt8, 6, 0, 0, ovfl} + var data int8 + instr := &decInstr{decInt8, 6, nil, ovfl} state := newDecodeStateFromData(signedResult) - execDec("int8", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("int8 a = %v not 17", data.a) + execDec("int8", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("int8 a = %v not 17", data) } } // uint8 { - var data struct { - a uint8 - } - instr := &decInstr{decUint8, 6, 0, 0, ovfl} + var data uint8 + instr := &decInstr{decUint8, 6, nil, ovfl} state := newDecodeStateFromData(unsignedResult) - execDec("uint8", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("uint8 a = %v not 17", data.a) + execDec("uint8", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("uint8 a = %v not 17", data) } } // int16 { - var data struct { - a int16 - } - instr := &decInstr{decInt16, 6, 0, 0, ovfl} + var data int16 + instr := &decInstr{decInt16, 6, nil, ovfl} state := newDecodeStateFromData(signedResult) - execDec("int16", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - 
t.Errorf("int16 a = %v not 17", data.a) + execDec("int16", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("int16 a = %v not 17", data) } } // uint16 { - var data struct { - a uint16 - } - instr := &decInstr{decUint16, 6, 0, 0, ovfl} + var data uint16 + instr := &decInstr{decUint16, 6, nil, ovfl} state := newDecodeStateFromData(unsignedResult) - execDec("uint16", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("uint16 a = %v not 17", data.a) + execDec("uint16", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("uint16 a = %v not 17", data) } } // int32 { - var data struct { - a int32 - } - instr := &decInstr{decInt32, 6, 0, 0, ovfl} + var data int32 + instr := &decInstr{decInt32, 6, nil, ovfl} state := newDecodeStateFromData(signedResult) - execDec("int32", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("int32 a = %v not 17", data.a) + execDec("int32", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("int32 a = %v not 17", data) } } // uint32 { - var data struct { - a uint32 - } - instr := &decInstr{decUint32, 6, 0, 0, ovfl} + var data uint32 + instr := &decInstr{decUint32, 6, nil, ovfl} state := newDecodeStateFromData(unsignedResult) - execDec("uint32", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("uint32 a = %v not 17", data.a) + execDec("uint32", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("uint32 a = %v not 17", data) } } // uintptr { - var data struct { - a uintptr - } - instr := &decInstr{decOpTable[reflect.Uintptr], 6, 0, 0, ovfl} + var data uintptr + instr := &decInstr{decOpTable[reflect.Uintptr], 6, nil, ovfl} state := newDecodeStateFromData(unsignedResult) - execDec("uintptr", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("uintptr a = %v not 17", data.a) + execDec("uintptr", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("uintptr a = %v not 17", data) } } // int64 { - var data struct { - a int64 - } - instr := &decInstr{decInt64, 6, 0, 0, ovfl} + var data int64 + instr := &decInstr{decInt64, 6, nil, ovfl} state := newDecodeStateFromData(signedResult) - execDec("int64", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("int64 a = %v not 17", data.a) + execDec("int64", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("int64 a = %v not 17", data) } } // uint64 { - var data struct { - a uint64 - } - instr := &decInstr{decUint64, 6, 0, 0, ovfl} + var data uint64 + instr := &decInstr{decUint64, 6, nil, ovfl} state := newDecodeStateFromData(unsignedResult) - execDec("uint64", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("uint64 a = %v not 17", data.a) + execDec("uint64", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("uint64 a = %v not 17", data) } } // float32 { - var data struct { - a float32 - } - instr := &decInstr{decFloat32, 6, 0, 0, ovfl} + var data float32 + instr := &decInstr{decFloat32, 6, nil, ovfl} state := newDecodeStateFromData(floatResult) - execDec("float32", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("float32 a = %v not 17", data.a) + execDec("float32", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("float32 a = %v not 17", data) } } // float64 { - var data struct { - a float64 - } - instr := &decInstr{decFloat64, 6, 0, 0, ovfl} + var data float64 + instr := &decInstr{decFloat64, 6, nil, ovfl} state := 
newDecodeStateFromData(floatResult) - execDec("float64", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17 { - t.Errorf("float64 a = %v not 17", data.a) + execDec("float64", instr, state, t, reflect.ValueOf(&data)) + if data != 17 { + t.Errorf("float64 a = %v not 17", data) } } // complex64 { - var data struct { - a complex64 - } - instr := &decInstr{decOpTable[reflect.Complex64], 6, 0, 0, ovfl} + var data complex64 + instr := &decInstr{decOpTable[reflect.Complex64], 6, nil, ovfl} state := newDecodeStateFromData(complexResult) - execDec("complex", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17+19i { - t.Errorf("complex a = %v not 17+19i", data.a) + execDec("complex", instr, state, t, reflect.ValueOf(&data)) + if data != 17+19i { + t.Errorf("complex a = %v not 17+19i", data) } } // complex128 { - var data struct { - a complex128 - } - instr := &decInstr{decOpTable[reflect.Complex128], 6, 0, 0, ovfl} + var data complex128 + instr := &decInstr{decOpTable[reflect.Complex128], 6, nil, ovfl} state := newDecodeStateFromData(complexResult) - execDec("complex", instr, state, t, unsafe.Pointer(&data)) - if data.a != 17+19i { - t.Errorf("complex a = %v not 17+19i", data.a) + execDec("complex", instr, state, t, reflect.ValueOf(&data)) + if data != 17+19i { + t.Errorf("complex a = %v not 17+19i", data) } } // bytes == []uint8 { - var data struct { - a []byte - } - instr := &decInstr{decUint8Slice, 6, 0, 0, ovfl} + var data []byte + instr := &decInstr{decUint8Slice, 6, nil, ovfl} state := newDecodeStateFromData(bytesResult) - execDec("bytes", instr, state, t, unsafe.Pointer(&data)) - if string(data.a) != "hello" { - t.Errorf(`bytes a = %q not "hello"`, string(data.a)) + execDec("bytes", instr, state, t, reflect.ValueOf(&data)) + if string(data) != "hello" { + t.Errorf(`bytes a = %q not "hello"`, string(data)) } } // string { - var data struct { - a string - } - instr := &decInstr{decString, 6, 0, 0, ovfl} + var data string + instr := &decInstr{decString, 6, nil, ovfl} state := newDecodeStateFromData(bytesResult) - execDec("bytes", instr, state, t, unsafe.Pointer(&data)) - if data.a != "hello" { - t.Errorf(`bytes a = %q not "hello"`, data.a) + execDec("bytes", instr, state, t, reflect.ValueOf(&data)) + if data != "hello" { + t.Errorf(`bytes a = %q not "hello"`, data) } } } diff --git a/libgo/go/encoding/gob/debug.go b/libgo/go/encoding/gob/debug.go index 6117eb08373..536bbdb5ac6 100644 --- a/libgo/go/encoding/gob/debug.go +++ b/libgo/go/encoding/gob/debug.go @@ -306,7 +306,7 @@ func (deb *debugger) common() CommonType { // Id typeId id = deb.typeId() default: - errorf("corrupted CommonType") + errorf("corrupted CommonType, delta is %d fieldNum is %d", delta, fieldNum) } } return CommonType{name, id} @@ -598,11 +598,11 @@ func (deb *debugger) printBuiltin(indent tab, id typeId) { fmt.Fprintf(os.Stderr, "%s%d\n", indent, x) case tFloat: x := deb.uint64() - fmt.Fprintf(os.Stderr, "%s%g\n", indent, floatFromBits(x)) + fmt.Fprintf(os.Stderr, "%s%g\n", indent, float64FromBits(x)) case tComplex: r := deb.uint64() i := deb.uint64() - fmt.Fprintf(os.Stderr, "%s%g+%gi\n", indent, floatFromBits(r), floatFromBits(i)) + fmt.Fprintf(os.Stderr, "%s%g+%gi\n", indent, float64FromBits(r), float64FromBits(i)) case tBytes: x := int(deb.uint64()) b := make([]byte, x) diff --git a/libgo/go/encoding/gob/dec_helpers.go b/libgo/go/encoding/gob/dec_helpers.go new file mode 100644 index 00000000000..a1b67661d8f --- /dev/null +++ b/libgo/go/encoding/gob/dec_helpers.go @@ -0,0 +1,468 @@ +// Created by decgen 
--output dec_helpers.go; DO NOT EDIT + +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gob + +import ( + "math" + "reflect" +) + +var decArrayHelper = map[reflect.Kind]decHelper{ + reflect.Bool: decBoolArray, + reflect.Complex64: decComplex64Array, + reflect.Complex128: decComplex128Array, + reflect.Float32: decFloat32Array, + reflect.Float64: decFloat64Array, + reflect.Int: decIntArray, + reflect.Int16: decInt16Array, + reflect.Int32: decInt32Array, + reflect.Int64: decInt64Array, + reflect.Int8: decInt8Array, + reflect.String: decStringArray, + reflect.Uint: decUintArray, + reflect.Uint16: decUint16Array, + reflect.Uint32: decUint32Array, + reflect.Uint64: decUint64Array, + reflect.Uintptr: decUintptrArray, +} + +var decSliceHelper = map[reflect.Kind]decHelper{ + reflect.Bool: decBoolSlice, + reflect.Complex64: decComplex64Slice, + reflect.Complex128: decComplex128Slice, + reflect.Float32: decFloat32Slice, + reflect.Float64: decFloat64Slice, + reflect.Int: decIntSlice, + reflect.Int16: decInt16Slice, + reflect.Int32: decInt32Slice, + reflect.Int64: decInt64Slice, + reflect.Int8: decInt8Slice, + reflect.String: decStringSlice, + reflect.Uint: decUintSlice, + reflect.Uint16: decUint16Slice, + reflect.Uint32: decUint32Slice, + reflect.Uint64: decUint64Slice, + reflect.Uintptr: decUintptrSlice, +} + +func decBoolArray(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decBoolSlice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decBoolSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]bool) + if !ok { + // It is kind bool but not type bool. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding bool array or slice: length exceeds input size (%d elements)", length) + } + slice[i] = state.decodeUint() != 0 + } + return true +} + +func decComplex64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decComplex64Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decComplex64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]complex64) + if !ok { + // It is kind complex64 but not type complex64. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding complex64 array or slice: length exceeds input size (%d elements)", length) + } + real := float32FromBits(state.decodeUint(), ovfl) + imag := float32FromBits(state.decodeUint(), ovfl) + slice[i] = complex(float32(real), float32(imag)) + } + return true +} + +func decComplex128Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decComplex128Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decComplex128Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]complex128) + if !ok { + // It is kind complex128 but not type complex128. TODO: We can handle this unsafely. 
+ return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding complex128 array or slice: length exceeds input size (%d elements)", length) + } + real := float64FromBits(state.decodeUint()) + imag := float64FromBits(state.decodeUint()) + slice[i] = complex(real, imag) + } + return true +} + +func decFloat32Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decFloat32Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decFloat32Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]float32) + if !ok { + // It is kind float32 but not type float32. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding float32 array or slice: length exceeds input size (%d elements)", length) + } + slice[i] = float32(float32FromBits(state.decodeUint(), ovfl)) + } + return true +} + +func decFloat64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decFloat64Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decFloat64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]float64) + if !ok { + // It is kind float64 but not type float64. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding float64 array or slice: length exceeds input size (%d elements)", length) + } + slice[i] = float64FromBits(state.decodeUint()) + } + return true +} + +func decIntArray(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decIntSlice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decIntSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]int) + if !ok { + // It is kind int but not type int. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding int array or slice: length exceeds input size (%d elements)", length) + } + x := state.decodeInt() + // MinInt and MaxInt + if x < ^int64(^uint(0)>>1) || int64(^uint(0)>>1) < x { + error_(ovfl) + } + slice[i] = int(x) + } + return true +} + +func decInt16Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decInt16Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decInt16Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]int16) + if !ok { + // It is kind int16 but not type int16. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding int16 array or slice: length exceeds input size (%d elements)", length) + } + x := state.decodeInt() + if x < math.MinInt16 || math.MaxInt16 < x { + error_(ovfl) + } + slice[i] = int16(x) + } + return true +} + +func decInt32Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. 
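To see what these generated slice helpers mean at the package level, a small round-trip sketch using the ordinary exported gob API (the sample slice is arbitrary); decoding a []float64 is expected to take the new slice fast path rather than the element-at-a-time decoder.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

func main() {
	var buf bytes.Buffer

	in := []float64{1.5, 2.5, 3.5}
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		log.Fatal(err)
	}

	var out []float64
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // [1.5 2.5 3.5]
}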
+ if !v.CanAddr() { + return false + } + return decInt32Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decInt32Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]int32) + if !ok { + // It is kind int32 but not type int32. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding int32 array or slice: length exceeds input size (%d elements)", length) + } + x := state.decodeInt() + if x < math.MinInt32 || math.MaxInt32 < x { + error_(ovfl) + } + slice[i] = int32(x) + } + return true +} + +func decInt64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decInt64Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decInt64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]int64) + if !ok { + // It is kind int64 but not type int64. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding int64 array or slice: length exceeds input size (%d elements)", length) + } + slice[i] = state.decodeInt() + } + return true +} + +func decInt8Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decInt8Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decInt8Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]int8) + if !ok { + // It is kind int8 but not type int8. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding int8 array or slice: length exceeds input size (%d elements)", length) + } + x := state.decodeInt() + if x < math.MinInt8 || math.MaxInt8 < x { + error_(ovfl) + } + slice[i] = int8(x) + } + return true +} + +func decStringArray(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decStringSlice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decStringSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]string) + if !ok { + // It is kind string but not type string. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding string array or slice: length exceeds input size (%d elements)", length) + } + u := state.decodeUint() + n := int(u) + if n < 0 || uint64(n) != u || n > state.b.Len() { + errorf("length of string exceeds input size (%d bytes)", u) + } + if n > state.b.Len() { + errorf("string data too long for buffer: %d", n) + } + // Read the data. + data := make([]byte, n) + if _, err := state.b.Read(data); err != nil { + errorf("error decoding string: %s", err) + } + slice[i] = string(data) + } + return true +} + +func decUintArray(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decUintSlice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decUintSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]uint) + if !ok { + // It is kind uint but not type uint. 
TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding uint array or slice: length exceeds input size (%d elements)", length) + } + x := state.decodeUint() + /*TODO if math.MaxUint32 < x { + error_(ovfl) + }*/ + slice[i] = uint(x) + } + return true +} + +func decUint16Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decUint16Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decUint16Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]uint16) + if !ok { + // It is kind uint16 but not type uint16. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding uint16 array or slice: length exceeds input size (%d elements)", length) + } + x := state.decodeUint() + if math.MaxUint16 < x { + error_(ovfl) + } + slice[i] = uint16(x) + } + return true +} + +func decUint32Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decUint32Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decUint32Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]uint32) + if !ok { + // It is kind uint32 but not type uint32. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding uint32 array or slice: length exceeds input size (%d elements)", length) + } + x := state.decodeUint() + if math.MaxUint32 < x { + error_(ovfl) + } + slice[i] = uint32(x) + } + return true +} + +func decUint64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decUint64Slice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decUint64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]uint64) + if !ok { + // It is kind uint64 but not type uint64. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding uint64 array or slice: length exceeds input size (%d elements)", length) + } + slice[i] = state.decodeUint() + } + return true +} + +func decUintptrArray(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return decUintptrSlice(state, v.Slice(0, v.Len()), length, ovfl) +} + +func decUintptrSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]uintptr) + if !ok { + // It is kind uintptr but not type uintptr. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding uintptr array or slice: length exceeds input size (%d elements)", length) + } + x := state.decodeUint() + if uint64(^uintptr(0)) < x { + error_(ovfl) + } + slice[i] = uintptr(x) + } + return true +} diff --git a/libgo/go/encoding/gob/decgen.go b/libgo/go/encoding/gob/decgen.go new file mode 100644 index 00000000000..da41a899ed0 --- /dev/null +++ b/libgo/go/encoding/gob/decgen.go @@ -0,0 +1,240 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// encgen writes the helper functions for encoding. Intended to be +// used with go generate; see the invocation in encode.go. + +// TODO: We could do more by being unsafe. Add a -unsafe flag? + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "log" + "os" +) + +var output = flag.String("output", "dec_helpers.go", "file name to write") + +type Type struct { + lower string + upper string + decoder string +} + +var types = []Type{ + { + "bool", + "Bool", + `slice[i] = state.decodeUint() != 0`, + }, + { + "complex64", + "Complex64", + `real := float32FromBits(state.decodeUint(), ovfl) + imag := float32FromBits(state.decodeUint(), ovfl) + slice[i] = complex(float32(real), float32(imag))`, + }, + { + "complex128", + "Complex128", + `real := float64FromBits(state.decodeUint()) + imag := float64FromBits(state.decodeUint()) + slice[i] = complex(real, imag)`, + }, + { + "float32", + "Float32", + `slice[i] = float32(float32FromBits(state.decodeUint(), ovfl))`, + }, + { + "float64", + "Float64", + `slice[i] = float64FromBits(state.decodeUint())`, + }, + { + "int", + "Int", + `x := state.decodeInt() + // MinInt and MaxInt + if x < ^int64(^uint(0)>>1) || int64(^uint(0)>>1) < x { + error_(ovfl) + } + slice[i] = int(x)`, + }, + { + "int16", + "Int16", + `x := state.decodeInt() + if x < math.MinInt16 || math.MaxInt16 < x { + error_(ovfl) + } + slice[i] = int16(x)`, + }, + { + "int32", + "Int32", + `x := state.decodeInt() + if x < math.MinInt32 || math.MaxInt32 < x { + error_(ovfl) + } + slice[i] = int32(x)`, + }, + { + "int64", + "Int64", + `slice[i] = state.decodeInt()`, + }, + { + "int8", + "Int8", + `x := state.decodeInt() + if x < math.MinInt8 || math.MaxInt8 < x { + error_(ovfl) + } + slice[i] = int8(x)`, + }, + { + "string", + "String", + `u := state.decodeUint() + n := int(u) + if n < 0 || uint64(n) != u || n > state.b.Len() { + errorf("length of string exceeds input size (%d bytes)", u) + } + if n > state.b.Len() { + errorf("string data too long for buffer: %d", n) + } + // Read the data. + data := make([]byte, n) + if _, err := state.b.Read(data); err != nil { + errorf("error decoding string: %s", err) + } + slice[i] = string(data)`, + }, + { + "uint", + "Uint", + `x := state.decodeUint() + /*TODO if math.MaxUint32 < x { + error_(ovfl) + }*/ + slice[i] = uint(x)`, + }, + { + "uint16", + "Uint16", + `x := state.decodeUint() + if math.MaxUint16 < x { + error_(ovfl) + } + slice[i] = uint16(x)`, + }, + { + "uint32", + "Uint32", + `x := state.decodeUint() + if math.MaxUint32 < x { + error_(ovfl) + } + slice[i] = uint32(x)`, + }, + { + "uint64", + "Uint64", + `slice[i] = state.decodeUint()`, + }, + { + "uintptr", + "Uintptr", + `x := state.decodeUint() + if uint64(^uintptr(0)) < x { + error_(ovfl) + } + slice[i] = uintptr(x)`, + }, + // uint8 Handled separately. 
+} + +func main() { + log.SetFlags(0) + log.SetPrefix("decgen: ") + flag.Parse() + if flag.NArg() != 0 { + log.Fatal("usage: decgen [--output filename]") + } + var b bytes.Buffer + fmt.Fprintf(&b, "// Created by decgen --output %s; DO NOT EDIT\n", *output) + fmt.Fprint(&b, header) + printMaps(&b, "Array") + fmt.Fprint(&b, "\n") + printMaps(&b, "Slice") + for _, t := range types { + fmt.Fprintf(&b, arrayHelper, t.lower, t.upper) + fmt.Fprintf(&b, sliceHelper, t.lower, t.upper, t.decoder) + } + source, err := format.Source(b.Bytes()) + if err != nil { + log.Fatal("source format error:", err) + } + fd, err := os.Create(*output) + _, err = fd.Write(source) + if err != nil { + log.Fatal(err) + } +} + +func printMaps(b *bytes.Buffer, upperClass string) { + fmt.Fprintf(b, "var dec%sHelper = map[reflect.Kind]decHelper{\n", upperClass) + for _, t := range types { + fmt.Fprintf(b, "reflect.%s: dec%s%s,\n", t.upper, t.upper, upperClass) + } + fmt.Fprintf(b, "}\n") +} + +const header = ` +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gob + +import ( + "math" + "reflect" +) + +` + +const arrayHelper = ` +func dec%[2]sArray(state *decoderState, v reflect.Value, length int, ovfl error) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return dec%[2]sSlice(state, v.Slice(0, v.Len()), length, ovfl) +} +` + +const sliceHelper = ` +func dec%[2]sSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool { + slice, ok := v.Interface().([]%[1]s) + if !ok { + // It is kind %[1]s but not type %[1]s. TODO: We can handle this unsafely. + return false + } + for i := 0; i < length; i++ { + if state.b.Len() == 0 { + errorf("decoding %[1]s array or slice: length exceeds input size (%%d elements)", length) + } + %[3]s + } + return true +} +` diff --git a/libgo/go/encoding/gob/decode.go b/libgo/go/encoding/gob/decode.go index d8513148ec2..a5bef93141b 100644 --- a/libgo/go/encoding/gob/decode.go +++ b/libgo/go/encoding/gob/decode.go @@ -2,19 +2,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package gob +//go:generate go run decgen.go -output dec_helpers.go -// TODO(rsc): When garbage collector changes, revisit -// the allocations in this file that use unsafe.Pointer. +package gob import ( - "bytes" "encoding" "errors" "io" "math" "reflect" - "unsafe" ) var ( @@ -23,21 +20,79 @@ var ( errRange = errors.New("gob: bad data: field numbers out of bounds") ) +type decHelper func(state *decoderState, v reflect.Value, length int, ovfl error) bool + // decoderState is the execution state of an instance of the decoder. A new state // is created for nested objects. type decoderState struct { dec *Decoder // The buffer is stored with an extra indirection because it may be replaced // if we load a type during decode (when reading an interface value). - b *bytes.Buffer + b *decBuffer fieldnum int // the last field number read. buf []byte next *decoderState // for free list } +// decBuffer is an extremely simple, fast implementation of a read-only byte buffer. +// It is initialized by calling Size and then copying the data into the slice returned by Bytes(). +type decBuffer struct { + data []byte + offset int // Read offset. 
+} + +func (d *decBuffer) Read(p []byte) (int, error) { + n := copy(p, d.data[d.offset:]) + if n == 0 && len(p) != 0 { + return 0, io.EOF + } + d.offset += n + return n, nil +} + +func (d *decBuffer) Drop(n int) { + if n > d.Len() { + panic("drop") + } + d.offset += n +} + +// Size grows the buffer to exactly n bytes, so d.Bytes() will +// return a slice of length n. Existing data is first discarded. +func (d *decBuffer) Size(n int) { + d.Reset() + if cap(d.data) < n { + d.data = make([]byte, n) + } else { + d.data = d.data[0:n] + } +} + +func (d *decBuffer) ReadByte() (byte, error) { + if d.offset >= len(d.data) { + return 0, io.EOF + } + c := d.data[d.offset] + d.offset++ + return c, nil +} + +func (d *decBuffer) Len() int { + return len(d.data) - d.offset +} + +func (d *decBuffer) Bytes() []byte { + return d.data[d.offset:] +} + +func (d *decBuffer) Reset() { + d.data = d.data[0:0] + d.offset = 0 +} + // We pass the bytes.Buffer separately for easier testing of the infrastructure // without requiring a full Decoder. -func (dec *Decoder) newDecoderState(buf *bytes.Buffer) *decoderState { +func (dec *Decoder) newDecoderState(buf *decBuffer) *decoderState { d := dec.freeList if d == nil { d = new(decoderState) @@ -128,175 +183,118 @@ func (state *decoderState) decodeInt() int64 { } // decOp is the signature of a decoding operator for a given type. -type decOp func(i *decInstr, state *decoderState, p unsafe.Pointer) +type decOp func(i *decInstr, state *decoderState, v reflect.Value) // The 'instructions' of the decoding machine type decInstr struct { - op decOp - field int // field number of the wire type - indir int // how many pointer indirections to reach the value in the struct - offset uintptr // offset in the structure of the field to encode - ovfl error // error message for overflow/underflow (for arrays, of the elements) -} - -// Since the encoder writes no zeros, if we arrive at a decoder we have -// a value to extract and store. The field number has already been read -// (it's how we knew to call this decoder). -// Each decoder is responsible for handling any indirections associated -// with the data structure. If any pointer so reached is nil, allocation must -// be done. - -// Walk the pointer hierarchy, allocating if we find a nil. Stop one before the end. -func decIndirect(p unsafe.Pointer, indir int) unsafe.Pointer { - for ; indir > 1; indir-- { - if *(*unsafe.Pointer)(p) == nil { - // Allocation required - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(unsafe.Pointer)) - } - p = *(*unsafe.Pointer)(p) - } - return p + op decOp + field int // field number of the wire type + index []int // field access indices for destination type + ovfl error // error message for overflow/underflow (for arrays, of the elements) } // ignoreUint discards a uint value with no destination. -func ignoreUint(i *decInstr, state *decoderState, p unsafe.Pointer) { +func ignoreUint(i *decInstr, state *decoderState, v reflect.Value) { state.decodeUint() } // ignoreTwoUints discards a uint value with no destination. It's used to skip // complex values. -func ignoreTwoUints(i *decInstr, state *decoderState, p unsafe.Pointer) { +func ignoreTwoUints(i *decInstr, state *decoderState, v reflect.Value) { state.decodeUint() state.decodeUint() } -// decBool decodes a uint and stores it as a boolean through p. 
-func decBool(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(bool)) +// Since the encoder writes no zeros, if we arrive at a decoder we have +// a value to extract and store. The field number has already been read +// (it's how we knew to call this decoder). +// Each decoder is responsible for handling any indirections associated +// with the data structure. If any pointer so reached is nil, allocation must +// be done. + +// decAlloc takes a value and returns a settable value that can +// be assigned to. If the value is a pointer, decAlloc guarantees it points to storage. +// The callers to the individual decoders are expected to have used decAlloc. +// The individual decoders don't need to it. +func decAlloc(v reflect.Value) reflect.Value { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) } - p = *(*unsafe.Pointer)(p) + v = v.Elem() } - *(*bool)(p) = state.decodeUint() != 0 + return v } -// decInt8 decodes an integer and stores it as an int8 through p. -func decInt8(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int8)) - } - p = *(*unsafe.Pointer)(p) - } +// decBool decodes a uint and stores it as a boolean in value. +func decBool(i *decInstr, state *decoderState, value reflect.Value) { + value.SetBool(state.decodeUint() != 0) +} + +// decInt8 decodes an integer and stores it as an int8 in value. +func decInt8(i *decInstr, state *decoderState, value reflect.Value) { v := state.decodeInt() if v < math.MinInt8 || math.MaxInt8 < v { error_(i.ovfl) - } else { - *(*int8)(p) = int8(v) } + value.SetInt(v) } -// decUint8 decodes an unsigned integer and stores it as a uint8 through p. -func decUint8(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint8)) - } - p = *(*unsafe.Pointer)(p) - } +// decUint8 decodes an unsigned integer and stores it as a uint8 in value. +func decUint8(i *decInstr, state *decoderState, value reflect.Value) { v := state.decodeUint() if math.MaxUint8 < v { error_(i.ovfl) - } else { - *(*uint8)(p) = uint8(v) } + value.SetUint(v) } -// decInt16 decodes an integer and stores it as an int16 through p. -func decInt16(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int16)) - } - p = *(*unsafe.Pointer)(p) - } +// decInt16 decodes an integer and stores it as an int16 in value. +func decInt16(i *decInstr, state *decoderState, value reflect.Value) { v := state.decodeInt() if v < math.MinInt16 || math.MaxInt16 < v { error_(i.ovfl) - } else { - *(*int16)(p) = int16(v) } + value.SetInt(v) } -// decUint16 decodes an unsigned integer and stores it as a uint16 through p. -func decUint16(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint16)) - } - p = *(*unsafe.Pointer)(p) - } +// decUint16 decodes an unsigned integer and stores it as a uint16 in value. +func decUint16(i *decInstr, state *decoderState, value reflect.Value) { v := state.decodeUint() if math.MaxUint16 < v { error_(i.ovfl) - } else { - *(*uint16)(p) = uint16(v) } + value.SetUint(v) } -// decInt32 decodes an integer and stores it as an int32 through p. 
-func decInt32(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int32)) - } - p = *(*unsafe.Pointer)(p) - } +// decInt32 decodes an integer and stores it as an int32 in value. +func decInt32(i *decInstr, state *decoderState, value reflect.Value) { v := state.decodeInt() if v < math.MinInt32 || math.MaxInt32 < v { error_(i.ovfl) - } else { - *(*int32)(p) = int32(v) } + value.SetInt(v) } -// decUint32 decodes an unsigned integer and stores it as a uint32 through p. -func decUint32(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint32)) - } - p = *(*unsafe.Pointer)(p) - } +// decUint32 decodes an unsigned integer and stores it as a uint32 in value. +func decUint32(i *decInstr, state *decoderState, value reflect.Value) { v := state.decodeUint() if math.MaxUint32 < v { error_(i.ovfl) - } else { - *(*uint32)(p) = uint32(v) } + value.SetUint(v) } -// decInt64 decodes an integer and stores it as an int64 through p. -func decInt64(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int64)) - } - p = *(*unsafe.Pointer)(p) - } - *(*int64)(p) = int64(state.decodeInt()) +// decInt64 decodes an integer and stores it as an int64 in value. +func decInt64(i *decInstr, state *decoderState, value reflect.Value) { + v := state.decodeInt() + value.SetInt(v) } -// decUint64 decodes an unsigned integer and stores it as a uint64 through p. -func decUint64(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint64)) - } - p = *(*unsafe.Pointer)(p) - } - *(*uint64)(p) = uint64(state.decodeUint()) +// decUint64 decodes an unsigned integer and stores it as a uint64 in value. +func decUint64(i *decInstr, state *decoderState, value reflect.Value) { + v := state.decodeUint() + value.SetUint(v) } // Floating-point numbers are transmitted as uint64s holding the bits @@ -304,7 +302,7 @@ func decUint64(i *decInstr, state *decoderState, p unsafe.Pointer) { // the exponent end coming out first, so integer floating point numbers // (for example) transmit more compactly. This routine does the // unswizzling. -func floatFromBits(u uint64) float64 { +func float64FromBits(u uint64) float64 { var v uint64 for i := 0; i < 8; i++ { v <<= 8 @@ -314,128 +312,100 @@ func floatFromBits(u uint64) float64 { return math.Float64frombits(v) } -// storeFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point -// number, and stores it through p. It's a helper function for float32 and complex64. -func storeFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) { - v := floatFromBits(state.decodeUint()) +// float32FromBits decodes an unsigned integer, treats it as a 32-bit floating-point +// number, and returns it. It's a helper function for float32 and complex64. +// It returns a float64 because that's what reflection needs, but its return +// value is known to be accurately representable in a float32. +func float32FromBits(u uint64, ovfl error) float64 { + v := float64FromBits(u) av := v if av < 0 { av = -av } // +Inf is OK in both 32- and 64-bit floats. Underflow is always OK. 
if math.MaxFloat32 < av && av <= math.MaxFloat64 { - error_(i.ovfl) - } else { - *(*float32)(p) = float32(v) + error_(ovfl) } + return v } // decFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point -// number, and stores it through p. -func decFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(float32)) - } - p = *(*unsafe.Pointer)(p) - } - storeFloat32(i, state, p) +// number, and stores it in value. +func decFloat32(i *decInstr, state *decoderState, value reflect.Value) { + value.SetFloat(float32FromBits(state.decodeUint(), i.ovfl)) } // decFloat64 decodes an unsigned integer, treats it as a 64-bit floating-point -// number, and stores it through p. -func decFloat64(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(float64)) - } - p = *(*unsafe.Pointer)(p) - } - *(*float64)(p) = floatFromBits(uint64(state.decodeUint())) +// number, and stores it in value. +func decFloat64(i *decInstr, state *decoderState, value reflect.Value) { + value.SetFloat(float64FromBits(state.decodeUint())) } // decComplex64 decodes a pair of unsigned integers, treats them as a -// pair of floating point numbers, and stores them as a complex64 through p. +// pair of floating point numbers, and stores them as a complex64 in value. // The real part comes first. -func decComplex64(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex64)) - } - p = *(*unsafe.Pointer)(p) - } - storeFloat32(i, state, p) - storeFloat32(i, state, unsafe.Pointer(uintptr(p)+unsafe.Sizeof(float32(0)))) +func decComplex64(i *decInstr, state *decoderState, value reflect.Value) { + real := float32FromBits(state.decodeUint(), i.ovfl) + imag := float32FromBits(state.decodeUint(), i.ovfl) + value.SetComplex(complex(real, imag)) } // decComplex128 decodes a pair of unsigned integers, treats them as a -// pair of floating point numbers, and stores them as a complex128 through p. +// pair of floating point numbers, and stores them as a complex128 in value. // The real part comes first. -func decComplex128(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex128)) - } - p = *(*unsafe.Pointer)(p) - } - real := floatFromBits(uint64(state.decodeUint())) - imag := floatFromBits(uint64(state.decodeUint())) - *(*complex128)(p) = complex(real, imag) +func decComplex128(i *decInstr, state *decoderState, value reflect.Value) { + real := float64FromBits(state.decodeUint()) + imag := float64FromBits(state.decodeUint()) + value.SetComplex(complex(real, imag)) } -// decUint8Slice decodes a byte slice and stores through p a slice header +// decUint8Slice decodes a byte slice and stores in value a slice header // describing the data. // uint8 slices are encoded as an unsigned count followed by the raw bytes. 
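// As a small worked example (illustrative only, not taken from this patch, and
// ignoring the surrounding message and field framing): the value []byte{0x01, 0x02}
// is transmitted as the unsigned count 2 followed by the two raw bytes, that is,
// the three bytes 02 01 02.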
-func decUint8Slice(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new([]uint8)) - } - p = *(*unsafe.Pointer)(p) +func decUint8Slice(i *decInstr, state *decoderState, value reflect.Value) { + u := state.decodeUint() + n := int(u) + if n < 0 || uint64(n) != u { + errorf("length of %s exceeds input size (%d bytes)", value.Type(), u) + } + if n > state.b.Len() { + errorf("%s data too long for buffer: %d", value.Type(), n) } - n := state.decodeUint() - if n > uint64(state.b.Len()) { - errorf("length of []byte exceeds input size (%d bytes)", n) + if n > tooBig { + errorf("byte slice too big: %d", n) } - slice := (*[]uint8)(p) - if uint64(cap(*slice)) < n { - *slice = make([]uint8, n) + if value.Cap() < n { + value.Set(reflect.MakeSlice(value.Type(), n, n)) } else { - *slice = (*slice)[0:n] + value.Set(value.Slice(0, n)) } - if _, err := state.b.Read(*slice); err != nil { + if _, err := state.b.Read(value.Bytes()); err != nil { errorf("error decoding []byte: %s", err) } } -// decString decodes byte array and stores through p a string header +// decString decodes byte array and stores in value a string header // describing the data. // Strings are encoded as an unsigned count followed by the raw bytes. -func decString(i *decInstr, state *decoderState, p unsafe.Pointer) { - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(new(string)) - } - p = *(*unsafe.Pointer)(p) +func decString(i *decInstr, state *decoderState, value reflect.Value) { + u := state.decodeUint() + n := int(u) + if n < 0 || uint64(n) != u || n > state.b.Len() { + errorf("length of %s exceeds input size (%d bytes)", value.Type(), u) } - n := state.decodeUint() - if n > uint64(state.b.Len()) { - errorf("string length exceeds input size (%d bytes)", n) + if n > state.b.Len() { + errorf("%s data too long for buffer: %d", value.Type(), n) } - b := make([]byte, n) - state.b.Read(b) - // It would be a shame to do the obvious thing here, - // *(*string)(p) = string(b) - // because we've already allocated the storage and this would - // allocate again and copy. So we do this ugly hack, which is even - // even more unsafe than it looks as it depends the memory - // representation of a string matching the beginning of the memory - // representation of a byte slice (a byte slice is longer). - *(*string)(p) = *(*string)(unsafe.Pointer(&b)) + // Read the data. + data := make([]byte, n) + if _, err := state.b.Read(data); err != nil { + errorf("error decoding string: %s", err) + } + value.SetString(string(data)) } // ignoreUint8Array skips over the data for a byte slice value with no destination. -func ignoreUint8Array(i *decInstr, state *decoderState, p unsafe.Pointer) { +func ignoreUint8Array(i *decInstr, state *decoderState, value reflect.Value) { b := make([]byte, state.decodeUint()) state.b.Read(b) } @@ -449,55 +419,29 @@ type decEngine struct { numInstr int // the number of active instructions } -// allocate makes sure storage is available for an object of underlying type rtyp -// that is indir levels of indirection through p. -func allocate(rtyp reflect.Type, p unsafe.Pointer, indir int) unsafe.Pointer { - if indir == 0 { - return p - } - up := p - if indir > 1 { - up = decIndirect(up, indir) - } - if *(*unsafe.Pointer)(up) == nil { - // Allocate object. 
- *(*unsafe.Pointer)(up) = unsafe.Pointer(reflect.New(rtyp).Pointer()) - } - return *(*unsafe.Pointer)(up) -} - -// decodeSingle decodes a top-level value that is not a struct and stores it through p. +// decodeSingle decodes a top-level value that is not a struct and stores it in value. // Such values are preceded by a zero, making them have the memory layout of a // struct field (although with an illegal field number). -func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, basep unsafe.Pointer) { +func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, value reflect.Value) { state := dec.newDecoderState(&dec.buf) + defer dec.freeDecoderState(state) state.fieldnum = singletonField - delta := int(state.decodeUint()) - if delta != 0 { + if state.decodeUint() != 0 { errorf("decode: corrupted data: non-zero delta for singleton") } instr := &engine.instr[singletonField] - if instr.indir != ut.indir { - errorf("internal error: inconsistent indirection instr %d ut %d", instr.indir, ut.indir) - } - ptr := basep // offset will be zero - if instr.indir > 1 { - ptr = decIndirect(ptr, instr.indir) - } - instr.op(instr, state, ptr) - dec.freeDecoderState(state) + instr.op(instr, state, value) } -// decodeStruct decodes a top-level struct and stores it through p. +// decodeStruct decodes a top-level struct and stores it in value. // Indir is for the value, not the type. At the time of the call it may // differ from ut.indir, which was computed when the engine was built. // This state cannot arise for decodeSingle, which is called directly // from the user's value, not from the innards of an engine. -func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p unsafe.Pointer, indir int) { - p = allocate(ut.base, p, indir) +func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, value reflect.Value) { state := dec.newDecoderState(&dec.buf) + defer dec.freeDecoderState(state) state.fieldnum = -1 - basep := p for state.b.Len() > 0 { delta := int(state.decodeUint()) if delta < 0 { @@ -512,19 +456,25 @@ func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p unsafe.P break } instr := &engine.instr[fieldnum] - p := unsafe.Pointer(uintptr(basep) + instr.offset) - if instr.indir > 1 { - p = decIndirect(p, instr.indir) + var field reflect.Value + if instr.index != nil { + // Otherwise the field is unknown to us and instr.op is an ignore op. + field = value.FieldByIndex(instr.index) + if field.Kind() == reflect.Ptr { + field = decAlloc(field) + } } - instr.op(instr, state, p) + instr.op(instr, state, field) state.fieldnum = fieldnum } - dec.freeDecoderState(state) } +var noValue reflect.Value + // ignoreStruct discards the data for a struct with no destination. func (dec *Decoder) ignoreStruct(engine *decEngine) { state := dec.newDecoderState(&dec.buf) + defer dec.freeDecoderState(state) state.fieldnum = -1 for state.b.Len() > 0 { delta := int(state.decodeUint()) @@ -539,97 +489,89 @@ func (dec *Decoder) ignoreStruct(engine *decEngine) { error_(errRange) } instr := &engine.instr[fieldnum] - instr.op(instr, state, unsafe.Pointer(nil)) + instr.op(instr, state, noValue) state.fieldnum = fieldnum } - dec.freeDecoderState(state) } // ignoreSingle discards the data for a top-level non-struct value with no // destination. It's used when calling Decode with a nil value. 
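// A caller-side sketch of that case (assumed user code, not part of this patch):
//
//	dec := gob.NewDecoder(conn) // conn is some io.Reader
//	err := dec.Decode(nil)      // reads and discards the next value in the stream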
func (dec *Decoder) ignoreSingle(engine *decEngine) { state := dec.newDecoderState(&dec.buf) + defer dec.freeDecoderState(state) state.fieldnum = singletonField delta := int(state.decodeUint()) if delta != 0 { errorf("decode: corrupted data: non-zero delta for singleton") } instr := &engine.instr[singletonField] - instr.op(instr, state, unsafe.Pointer(nil)) - dec.freeDecoderState(state) + instr.op(instr, state, noValue) } // decodeArrayHelper does the work for decoding arrays and slices. -func (dec *Decoder) decodeArrayHelper(state *decoderState, p unsafe.Pointer, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl error) { - instr := &decInstr{elemOp, 0, elemIndir, 0, ovfl} +func (dec *Decoder) decodeArrayHelper(state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) { + if helper != nil && helper(state, value, length, ovfl) { + return + } + instr := &decInstr{elemOp, 0, nil, ovfl} + isPtr := value.Type().Elem().Kind() == reflect.Ptr for i := 0; i < length; i++ { if state.b.Len() == 0 { errorf("decoding array or slice: length exceeds input size (%d elements)", length) } - up := p - if elemIndir > 1 { - up = decIndirect(up, elemIndir) + v := value.Index(i) + if isPtr { + v = decAlloc(v) } - elemOp(instr, state, up) - p = unsafe.Pointer(uintptr(p) + elemWid) + elemOp(instr, state, v) } } -// decodeArray decodes an array and stores it through p, that is, p points to the zeroth element. +// decodeArray decodes an array and stores it in value. // The length is an unsigned integer preceding the elements. Even though the length is redundant // (it's part of the type), it's a useful check and is included in the encoding. -func (dec *Decoder) decodeArray(atyp reflect.Type, state *decoderState, p unsafe.Pointer, elemOp decOp, elemWid uintptr, length, indir, elemIndir int, ovfl error) { - if indir > 0 { - p = allocate(atyp, p, 1) // All but the last level has been allocated by dec.Indirect - } +func (dec *Decoder) decodeArray(atyp reflect.Type, state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) { if n := state.decodeUint(); n != uint64(length) { errorf("length mismatch in decodeArray") } - dec.decodeArrayHelper(state, p, elemOp, elemWid, length, elemIndir, ovfl) + dec.decodeArrayHelper(state, value, elemOp, length, ovfl, helper) } -// decodeIntoValue is a helper for map decoding. Since maps are decoded using reflection, -// unlike the other items we can't use a pointer directly. -func decodeIntoValue(state *decoderState, op decOp, indir int, v reflect.Value, ovfl error) reflect.Value { - instr := &decInstr{op, 0, indir, 0, ovfl} - up := unsafeAddr(v) - if indir > 1 { - up = decIndirect(up, indir) +// decodeIntoValue is a helper for map decoding. +func decodeIntoValue(state *decoderState, op decOp, isPtr bool, value reflect.Value, ovfl error) reflect.Value { + instr := &decInstr{op, 0, nil, ovfl} + v := value + if isPtr { + v = decAlloc(value) } - op(instr, state, up) - return v + op(instr, state, v) + return value } -// decodeMap decodes a map and stores its header through p. +// decodeMap decodes a map and stores it in value. // Maps are encoded as a length followed by key:value pairs. // Because the internals of maps are not visible to us, we must // use reflection rather than pointer magic. 
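// A worked example of that layout (illustrative only, ignoring the message and
// field framing around the map value): map[string]int{"a": 1} arrives as the
// entry count 1, then the key "a" (its length 1 and the byte 0x61), then the
// value 1, giving the bytes 01 01 61 02.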
-func (dec *Decoder) decodeMap(mtyp reflect.Type, state *decoderState, p unsafe.Pointer, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl error) { - if indir > 0 { - p = allocate(mtyp, p, 1) // All but the last level has been allocated by dec.Indirect - } - up := unsafe.Pointer(p) - if *(*unsafe.Pointer)(up) == nil { // maps are represented as a pointer in the runtime +func (dec *Decoder) decodeMap(mtyp reflect.Type, state *decoderState, value reflect.Value, keyOp, elemOp decOp, ovfl error) { + if value.IsNil() { // Allocate map. - *(*unsafe.Pointer)(up) = unsafe.Pointer(reflect.MakeMap(mtyp).Pointer()) + value.Set(reflect.MakeMap(mtyp)) } - // Maps cannot be accessed by moving addresses around the way - // that slices etc. can. We must recover a full reflection value for - // the iteration. - v := reflect.NewAt(mtyp, unsafe.Pointer(p)).Elem() n := int(state.decodeUint()) + keyIsPtr := mtyp.Key().Kind() == reflect.Ptr + elemIsPtr := mtyp.Elem().Kind() == reflect.Ptr for i := 0; i < n; i++ { - key := decodeIntoValue(state, keyOp, keyIndir, allocValue(mtyp.Key()), ovfl) - elem := decodeIntoValue(state, elemOp, elemIndir, allocValue(mtyp.Elem()), ovfl) - v.SetMapIndex(key, elem) + key := decodeIntoValue(state, keyOp, keyIsPtr, allocValue(mtyp.Key()), ovfl) + elem := decodeIntoValue(state, elemOp, elemIsPtr, allocValue(mtyp.Elem()), ovfl) + value.SetMapIndex(key, elem) } } // ignoreArrayHelper does the work for discarding arrays and slices. func (dec *Decoder) ignoreArrayHelper(state *decoderState, elemOp decOp, length int) { - instr := &decInstr{elemOp, 0, 0, 0, errors.New("no error")} + instr := &decInstr{elemOp, 0, nil, errors.New("no error")} for i := 0; i < length; i++ { - elemOp(instr, state, nil) + elemOp(instr, state, noValue) } } @@ -644,36 +586,34 @@ func (dec *Decoder) ignoreArray(state *decoderState, elemOp decOp, length int) { // ignoreMap discards the data for a map value with no destination. func (dec *Decoder) ignoreMap(state *decoderState, keyOp, elemOp decOp) { n := int(state.decodeUint()) - keyInstr := &decInstr{keyOp, 0, 0, 0, errors.New("no error")} - elemInstr := &decInstr{elemOp, 0, 0, 0, errors.New("no error")} + keyInstr := &decInstr{keyOp, 0, nil, errors.New("no error")} + elemInstr := &decInstr{elemOp, 0, nil, errors.New("no error")} for i := 0; i < n; i++ { - keyOp(keyInstr, state, nil) - elemOp(elemInstr, state, nil) + keyOp(keyInstr, state, noValue) + elemOp(elemInstr, state, noValue) } } -// decodeSlice decodes a slice and stores the slice header through p. +// decodeSlice decodes a slice and stores it in value. // Slices are encoded as an unsigned length followed by the elements. -func (dec *Decoder) decodeSlice(atyp reflect.Type, state *decoderState, p unsafe.Pointer, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl error) { - nr := state.decodeUint() - n := int(nr) - if indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - // Allocate the slice header. - *(*unsafe.Pointer)(p) = unsafe.Pointer(new([]unsafe.Pointer)) - } - p = *(*unsafe.Pointer)(p) - } - // Allocate storage for the slice elements, that is, the underlying array, - // if the existing slice does not have the capacity. - // Always write a header at p. 
- hdrp := (*reflect.SliceHeader)(p) - if hdrp.Cap < n { - hdrp.Data = reflect.MakeSlice(atyp, n, n).Pointer() - hdrp.Cap = n +func (dec *Decoder) decodeSlice(state *decoderState, value reflect.Value, elemOp decOp, ovfl error, helper decHelper) { + u := state.decodeUint() + typ := value.Type() + size := uint64(typ.Elem().Size()) + nBytes := u * size + n := int(u) + // Take care with overflow in this calculation. + if n < 0 || uint64(n) != u || nBytes > tooBig || (size > 0 && nBytes/size != u) { + // We don't check n against buffer length here because if it's a slice + // of interfaces, there will be buffer reloads. + errorf("%s slice too big: %d elements of %d bytes", typ.Elem(), u, size) + } + if value.Cap() < n { + value.Set(reflect.MakeSlice(typ, n, n)) + } else { + value.Set(value.Slice(0, n)) } - hdrp.Len = n - dec.decodeArrayHelper(state, unsafe.Pointer(hdrp.Data), elemOp, elemWid, n, elemIndir, ovfl) + dec.decodeArrayHelper(state, value, elemOp, n, ovfl, helper) } // ignoreSlice skips over the data for a slice value with no destination. @@ -681,21 +621,10 @@ func (dec *Decoder) ignoreSlice(state *decoderState, elemOp decOp) { dec.ignoreArrayHelper(state, elemOp, int(state.decodeUint())) } -// setInterfaceValue sets an interface value to a concrete value, -// but first it checks that the assignment will succeed. -func setInterfaceValue(ivalue reflect.Value, value reflect.Value) { - if !value.Type().AssignableTo(ivalue.Type()) { - errorf("%s is not assignable to type %s", value.Type(), ivalue.Type()) - } - ivalue.Set(value) -} - -// decodeInterface decodes an interface value and stores it through p. +// decodeInterface decodes an interface value and stores it in value. // Interfaces are encoded as the name of a concrete type followed by a value. // If the name is empty, the value is nil and no value is sent. -func (dec *Decoder) decodeInterface(ityp reflect.Type, state *decoderState, p unsafe.Pointer, indir int) { - // Create a writable interface reflect.Value. We need one even for the nil case. - ivalue := allocValue(ityp) +func (dec *Decoder) decodeInterface(ityp reflect.Type, state *decoderState, value reflect.Value) { // Read the name of the concrete type. nr := state.decodeUint() if nr < 0 || nr > 1<<31 { // zero is permissible for anonymous types @@ -707,13 +636,10 @@ func (dec *Decoder) decodeInterface(ityp reflect.Type, state *decoderState, p un b := make([]byte, nr) state.b.Read(b) name := string(b) + // Allocate the destination interface value. if name == "" { - // Copy the representation of the nil interface value to the target. - // This is horribly unsafe and special. - if indir > 0 { - p = allocate(ityp, p, 1) // All but the last level has been allocated by dec.Indirect - } - *(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.InterfaceData() + // Copy the nil interface value to the target. + value.Set(reflect.Zero(value.Type())) return } if len(name) > 1024 { @@ -735,21 +661,18 @@ func (dec *Decoder) decodeInterface(ityp reflect.Type, state *decoderState, p un // in case we want to ignore the value by skipping it completely). state.decodeUint() // Read the concrete value. - value := allocValue(typ) - dec.decodeValue(concreteId, value) + v := allocValue(typ) + dec.decodeValue(concreteId, v) if dec.err != nil { error_(dec.err) } - // Allocate the destination interface value. - if indir > 0 { - p = allocate(ityp, p, 1) // All but the last level has been allocated by dec.Indirect - } // Assign the concrete value to the interface. 
// Tread carefully; it might not satisfy the interface. - setInterfaceValue(ivalue, value) - // Copy the representation of the interface value to the target. - // This is horribly unsafe and special. - *(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.InterfaceData() + if !typ.AssignableTo(ityp) { + errorf("%s is not assignable to type %s", typ, ityp) + } + // Copy the interface value to the target. + value.Set(v) } // ignoreInterface discards the data for an interface value with no destination. @@ -765,12 +688,12 @@ func (dec *Decoder) ignoreInterface(state *decoderState) { error_(dec.err) } // At this point, the decoder buffer contains a delimited value. Just toss it. - state.b.Next(int(state.decodeUint())) + state.b.Drop(int(state.decodeUint())) } // decodeGobDecoder decodes something implementing the GobDecoder interface. // The data is encoded as a byte slice. -func (dec *Decoder) decodeGobDecoder(ut *userTypeInfo, state *decoderState, v reflect.Value) { +func (dec *Decoder) decodeGobDecoder(ut *userTypeInfo, state *decoderState, value reflect.Value) { // Read the bytes for the value. b := make([]byte, state.decodeUint()) _, err := state.b.Read(b) @@ -780,11 +703,11 @@ func (dec *Decoder) decodeGobDecoder(ut *userTypeInfo, state *decoderState, v re // We know it's one of these. switch ut.externalDec { case xGob: - err = v.Interface().(GobDecoder).GobDecode(b) + err = value.Interface().(GobDecoder).GobDecode(b) case xBinary: - err = v.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary(b) + err = value.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary(b) case xText: - err = v.Interface().(encoding.TextUnmarshaler).UnmarshalText(b) + err = value.Interface().(encoding.TextUnmarshaler).UnmarshalText(b) } if err != nil { error_(err) @@ -832,7 +755,7 @@ var decIgnoreOpMap = map[typeId]decOp{ // decOpFor returns the decoding op for the base type under rt and // the indirection count to reach it. -func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProgress map[reflect.Type]*decOp) (*decOp, int) { +func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProgress map[reflect.Type]*decOp) *decOp { ut := userType(rt) // If the type implements GobEncoder, we handle it without further processing. if ut.externalDec != 0 { @@ -842,10 +765,9 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg // If this type is already in progress, it's a recursive type (e.g. map[string]*T). // Return the pointer to the op we're already building. 
if opPtr := inProgress[rt]; opPtr != nil { - return opPtr, ut.indir + return opPtr } typ := ut.base - indir := ut.indir var op decOp k := typ.Kind() if int(k) < len(decOpTable) { @@ -858,20 +780,21 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg case reflect.Array: name = "element of " + name elemId := dec.wireType[wireId].ArrayT.Elem - elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress) + elemOp := dec.decOpFor(elemId, t.Elem(), name, inProgress) ovfl := overflow(name) - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { - state.dec.decodeArray(t, state, p, *elemOp, t.Elem().Size(), t.Len(), i.indir, elemIndir, ovfl) + helper := decArrayHelper[t.Elem().Kind()] + op = func(i *decInstr, state *decoderState, value reflect.Value) { + state.dec.decodeArray(t, state, value, *elemOp, t.Len(), ovfl, helper) } case reflect.Map: keyId := dec.wireType[wireId].MapT.Key elemId := dec.wireType[wireId].MapT.Elem - keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), "key of "+name, inProgress) - elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), "element of "+name, inProgress) + keyOp := dec.decOpFor(keyId, t.Key(), "key of "+name, inProgress) + elemOp := dec.decOpFor(elemId, t.Elem(), "element of "+name, inProgress) ovfl := overflow(name) - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { - state.dec.decodeMap(t, state, p, *keyOp, *elemOp, i.indir, keyIndir, elemIndir, ovfl) + op = func(i *decInstr, state *decoderState, value reflect.Value) { + state.dec.decodeMap(t, state, value, *keyOp, *elemOp, ovfl) } case reflect.Slice: @@ -886,32 +809,34 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg } else { elemId = dec.wireType[wireId].SliceT.Elem } - elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress) + elemOp := dec.decOpFor(elemId, t.Elem(), name, inProgress) ovfl := overflow(name) - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { - state.dec.decodeSlice(t, state, p, *elemOp, t.Elem().Size(), i.indir, elemIndir, ovfl) + helper := decSliceHelper[t.Elem().Kind()] + op = func(i *decInstr, state *decoderState, value reflect.Value) { + state.dec.decodeSlice(state, value, *elemOp, ovfl, helper) } case reflect.Struct: // Generate a closure that calls out to the engine for the nested type. - enginePtr, err := dec.getDecEnginePtr(wireId, userType(typ)) + ut := userType(typ) + enginePtr, err := dec.getDecEnginePtr(wireId, ut) if err != nil { error_(err) } - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, value reflect.Value) { // indirect through enginePtr to delay evaluation for recursive structs. - dec.decodeStruct(*enginePtr, userType(typ), p, i.indir) + dec.decodeStruct(*enginePtr, ut, value) } case reflect.Interface: - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { - state.dec.decodeInterface(t, state, p, i.indir) + op = func(i *decInstr, state *decoderState, value reflect.Value) { + state.dec.decodeInterface(t, state, value) } } } if op == nil { errorf("decode can't handle type %s", rt) } - return &op, indir + return &op } // decIgnoreOpFor returns the decoding op for a field that has no destination. @@ -921,7 +846,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { if wireId == tInterface { // Special case because it's a method: the ignored item might // define types and we need to record their state in the decoder. 
- op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreInterface(state) } return op @@ -934,7 +859,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { case wire.ArrayT != nil: elemId := wire.ArrayT.Elem elemOp := dec.decIgnoreOpFor(elemId) - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreArray(state, elemOp, wire.ArrayT.Len) } @@ -943,14 +868,14 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { elemId := dec.wireType[wireId].MapT.Elem keyOp := dec.decIgnoreOpFor(keyId) elemOp := dec.decIgnoreOpFor(elemId) - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreMap(state, keyOp, elemOp) } case wire.SliceT != nil: elemId := wire.SliceT.Elem elemOp := dec.decIgnoreOpFor(elemId) - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreSlice(state, elemOp) } @@ -960,13 +885,13 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { if err != nil { error_(err) } - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, value reflect.Value) { // indirect through enginePtr to delay evaluation for recursive structs state.dec.ignoreStruct(*enginePtr) } case wire.GobEncoderT != nil, wire.BinaryMarshalerT != nil, wire.TextMarshalerT != nil: - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreGobDecoder(state) } } @@ -979,7 +904,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { // gobDecodeOpFor returns the op for a type that is known to implement // GobDecoder. -func (dec *Decoder) gobDecodeOpFor(ut *userTypeInfo) (*decOp, int) { +func (dec *Decoder) gobDecodeOpFor(ut *userTypeInfo) *decOp { rcvrType := ut.user if ut.decIndir == -1 { rcvrType = reflect.PtrTo(rcvrType) @@ -989,25 +914,14 @@ func (dec *Decoder) gobDecodeOpFor(ut *userTypeInfo) (*decOp, int) { } } var op decOp - op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { - // Caller has gotten us to within one indirection of our value. - if i.indir > 0 { - if *(*unsafe.Pointer)(p) == nil { - *(*unsafe.Pointer)(p) = unsafe.Pointer(reflect.New(ut.base).Pointer()) - } + op = func(i *decInstr, state *decoderState, value reflect.Value) { + // We now have the base type. We need its address if the receiver is a pointer. + if value.Kind() != reflect.Ptr && rcvrType.Kind() == reflect.Ptr { + value = value.Addr() } - // Now p is a pointer to the base type. Do we need to climb out to - // get to the receiver type? - var v reflect.Value - if ut.decIndir == -1 { - v = reflect.NewAt(rcvrType, unsafe.Pointer(&p)).Elem() - } else { - v = reflect.NewAt(rcvrType, p).Elem() - } - state.dec.decodeGobDecoder(ut, state, v) + state.dec.decodeGobDecoder(ut, state, value) } - return &op, int(ut.indir) - + return &op } // compatibleType asks: Are these two gob Types compatible? 
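As context for the reflect-based rewrite above, the following sketch is ordinary caller code using only the public encoding/gob API (it is not part of this patch); the Decode call at the end is what ultimately drives decodeValue and the compiled decEngine instructions shown in these hunks.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// point is a hypothetical example type; only exported fields are transmitted.
type point struct{ X, Y int }

func main() {
	var network bytes.Buffer
	if err := gob.NewEncoder(&network).Encode(point{X: 3, Y: 4}); err != nil {
		log.Fatal(err)
	}
	var p point
	// Decode exercises the reflect-based decode engine.
	if err := gob.NewDecoder(&network).Decode(&p); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p) // prints {3 4}
}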
@@ -1108,9 +1022,9 @@ func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *de } return nil, errors.New("gob: decoding into local type " + name + ", received remote type " + remoteType) } - op, indir := dec.decOpFor(remoteId, rt, name, make(map[reflect.Type]*decOp)) + op := dec.decOpFor(remoteId, rt, name, make(map[reflect.Type]*decOp)) ovfl := errors.New(`value for "` + name + `" out of range`) - engine.instr[singletonField] = decInstr{*op, singletonField, indir, 0, ovfl} + engine.instr[singletonField] = decInstr{*op, singletonField, nil, ovfl} engine.numInstr = 1 return } @@ -1121,7 +1035,7 @@ func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err engine.instr = make([]decInstr, 1) // one item op := dec.decIgnoreOpFor(remoteId) ovfl := overflow(dec.typeString(remoteId)) - engine.instr[0] = decInstr{op, 0, 0, 0, ovfl} + engine.instr[0] = decInstr{op, 0, nil, ovfl} engine.numInstr = 1 return } @@ -1164,14 +1078,14 @@ func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEn // TODO(r): anonymous names if !present || !isExported(wireField.Name) { op := dec.decIgnoreOpFor(wireField.Id) - engine.instr[fieldnum] = decInstr{op, fieldnum, 0, 0, ovfl} + engine.instr[fieldnum] = decInstr{op, fieldnum, nil, ovfl} continue } if !dec.compatibleType(localField.Type, wireField.Id, make(map[reflect.Type]typeId)) { errorf("wrong type (%s) for received field %s.%s", localField.Type, wireStruct.Name, wireField.Name) } - op, indir := dec.decOpFor(wireField.Id, localField.Type, localField.Name, seen) - engine.instr[fieldnum] = decInstr{*op, fieldnum, indir, uintptr(localField.Offset), ovfl} + op := dec.decOpFor(wireField.Id, localField.Type, localField.Name, seen) + engine.instr[fieldnum] = decInstr{*op, fieldnum, localField.Index, ovfl} engine.numInstr++ } return @@ -1222,22 +1136,23 @@ func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, er return } -// decodeValue decodes the data stream representing a value and stores it in val. -func (dec *Decoder) decodeValue(wireId typeId, val reflect.Value) { +// decodeValue decodes the data stream representing a value and stores it in value. +func (dec *Decoder) decodeValue(wireId typeId, value reflect.Value) { defer catchError(&dec.err) // If the value is nil, it means we should just ignore this item. - if !val.IsValid() { + if !value.IsValid() { dec.decodeIgnoredValue(wireId) return } // Dereference down to the underlying type. - ut := userType(val.Type()) + ut := userType(value.Type()) base := ut.base var enginePtr **decEngine enginePtr, dec.err = dec.getDecEnginePtr(wireId, ut) if dec.err != nil { return } + value = decAlloc(value) engine := *enginePtr if st := base; st.Kind() == reflect.Struct && ut.externalDec == 0 { if engine.numInstr == 0 && st.NumField() > 0 && @@ -1245,9 +1160,9 @@ func (dec *Decoder) decodeValue(wireId typeId, val reflect.Value) { name := base.Name() errorf("type mismatch: no fields matched compiling decoder for %s", name) } - dec.decodeStruct(engine, ut, unsafeAddr(val), ut.indir) + dec.decodeStruct(engine, ut, value) } else { - dec.decodeSingle(engine, ut, unsafeAddr(val)) + dec.decodeSingle(engine, ut, value) } } @@ -1293,21 +1208,6 @@ func init() { decOpTable[reflect.Uintptr] = uop } -// Gob assumes it can call UnsafeAddr on any Value -// in order to get a pointer it can copy data from. 
-// Values that have just been created and do not point -// into existing structs or slices cannot be addressed, -// so simulate it by returning a pointer to a copy. -// Each call allocates once. -func unsafeAddr(v reflect.Value) unsafe.Pointer { - if v.CanAddr() { - return unsafe.Pointer(v.UnsafeAddr()) - } - x := reflect.New(v.Type()).Elem() - x.Set(v) - return unsafe.Pointer(x.UnsafeAddr()) -} - // Gob depends on being able to take the address // of zeroed Values it creates, so use this wrapper instead // of the standard reflect.Zero. diff --git a/libgo/go/encoding/gob/decoder.go b/libgo/go/encoding/gob/decoder.go index 3a769ec1254..c453e9ba397 100644 --- a/libgo/go/encoding/gob/decoder.go +++ b/libgo/go/encoding/gob/decoder.go @@ -6,25 +6,28 @@ package gob import ( "bufio" - "bytes" "errors" "io" "reflect" "sync" ) +// tooBig provides a sanity check for sizes; used in several places. +// Upper limit of 1GB, allowing room to grow a little without overflow. +// TODO: make this adjustable? +const tooBig = 1 << 30 + // A Decoder manages the receipt of type and data information read from the // remote side of a connection. type Decoder struct { mutex sync.Mutex // each item must be received atomically r io.Reader // source of the data - buf bytes.Buffer // buffer for more efficient i/o from r + buf decBuffer // buffer for more efficient i/o from r wireType map[typeId]*wireType // map from remote ID to local description decoderCache map[reflect.Type]map[typeId]**decEngine // cache of compiled engines ignorerCache map[typeId]**decEngine // ditto for ignored objects freeList *decoderState // list of free decoderStates; avoids reallocation countBuf []byte // used for decoding integers while parsing messages - tmp []byte // temporary storage for i/o; saves reallocating err error } @@ -75,9 +78,7 @@ func (dec *Decoder) recvMessage() bool { dec.err = err return false } - // Upper limit of 1GB, allowing room to grow a little without overflow. - // TODO: We might want more control over this limit. - if nbytes >= 1<<30 { + if nbytes >= tooBig { dec.err = errBadCount return false } @@ -87,37 +88,17 @@ func (dec *Decoder) recvMessage() bool { // readMessage reads the next nbytes bytes from the input. func (dec *Decoder) readMessage(nbytes int) { - // Allocate the dec.tmp buffer, up to 10KB. - const maxBuf = 10 * 1024 - nTmp := nbytes - if nTmp > maxBuf { - nTmp = maxBuf - } - if cap(dec.tmp) < nTmp { - nAlloc := nTmp + 100 // A little extra for growth. - if nAlloc > maxBuf { - nAlloc = maxBuf - } - dec.tmp = make([]byte, nAlloc) + if dec.buf.Len() != 0 { + // The buffer should always be empty now. + panic("non-empty decoder buffer") } - dec.tmp = dec.tmp[:nTmp] - // Read the data - dec.buf.Grow(nbytes) - for nbytes > 0 { - if nbytes < nTmp { - dec.tmp = dec.tmp[:nbytes] - } - var nRead int - nRead, dec.err = io.ReadFull(dec.r, dec.tmp) - if dec.err != nil { - if dec.err == io.EOF { - dec.err = io.ErrUnexpectedEOF - } - return + dec.buf.Size(nbytes) + _, dec.err = io.ReadFull(dec.r, dec.buf.Bytes()) + if dec.err != nil { + if dec.err == io.EOF { + dec.err = io.ErrUnexpectedEOF } - dec.buf.Write(dec.tmp) - nbytes -= nRead } } @@ -209,7 +190,7 @@ func (dec *Decoder) Decode(e interface{}) error { // Otherwise, it stores the value into v. In that case, v must represent // a non-nil pointer to data or be an assignable reflect.Value (v.CanSet()) // If the input is at EOF, DecodeValue returns io.EOF and -// does not modify e. +// does not modify v. 
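// A usage sketch of the two accepted forms (assumed caller code, not from this patch):
//
//	var x int
//	err := dec.DecodeValue(reflect.ValueOf(&x))       // non-nil pointer to data
//	err = dec.DecodeValue(reflect.ValueOf(&x).Elem()) // assignable (CanSet) value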
func (dec *Decoder) DecodeValue(v reflect.Value) error { if v.IsValid() { if v.Kind() == reflect.Ptr && !v.IsNil() { diff --git a/libgo/go/encoding/gob/enc_helpers.go b/libgo/go/encoding/gob/enc_helpers.go new file mode 100644 index 00000000000..804e539d84d --- /dev/null +++ b/libgo/go/encoding/gob/enc_helpers.go @@ -0,0 +1,414 @@ +// Created by encgen --output enc_helpers.go; DO NOT EDIT + +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gob + +import ( + "reflect" +) + +var encArrayHelper = map[reflect.Kind]encHelper{ + reflect.Bool: encBoolArray, + reflect.Complex64: encComplex64Array, + reflect.Complex128: encComplex128Array, + reflect.Float32: encFloat32Array, + reflect.Float64: encFloat64Array, + reflect.Int: encIntArray, + reflect.Int16: encInt16Array, + reflect.Int32: encInt32Array, + reflect.Int64: encInt64Array, + reflect.Int8: encInt8Array, + reflect.String: encStringArray, + reflect.Uint: encUintArray, + reflect.Uint16: encUint16Array, + reflect.Uint32: encUint32Array, + reflect.Uint64: encUint64Array, + reflect.Uintptr: encUintptrArray, +} + +var encSliceHelper = map[reflect.Kind]encHelper{ + reflect.Bool: encBoolSlice, + reflect.Complex64: encComplex64Slice, + reflect.Complex128: encComplex128Slice, + reflect.Float32: encFloat32Slice, + reflect.Float64: encFloat64Slice, + reflect.Int: encIntSlice, + reflect.Int16: encInt16Slice, + reflect.Int32: encInt32Slice, + reflect.Int64: encInt64Slice, + reflect.Int8: encInt8Slice, + reflect.String: encStringSlice, + reflect.Uint: encUintSlice, + reflect.Uint16: encUint16Slice, + reflect.Uint32: encUint32Slice, + reflect.Uint64: encUint64Slice, + reflect.Uintptr: encUintptrSlice, +} + +func encBoolArray(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encBoolSlice(state, v.Slice(0, v.Len())) +} + +func encBoolSlice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]bool) + if !ok { + // It is kind bool but not type bool. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != false || state.sendZero { + if x { + state.encodeUint(1) + } else { + state.encodeUint(0) + } + } + } + return true +} + +func encComplex64Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encComplex64Slice(state, v.Slice(0, v.Len())) +} + +func encComplex64Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]complex64) + if !ok { + // It is kind complex64 but not type complex64. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0+0i || state.sendZero { + rpart := floatBits(float64(real(x))) + ipart := floatBits(float64(imag(x))) + state.encodeUint(rpart) + state.encodeUint(ipart) + } + } + return true +} + +func encComplex128Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encComplex128Slice(state, v.Slice(0, v.Len())) +} + +func encComplex128Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]complex128) + if !ok { + // It is kind complex128 but not type complex128. TODO: We can handle this unsafely. 
+ return false + } + for _, x := range slice { + if x != 0+0i || state.sendZero { + rpart := floatBits(real(x)) + ipart := floatBits(imag(x)) + state.encodeUint(rpart) + state.encodeUint(ipart) + } + } + return true +} + +func encFloat32Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encFloat32Slice(state, v.Slice(0, v.Len())) +} + +func encFloat32Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]float32) + if !ok { + // It is kind float32 but not type float32. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + bits := floatBits(float64(x)) + state.encodeUint(bits) + } + } + return true +} + +func encFloat64Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encFloat64Slice(state, v.Slice(0, v.Len())) +} + +func encFloat64Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]float64) + if !ok { + // It is kind float64 but not type float64. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + bits := floatBits(x) + state.encodeUint(bits) + } + } + return true +} + +func encIntArray(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encIntSlice(state, v.Slice(0, v.Len())) +} + +func encIntSlice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]int) + if !ok { + // It is kind int but not type int. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + state.encodeInt(int64(x)) + } + } + return true +} + +func encInt16Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encInt16Slice(state, v.Slice(0, v.Len())) +} + +func encInt16Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]int16) + if !ok { + // It is kind int16 but not type int16. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + state.encodeInt(int64(x)) + } + } + return true +} + +func encInt32Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encInt32Slice(state, v.Slice(0, v.Len())) +} + +func encInt32Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]int32) + if !ok { + // It is kind int32 but not type int32. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + state.encodeInt(int64(x)) + } + } + return true +} + +func encInt64Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encInt64Slice(state, v.Slice(0, v.Len())) +} + +func encInt64Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]int64) + if !ok { + // It is kind int64 but not type int64. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + state.encodeInt(x) + } + } + return true +} + +func encInt8Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. 
+ if !v.CanAddr() { + return false + } + return encInt8Slice(state, v.Slice(0, v.Len())) +} + +func encInt8Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]int8) + if !ok { + // It is kind int8 but not type int8. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + state.encodeInt(int64(x)) + } + } + return true +} + +func encStringArray(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encStringSlice(state, v.Slice(0, v.Len())) +} + +func encStringSlice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]string) + if !ok { + // It is kind string but not type string. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != "" || state.sendZero { + state.encodeUint(uint64(len(x))) + state.b.WriteString(x) + } + } + return true +} + +func encUintArray(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encUintSlice(state, v.Slice(0, v.Len())) +} + +func encUintSlice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]uint) + if !ok { + // It is kind uint but not type uint. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + state.encodeUint(uint64(x)) + } + } + return true +} + +func encUint16Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encUint16Slice(state, v.Slice(0, v.Len())) +} + +func encUint16Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]uint16) + if !ok { + // It is kind uint16 but not type uint16. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + state.encodeUint(uint64(x)) + } + } + return true +} + +func encUint32Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encUint32Slice(state, v.Slice(0, v.Len())) +} + +func encUint32Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]uint32) + if !ok { + // It is kind uint32 but not type uint32. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + state.encodeUint(uint64(x)) + } + } + return true +} + +func encUint64Array(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encUint64Slice(state, v.Slice(0, v.Len())) +} + +func encUint64Slice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]uint64) + if !ok { + // It is kind uint64 but not type uint64. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + state.encodeUint(x) + } + } + return true +} + +func encUintptrArray(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return encUintptrSlice(state, v.Slice(0, v.Len())) +} + +func encUintptrSlice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]uintptr) + if !ok { + // It is kind uintptr but not type uintptr. TODO: We can handle this unsafely. 
+ return false + } + for _, x := range slice { + if x != 0 || state.sendZero { + state.encodeUint(uint64(x)) + } + } + return true +} diff --git a/libgo/go/encoding/gob/encgen.go b/libgo/go/encoding/gob/encgen.go new file mode 100644 index 00000000000..efdd9282921 --- /dev/null +++ b/libgo/go/encoding/gob/encgen.go @@ -0,0 +1,218 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// encgen writes the helper functions for encoding. Intended to be +// used with go generate; see the invocation in encode.go. + +// TODO: We could do more by being unsafe. Add a -unsafe flag? + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "log" + "os" +) + +var output = flag.String("output", "enc_helpers.go", "file name to write") + +type Type struct { + lower string + upper string + zero string + encoder string +} + +var types = []Type{ + { + "bool", + "Bool", + "false", + `if x { + state.encodeUint(1) + } else { + state.encodeUint(0) + }`, + }, + { + "complex64", + "Complex64", + "0+0i", + `rpart := floatBits(float64(real(x))) + ipart := floatBits(float64(imag(x))) + state.encodeUint(rpart) + state.encodeUint(ipart)`, + }, + { + "complex128", + "Complex128", + "0+0i", + `rpart := floatBits(real(x)) + ipart := floatBits(imag(x)) + state.encodeUint(rpart) + state.encodeUint(ipart)`, + }, + { + "float32", + "Float32", + "0", + `bits := floatBits(float64(x)) + state.encodeUint(bits)`, + }, + { + "float64", + "Float64", + "0", + `bits := floatBits(x) + state.encodeUint(bits)`, + }, + { + "int", + "Int", + "0", + `state.encodeInt(int64(x))`, + }, + { + "int16", + "Int16", + "0", + `state.encodeInt(int64(x))`, + }, + { + "int32", + "Int32", + "0", + `state.encodeInt(int64(x))`, + }, + { + "int64", + "Int64", + "0", + `state.encodeInt(x)`, + }, + { + "int8", + "Int8", + "0", + `state.encodeInt(int64(x))`, + }, + { + "string", + "String", + `""`, + `state.encodeUint(uint64(len(x))) + state.b.WriteString(x)`, + }, + { + "uint", + "Uint", + "0", + `state.encodeUint(uint64(x))`, + }, + { + "uint16", + "Uint16", + "0", + `state.encodeUint(uint64(x))`, + }, + { + "uint32", + "Uint32", + "0", + `state.encodeUint(uint64(x))`, + }, + { + "uint64", + "Uint64", + "0", + `state.encodeUint(x)`, + }, + { + "uintptr", + "Uintptr", + "0", + `state.encodeUint(uint64(x))`, + }, + // uint8 Handled separately. +} + +func main() { + log.SetFlags(0) + log.SetPrefix("encgen: ") + flag.Parse() + if flag.NArg() != 0 { + log.Fatal("usage: encgen [--output filename]") + } + var b bytes.Buffer + fmt.Fprintf(&b, "// Created by encgen --output %s; DO NOT EDIT\n", *output) + fmt.Fprint(&b, header) + printMaps(&b, "Array") + fmt.Fprint(&b, "\n") + printMaps(&b, "Slice") + for _, t := range types { + fmt.Fprintf(&b, arrayHelper, t.lower, t.upper) + fmt.Fprintf(&b, sliceHelper, t.lower, t.upper, t.zero, t.encoder) + } + source, err := format.Source(b.Bytes()) + if err != nil { + log.Fatal("source format error:", err) + } + fd, err := os.Create(*output) + _, err = fd.Write(source) + if err != nil { + log.Fatal(err) + } +} + +func printMaps(b *bytes.Buffer, upperClass string) { + fmt.Fprintf(b, "var enc%sHelper = map[reflect.Kind]encHelper{\n", upperClass) + for _, t := range types { + fmt.Fprintf(b, "reflect.%s: enc%s%s,\n", t.upper, t.upper, upperClass) + } + fmt.Fprintf(b, "}\n") +} + +const header = ` +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gob + +import ( + "reflect" +) + +` + +const arrayHelper = ` +func enc%[2]sArray(state *encoderState, v reflect.Value) bool { + // Can only slice if it is addressable. + if !v.CanAddr() { + return false + } + return enc%[2]sSlice(state, v.Slice(0, v.Len())) +} +` + +const sliceHelper = ` +func enc%[2]sSlice(state *encoderState, v reflect.Value) bool { + slice, ok := v.Interface().([]%[1]s) + if !ok { + // It is kind %[1]s but not type %[1]s. TODO: We can handle this unsafely. + return false + } + for _, x := range slice { + if x != %[3]s || state.sendZero { + %[4]s + } + } + return true +} +` diff --git a/libgo/go/encoding/gob/encode.go b/libgo/go/encoding/gob/encode.go index 7831c02d139..f66279f1413 100644 --- a/libgo/go/encoding/gob/encode.go +++ b/libgo/go/encoding/gob/encode.go @@ -2,17 +2,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run encgen.go -output enc_helpers.go + package gob import ( - "bytes" "encoding" "math" "reflect" - "unsafe" ) -const uint64Size = int(unsafe.Sizeof(uint64(0))) +const uint64Size = 8 + +type encHelper func(state *encoderState, v reflect.Value) bool // encoderState is the global execution state of an instance of the encoder. // Field numbers are delta encoded and always increase. The field @@ -20,14 +22,46 @@ const uint64Size = int(unsafe.Sizeof(uint64(0))) // 0 terminates the structure. type encoderState struct { enc *Encoder - b *bytes.Buffer + b *encBuffer sendZero bool // encoding an array element or map key/value pair; send zero values fieldnum int // the last field number written. buf [1 + uint64Size]byte // buffer used by the encoder; here to avoid allocation. next *encoderState // for free list } -func (enc *Encoder) newEncoderState(b *bytes.Buffer) *encoderState { +// encBuffer is an extremely simple, fast implementation of a write-only byte buffer. +// It never returns a non-nil error, but Write returns an error value so it matches io.Writer. +type encBuffer struct { + data []byte + scratch [64]byte +} + +func (e *encBuffer) WriteByte(c byte) { + e.data = append(e.data, c) +} + +func (e *encBuffer) Write(p []byte) (int, error) { + e.data = append(e.data, p...) + return len(p), nil +} + +func (e *encBuffer) WriteString(s string) { + e.data = append(e.data, s...) +} + +func (e *encBuffer) Len() int { + return len(e.data) +} + +func (e *encBuffer) Bytes() []byte { + return e.data +} + +func (e *encBuffer) Reset() { + e.data = e.data[0:0] +} + +func (enc *Encoder) newEncoderState(b *encBuffer) *encoderState { e := enc.freeList if e == nil { e = new(encoderState) @@ -38,6 +72,9 @@ func (enc *Encoder) newEncoderState(b *bytes.Buffer) *encoderState { e.sendZero = false e.fieldnum = 0 e.b = b + if len(b.data) == 0 { + b.data = b.scratch[0:0] + } return e } @@ -54,10 +91,7 @@ func (enc *Encoder) freeEncoderState(e *encoderState) { // encodeUint writes an encoded unsigned integer to state.b. 
func (state *encoderState) encodeUint(x uint64) { if x <= 0x7F { - err := state.b.WriteByte(uint8(x)) - if err != nil { - error_(err) - } + state.b.WriteByte(uint8(x)) return } i := uint64Size @@ -67,10 +101,7 @@ func (state *encoderState) encodeUint(x uint64) { i-- } state.buf[i] = uint8(i - uint64Size) // = loop count, negated - _, err := state.b.Write(state.buf[i : uint64Size+1]) - if err != nil { - error_(err) - } + state.b.Write(state.buf[i : uint64Size+1]) } // encodeInt writes an encoded signed integer to state.w. @@ -87,14 +118,14 @@ func (state *encoderState) encodeInt(i int64) { } // encOp is the signature of an encoding operator for a given type. -type encOp func(i *encInstr, state *encoderState, p unsafe.Pointer) +type encOp func(i *encInstr, state *encoderState, v reflect.Value) // The 'instructions' of the encoding machine type encInstr struct { - op encOp - field int // field number - indir int // how many pointer indirections to reach the value in the struct - offset uintptr // offset in the structure of the field to encode + op encOp + field int // field number in input + index []int // struct index + indir int // how many pointer indirections to reach the value in the struct } // update emits a field number and updates the state to record its value for delta encoding. @@ -115,20 +146,20 @@ func (state *encoderState) update(instr *encInstr) { // encoded integer, followed by the field data in its appropriate // format. -// encIndirect dereferences p indir times and returns the result. -func encIndirect(p unsafe.Pointer, indir int) unsafe.Pointer { +// encIndirect dereferences pv indir times and returns the result. +func encIndirect(pv reflect.Value, indir int) reflect.Value { for ; indir > 0; indir-- { - p = *(*unsafe.Pointer)(p) - if p == nil { - return unsafe.Pointer(nil) + if pv.IsNil() { + break } + pv = pv.Elem() } - return p + return pv } -// encBool encodes the bool with address p as an unsigned 0 or 1. -func encBool(i *encInstr, state *encoderState, p unsafe.Pointer) { - b := *(*bool)(p) +// encBool encodes the bool referenced by v as an unsigned 0 or 1. +func encBool(i *encInstr, state *encoderState, v reflect.Value) { + b := v.Bool() if b || state.sendZero { state.update(i) if b { @@ -139,102 +170,21 @@ func encBool(i *encInstr, state *encoderState, p unsafe.Pointer) { } } -// encInt encodes the int with address p. -func encInt(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := int64(*(*int)(p)) - if v != 0 || state.sendZero { - state.update(i) - state.encodeInt(v) - } -} - -// encUint encodes the uint with address p. -func encUint(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := uint64(*(*uint)(p)) - if v != 0 || state.sendZero { +// encInt encodes the signed integer (int int8 int16 int32 int64) referenced by v. +func encInt(i *encInstr, state *encoderState, v reflect.Value) { + value := v.Int() + if value != 0 || state.sendZero { state.update(i) - state.encodeUint(v) + state.encodeInt(value) } } -// encInt8 encodes the int8 with address p. -func encInt8(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := int64(*(*int8)(p)) - if v != 0 || state.sendZero { +// encUint encodes the unsigned integer (uint uint8 uint16 uint32 uint64 uintptr) referenced by v. +func encUint(i *encInstr, state *encoderState, v reflect.Value) { + value := v.Uint() + if value != 0 || state.sendZero { state.update(i) - state.encodeInt(v) - } -} - -// encUint8 encodes the uint8 with address p. 
-func encUint8(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := uint64(*(*uint8)(p)) - if v != 0 || state.sendZero { - state.update(i) - state.encodeUint(v) - } -} - -// encInt16 encodes the int16 with address p. -func encInt16(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := int64(*(*int16)(p)) - if v != 0 || state.sendZero { - state.update(i) - state.encodeInt(v) - } -} - -// encUint16 encodes the uint16 with address p. -func encUint16(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := uint64(*(*uint16)(p)) - if v != 0 || state.sendZero { - state.update(i) - state.encodeUint(v) - } -} - -// encInt32 encodes the int32 with address p. -func encInt32(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := int64(*(*int32)(p)) - if v != 0 || state.sendZero { - state.update(i) - state.encodeInt(v) - } -} - -// encUint encodes the uint32 with address p. -func encUint32(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := uint64(*(*uint32)(p)) - if v != 0 || state.sendZero { - state.update(i) - state.encodeUint(v) - } -} - -// encInt64 encodes the int64 with address p. -func encInt64(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := *(*int64)(p) - if v != 0 || state.sendZero { - state.update(i) - state.encodeInt(v) - } -} - -// encInt64 encodes the uint64 with address p. -func encUint64(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := *(*uint64)(p) - if v != 0 || state.sendZero { - state.update(i) - state.encodeUint(v) - } -} - -// encUintptr encodes the uintptr with address p. -func encUintptr(i *encInstr, state *encoderState, p unsafe.Pointer) { - v := uint64(*(*uintptr)(p)) - if v != 0 || state.sendZero { - state.update(i) - state.encodeUint(v) + state.encodeUint(value) } } @@ -255,42 +205,20 @@ func floatBits(f float64) uint64 { return v } -// encFloat32 encodes the float32 with address p. -func encFloat32(i *encInstr, state *encoderState, p unsafe.Pointer) { - f := *(*float32)(p) +// encFloat encodes the floating point value (float32 float64) referenced by v. +func encFloat(i *encInstr, state *encoderState, v reflect.Value) { + f := v.Float() if f != 0 || state.sendZero { - v := floatBits(float64(f)) + bits := floatBits(f) state.update(i) - state.encodeUint(v) + state.encodeUint(bits) } } -// encFloat64 encodes the float64 with address p. -func encFloat64(i *encInstr, state *encoderState, p unsafe.Pointer) { - f := *(*float64)(p) - if f != 0 || state.sendZero { - state.update(i) - v := floatBits(f) - state.encodeUint(v) - } -} - -// encComplex64 encodes the complex64 with address p. +// encComplex encodes the complex value (complex64 complex128) referenced by v. // Complex numbers are just a pair of floating-point numbers, real part first. -func encComplex64(i *encInstr, state *encoderState, p unsafe.Pointer) { - c := *(*complex64)(p) - if c != 0+0i || state.sendZero { - rpart := floatBits(float64(real(c))) - ipart := floatBits(float64(imag(c))) - state.update(i) - state.encodeUint(rpart) - state.encodeUint(ipart) - } -} - -// encComplex128 encodes the complex128 with address p. -func encComplex128(i *encInstr, state *encoderState, p unsafe.Pointer) { - c := *(*complex128)(p) +func encComplex(i *encInstr, state *encoderState, v reflect.Value) { + c := v.Complex() if c != 0+0i || state.sendZero { rpart := floatBits(real(c)) ipart := floatBits(imag(c)) @@ -300,10 +228,10 @@ func encComplex128(i *encInstr, state *encoderState, p unsafe.Pointer) { } } -// encUint8Array encodes the byte slice whose header has address p. 
+// encUint8Array encodes the byte array referenced by v. // Byte arrays are encoded as an unsigned count followed by the raw bytes. -func encUint8Array(i *encInstr, state *encoderState, p unsafe.Pointer) { - b := *(*[]byte)(p) +func encUint8Array(i *encInstr, state *encoderState, v reflect.Value) { + b := v.Bytes() if len(b) > 0 || state.sendZero { state.update(i) state.encodeUint(uint64(len(b))) @@ -311,10 +239,10 @@ func encUint8Array(i *encInstr, state *encoderState, p unsafe.Pointer) { } } -// encString encodes the string whose header has address p. +// encString encodes the string referenced by v. // Strings are encoded as an unsigned count followed by the raw bytes. -func encString(i *encInstr, state *encoderState, p unsafe.Pointer) { - s := *(*string)(p) +func encString(i *encInstr, state *encoderState, v reflect.Value) { + s := v.String() if len(s) > 0 || state.sendZero { state.update(i) state.encodeUint(uint64(len(s))) @@ -324,7 +252,7 @@ func encString(i *encInstr, state *encoderState, p unsafe.Pointer) { // encStructTerminator encodes the end of an encoded struct // as delta field number of 0. -func encStructTerminator(i *encInstr, state *encoderState, p unsafe.Pointer) { +func encStructTerminator(i *encInstr, state *encoderState, v reflect.Value) { state.encodeUint(0) } @@ -338,60 +266,83 @@ type encEngine struct { const singletonField = 0 +// valid reports whether the value is valid and a non-nil pointer. +// (Slices, maps, and chans take care of themselves.) +func valid(v reflect.Value) bool { + switch v.Kind() { + case reflect.Invalid: + return false + case reflect.Ptr: + return !v.IsNil() + } + return true +} + // encodeSingle encodes a single top-level non-struct value. -func (enc *Encoder) encodeSingle(b *bytes.Buffer, engine *encEngine, basep unsafe.Pointer) { +func (enc *Encoder) encodeSingle(b *encBuffer, engine *encEngine, value reflect.Value) { state := enc.newEncoderState(b) + defer enc.freeEncoderState(state) state.fieldnum = singletonField // There is no surrounding struct to frame the transmission, so we must // generate data even if the item is zero. To do this, set sendZero. state.sendZero = true instr := &engine.instr[singletonField] - p := basep // offset will be zero if instr.indir > 0 { - if p = encIndirect(p, instr.indir); p == nil { - return - } + value = encIndirect(value, instr.indir) + } + if valid(value) { + instr.op(instr, state, value) } - instr.op(instr, state, p) - enc.freeEncoderState(state) } // encodeStruct encodes a single struct value. -func (enc *Encoder) encodeStruct(b *bytes.Buffer, engine *encEngine, basep unsafe.Pointer) { +func (enc *Encoder) encodeStruct(b *encBuffer, engine *encEngine, value reflect.Value) { + if !valid(value) { + return + } state := enc.newEncoderState(b) + defer enc.freeEncoderState(state) state.fieldnum = -1 for i := 0; i < len(engine.instr); i++ { instr := &engine.instr[i] - p := unsafe.Pointer(uintptr(basep) + instr.offset) + if i >= value.NumField() { + // encStructTerminator + instr.op(instr, state, reflect.Value{}) + break + } + field := value.FieldByIndex(instr.index) if instr.indir > 0 { - if p = encIndirect(p, instr.indir); p == nil { + field = encIndirect(field, instr.indir) + // TODO: Is field guaranteed valid? If so we could avoid this check. + if !valid(field) { continue } } - instr.op(instr, state, p) + instr.op(instr, state, field) } - enc.freeEncoderState(state) } -// encodeArray encodes the array whose 0th element is at p. 
-func (enc *Encoder) encodeArray(b *bytes.Buffer, p unsafe.Pointer, op encOp, elemWid uintptr, elemIndir int, length int) { +// encodeArray encodes an array. +func (enc *Encoder) encodeArray(b *encBuffer, value reflect.Value, op encOp, elemIndir int, length int, helper encHelper) { state := enc.newEncoderState(b) + defer enc.freeEncoderState(state) state.fieldnum = -1 state.sendZero = true state.encodeUint(uint64(length)) + if helper != nil && helper(state, value) { + return + } for i := 0; i < length; i++ { - elemp := p + elem := value.Index(i) if elemIndir > 0 { - up := encIndirect(elemp, elemIndir) - if up == nil { + elem = encIndirect(elem, elemIndir) + // TODO: Is elem guaranteed valid? If so we could avoid this check. + if !valid(elem) { errorf("encodeArray: nil element") } - elemp = up } - op(nil, state, elemp) - p = unsafe.Pointer(uintptr(p) + elemWid) + op(nil, state, elem) } - enc.freeEncoderState(state) } // encodeReflectValue is a helper for maps. It encodes the value v. @@ -402,13 +353,11 @@ func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir in if !v.IsValid() { errorf("encodeReflectValue: nil element") } - op(nil, state, unsafeAddr(v)) + op(nil, state, v) } // encodeMap encodes a map as unsigned count followed by key:value pairs. -// Because map internals are not exposed, we must use reflection rather than -// addresses. -func (enc *Encoder) encodeMap(b *bytes.Buffer, mv reflect.Value, keyOp, elemOp encOp, keyIndir, elemIndir int) { +func (enc *Encoder) encodeMap(b *encBuffer, mv reflect.Value, keyOp, elemOp encOp, keyIndir, elemIndir int) { state := enc.newEncoderState(b) state.fieldnum = -1 state.sendZero = true @@ -426,7 +375,7 @@ func (enc *Encoder) encodeMap(b *bytes.Buffer, mv reflect.Value, keyOp, elemOp e // by the type identifier (which might require defining that type right now), followed // by the concrete value. A nil value gets sent as the empty string for the name, // followed by no value. -func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv reflect.Value) { +func (enc *Encoder) encodeInterface(b *encBuffer, iv reflect.Value) { // Gobs can encode nil interface values but not typed interface // values holding nil pointers, since nil pointers point to no value. elem := iv.Elem() @@ -450,10 +399,7 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv reflect.Value) { } // Send the name. state.encodeUint(uint64(len(name))) - _, err := state.b.WriteString(name) - if err != nil { - error_(err) - } + state.b.WriteString(name) // Define the type id if necessary. enc.sendTypeDescriptor(enc.writer(), state, ut) // Send the type id. @@ -461,7 +407,7 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv reflect.Value) { // Encode the value into a new buffer. Any nested type definitions // should be written to b, before the encoded value. enc.pushWriter(b) - data := new(bytes.Buffer) + data := new(encBuffer) data.Write(spaceForLength) enc.encode(data, elem, ut) if enc.err != nil { @@ -470,7 +416,7 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv reflect.Value) { enc.popWriter() enc.writeMessage(b, data) if enc.err != nil { - error_(err) + error_(enc.err) } enc.freeEncoderState(state) } @@ -512,7 +458,7 @@ func isZero(val reflect.Value) bool { // encGobEncoder encodes a value that implements the GobEncoder interface. // The data is sent as a byte array. 
-func (enc *Encoder) encodeGobEncoder(b *bytes.Buffer, ut *userTypeInfo, v reflect.Value) { +func (enc *Encoder) encodeGobEncoder(b *encBuffer, ut *userTypeInfo, v reflect.Value) { // TODO: should we catch panics from the called method? var data []byte @@ -539,30 +485,30 @@ func (enc *Encoder) encodeGobEncoder(b *bytes.Buffer, ut *userTypeInfo, v reflec var encOpTable = [...]encOp{ reflect.Bool: encBool, reflect.Int: encInt, - reflect.Int8: encInt8, - reflect.Int16: encInt16, - reflect.Int32: encInt32, - reflect.Int64: encInt64, + reflect.Int8: encInt, + reflect.Int16: encInt, + reflect.Int32: encInt, + reflect.Int64: encInt, reflect.Uint: encUint, - reflect.Uint8: encUint8, - reflect.Uint16: encUint16, - reflect.Uint32: encUint32, - reflect.Uint64: encUint64, - reflect.Uintptr: encUintptr, - reflect.Float32: encFloat32, - reflect.Float64: encFloat64, - reflect.Complex64: encComplex64, - reflect.Complex128: encComplex128, + reflect.Uint8: encUint, + reflect.Uint16: encUint, + reflect.Uint32: encUint, + reflect.Uint64: encUint, + reflect.Uintptr: encUint, + reflect.Float32: encFloat, + reflect.Float64: encFloat, + reflect.Complex64: encComplex, + reflect.Complex128: encComplex, reflect.String: encString, } // encOpFor returns (a pointer to) the encoding op for the base type under rt and // the indirection count to reach it. -func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp) (*encOp, int) { +func encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp, building map[*typeInfo]bool) (*encOp, int) { ut := userType(rt) // If the type implements GobEncoder, we handle it without further processing. if ut.externalEnc != 0 { - return enc.gobEncodeOpFor(ut) + return gobEncodeOpFor(ut) } // If this type is already in progress, it's a recursive type (e.g. map[string]*T). // Return the pointer to the op we're already building. @@ -586,31 +532,27 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp break } // Slices have a header; we decode it to find the underlying array. - elemOp, elemIndir := enc.encOpFor(t.Elem(), inProgress) - op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { - slice := (*reflect.SliceHeader)(p) - if !state.sendZero && slice.Len == 0 { + elemOp, elemIndir := encOpFor(t.Elem(), inProgress, building) + helper := encSliceHelper[t.Elem().Kind()] + op = func(i *encInstr, state *encoderState, slice reflect.Value) { + if !state.sendZero && slice.Len() == 0 { return } state.update(i) - state.enc.encodeArray(state.b, unsafe.Pointer(slice.Data), *elemOp, t.Elem().Size(), elemIndir, int(slice.Len)) + state.enc.encodeArray(state.b, slice, *elemOp, elemIndir, slice.Len(), helper) } case reflect.Array: // True arrays have size in the type. - elemOp, elemIndir := enc.encOpFor(t.Elem(), inProgress) - op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { + elemOp, elemIndir := encOpFor(t.Elem(), inProgress, building) + helper := encArrayHelper[t.Elem().Kind()] + op = func(i *encInstr, state *encoderState, array reflect.Value) { state.update(i) - state.enc.encodeArray(state.b, p, *elemOp, t.Elem().Size(), elemIndir, t.Len()) + state.enc.encodeArray(state.b, array, *elemOp, elemIndir, array.Len(), helper) } case reflect.Map: - keyOp, keyIndir := enc.encOpFor(t.Key(), inProgress) - elemOp, elemIndir := enc.encOpFor(t.Elem(), inProgress) - op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { - // Maps cannot be accessed by moving addresses around the way - // that slices etc. can. 
We must recover a full reflection value for - // the iteration. - v := reflect.NewAt(t, unsafe.Pointer(p)).Elem() - mv := reflect.Indirect(v) + keyOp, keyIndir := encOpFor(t.Key(), inProgress, building) + elemOp, elemIndir := encOpFor(t.Elem(), inProgress, building) + op = func(i *encInstr, state *encoderState, mv reflect.Value) { // We send zero-length (but non-nil) maps because the // receiver might want to use the map. (Maps don't use append.) if !state.sendZero && mv.IsNil() { @@ -621,19 +563,16 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp } case reflect.Struct: // Generate a closure that calls out to the engine for the nested type. - enc.getEncEngine(userType(typ)) + getEncEngine(userType(typ), building) info := mustGetTypeInfo(typ) - op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { + op = func(i *encInstr, state *encoderState, sv reflect.Value) { state.update(i) // indirect through info to delay evaluation for recursive structs - state.enc.encodeStruct(state.b, info.encoder, p) + enc := info.encoder.Load().(*encEngine) + state.enc.encodeStruct(state.b, enc, sv) } case reflect.Interface: - op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { - // Interfaces transmit the name and contents of the concrete - // value they contain. - v := reflect.NewAt(t, unsafe.Pointer(p)).Elem() - iv := reflect.Indirect(v) + op = func(i *encInstr, state *encoderState, iv reflect.Value) { if !state.sendZero && (!iv.IsValid() || iv.IsNil()) { return } @@ -648,9 +587,8 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp return &op, indir } -// gobEncodeOpFor returns the op for a type that is known to implement -// GobEncoder. -func (enc *Encoder) gobEncodeOpFor(ut *userTypeInfo) (*encOp, int) { +// gobEncodeOpFor returns the op for a type that is known to implement GobEncoder. +func gobEncodeOpFor(ut *userTypeInfo) (*encOp, int) { rt := ut.user if ut.encIndir == -1 { rt = reflect.PtrTo(rt) @@ -660,13 +598,13 @@ func (enc *Encoder) gobEncodeOpFor(ut *userTypeInfo) (*encOp, int) { } } var op encOp - op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { - var v reflect.Value + op = func(i *encInstr, state *encoderState, v reflect.Value) { if ut.encIndir == -1 { // Need to climb up one level to turn value into pointer. - v = reflect.NewAt(rt, unsafe.Pointer(&p)).Elem() - } else { - v = reflect.NewAt(rt, p).Elem() + if !v.CanAddr() { + errorf("unaddressable value of type %s", rt) + } + v = v.Addr() } if !state.sendZero && isZero(v) { return @@ -678,7 +616,7 @@ func (enc *Encoder) gobEncodeOpFor(ut *userTypeInfo) (*encOp, int) { } // compileEnc returns the engine to compile the type. 
-func (enc *Encoder) compileEnc(ut *userTypeInfo) *encEngine { +func compileEnc(ut *userTypeInfo, building map[*typeInfo]bool) *encEngine { srt := ut.base engine := new(encEngine) seen := make(map[reflect.Type]*encOp) @@ -692,59 +630,57 @@ func (enc *Encoder) compileEnc(ut *userTypeInfo) *encEngine { if !isSent(&f) { continue } - op, indir := enc.encOpFor(f.Type, seen) - engine.instr = append(engine.instr, encInstr{*op, wireFieldNum, indir, uintptr(f.Offset)}) + op, indir := encOpFor(f.Type, seen, building) + engine.instr = append(engine.instr, encInstr{*op, wireFieldNum, f.Index, indir}) wireFieldNum++ } if srt.NumField() > 0 && len(engine.instr) == 0 { errorf("type %s has no exported fields", rt) } - engine.instr = append(engine.instr, encInstr{encStructTerminator, 0, 0, 0}) + engine.instr = append(engine.instr, encInstr{encStructTerminator, 0, nil, 0}) } else { engine.instr = make([]encInstr, 1) - op, indir := enc.encOpFor(rt, seen) - engine.instr[0] = encInstr{*op, singletonField, indir, 0} // offset is zero + op, indir := encOpFor(rt, seen, building) + engine.instr[0] = encInstr{*op, singletonField, nil, indir} } return engine } // getEncEngine returns the engine to compile the type. -// typeLock must be held (or we're in initialization and guaranteed single-threaded). -func (enc *Encoder) getEncEngine(ut *userTypeInfo) *encEngine { - info, err1 := getTypeInfo(ut) - if err1 != nil { - error_(err1) - } - if info.encoder == nil { - // Assign the encEngine now, so recursive types work correctly. But... - info.encoder = new(encEngine) - // ... if we fail to complete building the engine, don't cache the half-built machine. - // Doing this here means we won't cache a type that is itself OK but - // that contains a nested type that won't compile. The result is consistent - // error behavior when Encode is called multiple times on the top-level type. - ok := false - defer func() { - if !ok { - info.encoder = nil - } - }() - info.encoder = enc.compileEnc(ut) - ok = true +func getEncEngine(ut *userTypeInfo, building map[*typeInfo]bool) *encEngine { + info, err := getTypeInfo(ut) + if err != nil { + error_(err) } - return info.encoder + enc, ok := info.encoder.Load().(*encEngine) + if !ok { + enc = buildEncEngine(info, ut, building) + } + return enc } -// lockAndGetEncEngine is a function that locks and compiles. -// This lets us hold the lock only while compiling, not when encoding. -func (enc *Encoder) lockAndGetEncEngine(ut *userTypeInfo) *encEngine { - typeLock.Lock() - defer typeLock.Unlock() - return enc.getEncEngine(ut) +func buildEncEngine(info *typeInfo, ut *userTypeInfo, building map[*typeInfo]bool) *encEngine { + // Check for recursive types. 
+ if building != nil && building[info] { + return nil + } + info.encInit.Lock() + defer info.encInit.Unlock() + enc, ok := info.encoder.Load().(*encEngine) + if !ok { + if building == nil { + building = make(map[*typeInfo]bool) + } + building[info] = true + enc = compileEnc(ut, building) + info.encoder.Store(enc) + } + return enc } -func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value, ut *userTypeInfo) { +func (enc *Encoder) encode(b *encBuffer, value reflect.Value, ut *userTypeInfo) { defer catchError(&enc.err) - engine := enc.lockAndGetEncEngine(ut) + engine := getEncEngine(ut, nil) indir := ut.indir if ut.externalEnc != 0 { indir = int(ut.encIndir) @@ -753,8 +689,8 @@ func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value, ut *userTypeInf value = reflect.Indirect(value) } if ut.externalEnc == 0 && value.Type().Kind() == reflect.Struct { - enc.encodeStruct(b, engine, unsafeAddr(value)) + enc.encodeStruct(b, engine, value) } else { - enc.encodeSingle(b, engine, unsafeAddr(value)) + enc.encodeSingle(b, engine, value) } } diff --git a/libgo/go/encoding/gob/encoder.go b/libgo/go/encoding/gob/encoder.go index a3301c3bd33..a340e47b5ed 100644 --- a/libgo/go/encoding/gob/encoder.go +++ b/libgo/go/encoding/gob/encoder.go @@ -5,7 +5,6 @@ package gob import ( - "bytes" "io" "reflect" "sync" @@ -19,7 +18,7 @@ type Encoder struct { sent map[reflect.Type]typeId // which types we've already sent countState *encoderState // stage for writing counts freeList *encoderState // list of free encoderStates; avoids reallocation - byteBuf bytes.Buffer // buffer for top-level encoderState + byteBuf encBuffer // buffer for top-level encoderState err error } @@ -34,7 +33,7 @@ func NewEncoder(w io.Writer) *Encoder { enc := new(Encoder) enc.w = []io.Writer{w} enc.sent = make(map[reflect.Type]typeId) - enc.countState = enc.newEncoderState(new(bytes.Buffer)) + enc.countState = enc.newEncoderState(new(encBuffer)) return enc } @@ -60,7 +59,7 @@ func (enc *Encoder) setError(err error) { } // writeMessage sends the data item preceded by a unsigned count of its length. -func (enc *Encoder) writeMessage(w io.Writer, b *bytes.Buffer) { +func (enc *Encoder) writeMessage(w io.Writer, b *encBuffer) { // Space has been reserved for the length at the head of the message. // This is a little dirty: we grab the slice from the bytes.Buffer and massage // it by hand. @@ -88,9 +87,7 @@ func (enc *Encoder) sendActualType(w io.Writer, state *encoderState, ut *userTyp if _, alreadySent := enc.sent[actual]; alreadySent { return false } - typeLock.Lock() info, err := getTypeInfo(ut) - typeLock.Unlock() if err != nil { enc.setError(err) return @@ -191,9 +188,7 @@ func (enc *Encoder) sendTypeDescriptor(w io.Writer, state *encoderState, ut *use // a singleton basic type (int, []byte etc.) at top level. We don't // need to send the type info but we do need to update enc.sent. if !sent { - typeLock.Lock() info, err := getTypeInfo(ut) - typeLock.Unlock() if err != nil { enc.setError(err) return diff --git a/libgo/go/encoding/gob/encoder_test.go b/libgo/go/encoding/gob/encoder_test.go index 6445ce10026..0ea4c0ec8e5 100644 --- a/libgo/go/encoding/gob/encoder_test.go +++ b/libgo/go/encoding/gob/encoder_test.go @@ -13,6 +13,52 @@ import ( "testing" ) +// Test basic operations in a safe manner. 
+func TestBasicEncoderDecoder(t *testing.T) { + var values = []interface{}{ + true, + int(123), + int8(123), + int16(-12345), + int32(123456), + int64(-1234567), + uint(123), + uint8(123), + uint16(12345), + uint32(123456), + uint64(1234567), + uintptr(12345678), + float32(1.2345), + float64(1.2345678), + complex64(1.2345 + 2.3456i), + complex128(1.2345678 + 2.3456789i), + []byte("hello"), + string("hello"), + } + for _, value := range values { + b := new(bytes.Buffer) + enc := NewEncoder(b) + err := enc.Encode(value) + if err != nil { + t.Error("encoder fail:", err) + } + dec := NewDecoder(b) + result := reflect.New(reflect.TypeOf(value)) + err = dec.Decode(result.Interface()) + if err != nil { + t.Fatalf("error decoding %T: %v:", reflect.TypeOf(value), err) + } + if !reflect.DeepEqual(value, result.Elem().Interface()) { + t.Fatalf("%T: expected %v got %v", value, value, result.Elem().Interface()) + } + } +} + +type ET0 struct { + A int + B string +} + type ET2 struct { X string } @@ -40,14 +86,40 @@ type ET4 struct { func TestEncoderDecoder(t *testing.T) { b := new(bytes.Buffer) enc := NewEncoder(b) + et0 := new(ET0) + et0.A = 7 + et0.B = "gobs of fun" + err := enc.Encode(et0) + if err != nil { + t.Error("encoder fail:", err) + } + //fmt.Printf("% x %q\n", b, b) + //Debug(b) + dec := NewDecoder(b) + newEt0 := new(ET0) + err = dec.Decode(newEt0) + if err != nil { + t.Fatal("error decoding ET0:", err) + } + + if !reflect.DeepEqual(et0, newEt0) { + t.Fatalf("invalid data for et0: expected %+v; got %+v", *et0, *newEt0) + } + if b.Len() != 0 { + t.Error("not at eof;", b.Len(), "bytes left") + } + // t.FailNow() + + b = new(bytes.Buffer) + enc = NewEncoder(b) et1 := new(ET1) et1.A = 7 et1.Et2 = new(ET2) - err := enc.Encode(et1) + err = enc.Encode(et1) if err != nil { t.Error("encoder fail:", err) } - dec := NewDecoder(b) + dec = NewDecoder(b) newEt1 := new(ET1) err = dec.Decode(newEt1) if err != nil { @@ -860,3 +932,25 @@ func Test29ElementSlice(t *testing.T) { return } } + +// Don't crash, just give error when allocating a huge slice. +// Issue 8084. +func TestErrorForHugeSlice(t *testing.T) { + // Encode an int slice. + buf := new(bytes.Buffer) + slice := []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + err := NewEncoder(buf).Encode(slice) + if err != nil { + t.Fatal("encode:", err) + } + // Reach into the buffer and smash the count to make the encoded slice very long. + buf.Bytes()[buf.Len()-len(slice)-1] = 0xfa + // Decode and see error. + err = NewDecoder(buf).Decode(&slice) + if err == nil { + t.Fatal("decode: no error") + } + if !strings.Contains(err.Error(), "slice too big") { + t.Fatal("decode: expected slice too big error, got %s", err.Error()) + } +} diff --git a/libgo/go/encoding/gob/gobencdec_test.go b/libgo/go/encoding/gob/gobencdec_test.go index 157b7723a75..eb76b481d19 100644 --- a/libgo/go/encoding/gob/gobencdec_test.go +++ b/libgo/go/encoding/gob/gobencdec_test.go @@ -279,7 +279,7 @@ func TestGobEncoderValueField(t *testing.T) { b := new(bytes.Buffer) // First a field that's a structure. 
enc := NewEncoder(b) - err := enc.Encode(GobTestValueEncDec{17, StringStruct{"HIJKL"}}) + err := enc.Encode(&GobTestValueEncDec{17, StringStruct{"HIJKL"}}) if err != nil { t.Fatal("encode error:", err) } @@ -326,7 +326,7 @@ func TestGobEncoderArrayField(t *testing.T) { for i := range a.A.a { a.A.a[i] = byte(i) } - err := enc.Encode(a) + err := enc.Encode(&a) if err != nil { t.Fatal("encode error:", err) } @@ -589,7 +589,8 @@ func TestGobEncoderStructSingleton(t *testing.T) { func TestGobEncoderNonStructSingleton(t *testing.T) { b := new(bytes.Buffer) enc := NewEncoder(b) - err := enc.Encode(Gobber(1234)) + var g Gobber = 1234 + err := enc.Encode(&g) if err != nil { t.Fatal("encode error:", err) } diff --git a/libgo/go/encoding/gob/timing_test.go b/libgo/go/encoding/gob/timing_test.go index 9fbb0ac6d5a..940e5ad4126 100644 --- a/libgo/go/encoding/gob/timing_test.go +++ b/libgo/go/encoding/gob/timing_test.go @@ -19,33 +19,57 @@ type Bench struct { D []byte } -func benchmarkEndToEnd(r io.Reader, w io.Writer, b *testing.B) { - b.StopTimer() - enc := NewEncoder(w) - dec := NewDecoder(r) - bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")} - b.StartTimer() - for i := 0; i < b.N; i++ { - if enc.Encode(bench) != nil { - panic("encode error") +func benchmarkEndToEnd(b *testing.B, ctor func() interface{}, pipe func() (r io.Reader, w io.Writer, err error)) { + b.RunParallel(func(pb *testing.PB) { + r, w, err := pipe() + if err != nil { + b.Fatal("can't get pipe:", err) } - if dec.Decode(bench) != nil { - panic("decode error") + v := ctor() + enc := NewEncoder(w) + dec := NewDecoder(r) + for pb.Next() { + if err := enc.Encode(v); err != nil { + b.Fatal("encode error:", err) + } + if err := dec.Decode(v); err != nil { + b.Fatal("decode error:", err) + } } - } + }) } func BenchmarkEndToEndPipe(b *testing.B) { - r, w, err := os.Pipe() - if err != nil { - b.Fatal("can't get pipe:", err) - } - benchmarkEndToEnd(r, w, b) + benchmarkEndToEnd(b, func() interface{} { + return &Bench{7, 3.2, "now is the time", bytes.Repeat([]byte("for all good men"), 100)} + }, func() (r io.Reader, w io.Writer, err error) { + r, w, err = os.Pipe() + return + }) } func BenchmarkEndToEndByteBuffer(b *testing.B) { - var buf bytes.Buffer - benchmarkEndToEnd(&buf, &buf, b) + benchmarkEndToEnd(b, func() interface{} { + return &Bench{7, 3.2, "now is the time", bytes.Repeat([]byte("for all good men"), 100)} + }, func() (r io.Reader, w io.Writer, err error) { + var buf bytes.Buffer + return &buf, &buf, nil + }) +} + +func BenchmarkEndToEndSliceByteBuffer(b *testing.B) { + benchmarkEndToEnd(b, func() interface{} { + v := &Bench{7, 3.2, "now is the time", nil} + Register(v) + arr := make([]interface{}, 100) + for i := range arr { + arr[i] = v + } + return &arr + }, func() (r io.Reader, w io.Writer, err error) { + var buf bytes.Buffer + return &buf, &buf, nil + }) } func TestCountEncodeMallocs(t *testing.T) { @@ -103,7 +127,199 @@ func TestCountDecodeMallocs(t *testing.T) { t.Fatal("decode:", err) } }) - if allocs != 3 { - t.Fatalf("mallocs per decode of type Bench: %v; wanted 3\n", allocs) + if allocs != 4 { + t.Fatalf("mallocs per decode of type Bench: %v; wanted 4\n", allocs) + } +} + +func BenchmarkEncodeComplex128Slice(b *testing.B) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + a := make([]complex128, 1000) + for i := range a { + a[i] = 1.2 + 3.4i + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + err := enc.Encode(a) + if err != nil { + b.Fatal(err) + } + } +} + +func 
BenchmarkEncodeFloat64Slice(b *testing.B) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + a := make([]float64, 1000) + for i := range a { + a[i] = 1.23e4 + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + err := enc.Encode(a) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeInt32Slice(b *testing.B) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + a := make([]int32, 1000) + for i := range a { + a[i] = 1234 + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + err := enc.Encode(a) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeStringSlice(b *testing.B) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + a := make([]string, 1000) + for i := range a { + a[i] = "now is the time" + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + err := enc.Encode(a) + if err != nil { + b.Fatal(err) + } + } +} + +// benchmarkBuf is a read buffer we can reset +type benchmarkBuf struct { + offset int + data []byte +} + +func (b *benchmarkBuf) Read(p []byte) (n int, err error) { + n = copy(p, b.data[b.offset:]) + if n == 0 { + return 0, io.EOF + } + b.offset += n + return +} + +func (b *benchmarkBuf) ReadByte() (c byte, err error) { + if b.offset >= len(b.data) { + return 0, io.EOF + } + c = b.data[b.offset] + b.offset++ + return +} + +func (b *benchmarkBuf) reset() { + b.offset = 0 +} + +func BenchmarkDecodeComplex128Slice(b *testing.B) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + a := make([]complex128, 1000) + for i := range a { + a[i] = 1.2 + 3.4i + } + err := enc.Encode(a) + if err != nil { + b.Fatal(err) + } + x := make([]complex128, 1000) + bbuf := benchmarkBuf{data: buf.Bytes()} + b.ResetTimer() + for i := 0; i < b.N; i++ { + bbuf.reset() + dec := NewDecoder(&bbuf) + err := dec.Decode(&x) + if err != nil { + b.Fatal(i, err) + } + } +} + +func BenchmarkDecodeFloat64Slice(b *testing.B) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + a := make([]float64, 1000) + for i := range a { + a[i] = 1.23e4 + } + err := enc.Encode(a) + if err != nil { + b.Fatal(err) + } + x := make([]float64, 1000) + bbuf := benchmarkBuf{data: buf.Bytes()} + b.ResetTimer() + for i := 0; i < b.N; i++ { + bbuf.reset() + dec := NewDecoder(&bbuf) + err := dec.Decode(&x) + if err != nil { + b.Fatal(i, err) + } + } +} + +func BenchmarkDecodeInt32Slice(b *testing.B) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + a := make([]int32, 1000) + for i := range a { + a[i] = 1234 + } + err := enc.Encode(a) + if err != nil { + b.Fatal(err) + } + x := make([]int32, 1000) + bbuf := benchmarkBuf{data: buf.Bytes()} + b.ResetTimer() + for i := 0; i < b.N; i++ { + bbuf.reset() + dec := NewDecoder(&bbuf) + err := dec.Decode(&x) + if err != nil { + b.Fatal(i, err) + } + } +} + +func BenchmarkDecodeStringSlice(b *testing.B) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + a := make([]string, 1000) + for i := range a { + a[i] = "now is the time" + } + err := enc.Encode(a) + if err != nil { + b.Fatal(err) + } + x := make([]string, 1000) + bbuf := benchmarkBuf{data: buf.Bytes()} + b.ResetTimer() + for i := 0; i < b.N; i++ { + bbuf.reset() + dec := NewDecoder(&bbuf) + err := dec.Decode(&x) + if err != nil { + b.Fatal(i, err) + } } } diff --git a/libgo/go/encoding/gob/type.go b/libgo/go/encoding/gob/type.go index cad14527953..a49b71a8676 100644 --- a/libgo/go/encoding/gob/type.go +++ b/libgo/go/encoding/gob/type.go @@ -11,6 +11,7 @@ import ( "os" "reflect" "sync" + "sync/atomic" "unicode" "unicode/utf8" ) @@ -681,29 +682,51 @@ func (w *wireType) string() 
string { type typeInfo struct { id typeId - encoder *encEngine + encInit sync.Mutex // protects creation of encoder + encoder atomic.Value // *encEngine wire *wireType } -var typeInfoMap = make(map[reflect.Type]*typeInfo) // protected by typeLock +// typeInfoMap is an atomic pointer to map[reflect.Type]*typeInfo. +// It's updated copy-on-write. Readers just do an atomic load +// to get the current version of the map. Writers make a full copy of +// the map and atomically update the pointer to point to the new map. +// Under heavy read contention, this is significantly faster than a map +// protected by a mutex. +var typeInfoMap atomic.Value + +func lookupTypeInfo(rt reflect.Type) *typeInfo { + m, _ := typeInfoMap.Load().(map[reflect.Type]*typeInfo) + return m[rt] +} -// typeLock must be held. func getTypeInfo(ut *userTypeInfo) (*typeInfo, error) { rt := ut.base if ut.externalEnc != 0 { // We want the user type, not the base type. rt = ut.user } - info, ok := typeInfoMap[rt] - if ok { + if info := lookupTypeInfo(rt); info != nil { return info, nil } - info = new(typeInfo) + return buildTypeInfo(ut, rt) +} + +// buildTypeInfo constructs the type information for the type +// and stores it in the type info map. +func buildTypeInfo(ut *userTypeInfo, rt reflect.Type) (*typeInfo, error) { + typeLock.Lock() + defer typeLock.Unlock() + + if info := lookupTypeInfo(rt); info != nil { + return info, nil + } + gt, err := getBaseType(rt.Name(), rt) if err != nil { return nil, err } - info.id = gt.id() + info := &typeInfo{id: gt.id()} if ut.externalEnc != 0 { userType, err := getType(rt.Name(), ut, rt) @@ -719,25 +742,32 @@ func getTypeInfo(ut *userTypeInfo) (*typeInfo, error) { case xText: info.wire = &wireType{TextMarshalerT: gt} } - typeInfoMap[ut.user] = info - return info, nil + rt = ut.user + } else { + t := info.id.gobType() + switch typ := rt; typ.Kind() { + case reflect.Array: + info.wire = &wireType{ArrayT: t.(*arrayType)} + case reflect.Map: + info.wire = &wireType{MapT: t.(*mapType)} + case reflect.Slice: + // []byte == []uint8 is a special case handled separately + if typ.Elem().Kind() != reflect.Uint8 { + info.wire = &wireType{SliceT: t.(*sliceType)} + } + case reflect.Struct: + info.wire = &wireType{StructT: t.(*structType)} + } } - t := info.id.gobType() - switch typ := rt; typ.Kind() { - case reflect.Array: - info.wire = &wireType{ArrayT: t.(*arrayType)} - case reflect.Map: - info.wire = &wireType{MapT: t.(*mapType)} - case reflect.Slice: - // []byte == []uint8 is a special case handled separately - if typ.Elem().Kind() != reflect.Uint8 { - info.wire = &wireType{SliceT: t.(*sliceType)} - } - case reflect.Struct: - info.wire = &wireType{StructT: t.(*structType)} + // Create new map with old contents plus new entry. 
+ newm := make(map[reflect.Type]*typeInfo) + m, _ := typeInfoMap.Load().(map[reflect.Type]*typeInfo) + for k, v := range m { + newm[k] = v } - typeInfoMap[rt] = info + newm[rt] = info + typeInfoMap.Store(newm) return info, nil } diff --git a/libgo/go/encoding/json/decode.go b/libgo/go/encoding/json/decode.go index af1c908ad77..705bc2e17a7 100644 --- a/libgo/go/encoding/json/decode.go +++ b/libgo/go/encoding/json/decode.go @@ -173,7 +173,6 @@ type decodeState struct { scan scanner nextscan scanner // for calls to nextValue savedError error - tempstr string // scratch space to avoid some allocations useNumber bool } @@ -293,6 +292,32 @@ func (d *decodeState) value(v reflect.Value) { } } +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + // indirect walks down v allocating pointers as needed, // until it gets to a non-pointer. // if it encounters an Unmarshaler, indirect stops and returns that. @@ -444,8 +469,10 @@ func (d *decodeState) array(v reflect.Value) { } } +var nullLiteral = []byte("null") + // object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte of the object ('{') has been read already. +// the first byte ('{') of the object has been read already. func (d *decodeState) object(v reflect.Value) { // Check for unmarshaler. u, ut, pv := d.indirect(v, false) @@ -478,7 +505,9 @@ func (d *decodeState) object(v reflect.Value) { t := v.Type() if t.Key().Kind() != reflect.String { d.saveError(&UnmarshalTypeError{"object", v.Type()}) - break + d.off-- + d.next() // skip over { } in input + return } if v.IsNil() { v.Set(reflect.MakeMap(t)) @@ -564,9 +593,14 @@ func (d *decodeState) object(v reflect.Value) { // Read value. if destring { - d.value(reflect.ValueOf(&d.tempstr)) - d.literalStore([]byte(d.tempstr), subv, true) - d.tempstr = "" // Zero scratch space for successive values. 
+ switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", item, v.Type())) + } } else { d.value(subv) } diff --git a/libgo/go/encoding/json/decode_test.go b/libgo/go/encoding/json/decode_test.go index 238a87fd665..7235969b9fe 100644 --- a/libgo/go/encoding/json/decode_test.go +++ b/libgo/go/encoding/json/decode_test.go @@ -406,6 +406,13 @@ var unmarshalTests = []unmarshalTest{ ptr: new(string), out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld", }, + + // issue 8305 + { + in: `{"2009-11-10T23:00:00Z": "hello world"}`, + ptr: &map[time.Time]string{}, + err: &UnmarshalTypeError{"object", reflect.TypeOf(map[time.Time]string{})}, + }, } func TestMarshal(t *testing.T) { @@ -514,6 +521,7 @@ func TestUnmarshal(t *testing.T) { if tt.ptr == nil { continue } + // v = new(right-type) v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) dec := NewDecoder(bytes.NewReader(in)) @@ -521,7 +529,9 @@ func TestUnmarshal(t *testing.T) { dec.UseNumber() } if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) { - t.Errorf("#%d: %v want %v", i, err, tt.err) + t.Errorf("#%d: %v, want %v", i, err, tt.err) + continue + } else if err != nil { continue } if !reflect.DeepEqual(v.Elem().Interface(), tt.out) { @@ -1060,18 +1070,25 @@ func TestEmptyString(t *testing.T) { } } -// Test that the returned error is non-nil when trying to unmarshal null string into int, for successive ,string option -// Issue 7046 +// Test that a null for ,string is not replaced with the previous quoted string (issue 7046). +// It should also not be an error (issue 2540, issue 8587). func TestNullString(t *testing.T) { type T struct { - A int `json:",string"` - B int `json:",string"` + A int `json:",string"` + B int `json:",string"` + C *int `json:",string"` } - data := []byte(`{"A": "1", "B": null}`) + data := []byte(`{"A": "1", "B": null, "C": null}`) var s T + s.B = 1 + s.C = new(int) + *s.C = 2 err := Unmarshal(data, &s) - if err == nil { - t.Fatalf("expected error; got %v", s) + if err != nil { + t.Fatalf("Unmarshal: %v") + } + if s.B != 1 || s.C != nil { + t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C) } } diff --git a/libgo/go/encoding/json/encode.go b/libgo/go/encoding/json/encode.go index 741ddd89cbe..fca2a0980b2 100644 --- a/libgo/go/encoding/json/encode.go +++ b/libgo/go/encoding/json/encode.go @@ -40,8 +40,8 @@ import ( // // Floating point, integer, and Number values encode as JSON numbers. // -// String values encode as JSON strings. InvalidUTF8Error will be returned -// if an invalid UTF-8 sequence is encountered. +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. // The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" // to keep some browsers from misinterpreting JSON output as HTML. // Ampersand "&" is also escaped to "\u0026" for the same reason. @@ -93,6 +93,8 @@ import ( // as described in the next paragraph. // An anonymous struct field with a name given in its JSON tag is treated as // having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. // // The Go visibility rules for struct fields are amended for JSON when // deciding which field to marshal or unmarshal. 
If there are @@ -696,12 +698,12 @@ type ptrEncoder struct { elemEnc encoderFunc } -func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, _ bool) { +func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) { if v.IsNil() { e.WriteString("null") return } - pe.elemEnc(e, v.Elem(), false) + pe.elemEnc(e, v.Elem(), quoted) } func newPtrEncoder(t reflect.Type) encoderFunc { @@ -803,6 +805,9 @@ func (e *encodeState) string(s string) (int, error) { case '\r': e.WriteByte('\\') e.WriteByte('r') + case '\t': + e.WriteByte('\\') + e.WriteByte('t') default: // This encodes bytes < 0x20 except for \n and \r, // as well as <, > and &. The latter are escaped because they @@ -876,9 +881,12 @@ func (e *encodeState) stringBytes(s []byte) (int, error) { case '\r': e.WriteByte('\\') e.WriteByte('r') + case '\t': + e.WriteByte('\\') + e.WriteByte('t') default: // This encodes bytes < 0x20 except for \n and \r, - // as well as < and >. The latter are escaped because they + // as well as <, >, and &. The latter are escaped because they // can lead to security holes when user-controlled strings // are rendered into JSON and served to some browsers. e.WriteString(`\u00`) diff --git a/libgo/go/encoding/json/encode_test.go b/libgo/go/encoding/json/encode_test.go index 2e89a78eb9f..7abfa85db7b 100644 --- a/libgo/go/encoding/json/encode_test.go +++ b/libgo/go/encoding/json/encode_test.go @@ -452,3 +452,81 @@ func TestHTMLEscape(t *testing.T) { t.Errorf("HTMLEscape(&b, []byte(m)) = %s; want %s", b.Bytes(), want.Bytes()) } } + +// golang.org/issue/8582 +func TestEncodePointerString(t *testing.T) { + type stringPointer struct { + N *int64 `json:"n,string"` + } + var n int64 = 42 + b, err := Marshal(stringPointer{N: &n}) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if got, want := string(b), `{"n":"42"}`; got != want { + t.Errorf("Marshal = %s, want %s", got, want) + } + var back stringPointer + err = Unmarshal(b, &back) + if err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if back.N == nil { + t.Fatalf("Unmarshalled nil N field") + } + if *back.N != 42 { + t.Fatalf("*N = %d; want 42", *back.N) + } +} + +var encodeStringTests = []struct { + in string + out string +}{ + {"\x00", `"\u0000"`}, + {"\x01", `"\u0001"`}, + {"\x02", `"\u0002"`}, + {"\x03", `"\u0003"`}, + {"\x04", `"\u0004"`}, + {"\x05", `"\u0005"`}, + {"\x06", `"\u0006"`}, + {"\x07", `"\u0007"`}, + {"\x08", `"\u0008"`}, + {"\x09", `"\t"`}, + {"\x0a", `"\n"`}, + {"\x0b", `"\u000b"`}, + {"\x0c", `"\u000c"`}, + {"\x0d", `"\r"`}, + {"\x0e", `"\u000e"`}, + {"\x0f", `"\u000f"`}, + {"\x10", `"\u0010"`}, + {"\x11", `"\u0011"`}, + {"\x12", `"\u0012"`}, + {"\x13", `"\u0013"`}, + {"\x14", `"\u0014"`}, + {"\x15", `"\u0015"`}, + {"\x16", `"\u0016"`}, + {"\x17", `"\u0017"`}, + {"\x18", `"\u0018"`}, + {"\x19", `"\u0019"`}, + {"\x1a", `"\u001a"`}, + {"\x1b", `"\u001b"`}, + {"\x1c", `"\u001c"`}, + {"\x1d", `"\u001d"`}, + {"\x1e", `"\u001e"`}, + {"\x1f", `"\u001f"`}, +} + +func TestEncodeString(t *testing.T) { + for _, tt := range encodeStringTests { + b, err := Marshal(tt.in) + if err != nil { + t.Errorf("Marshal(%q): %v", tt.in, err) + continue + } + out := string(b) + if out != tt.out { + t.Errorf("Marshal(%q) = %#q, want %#q", tt.in, out, tt.out) + } + } +} diff --git a/libgo/go/encoding/json/stream.go b/libgo/go/encoding/json/stream.go index 1cb289fd843..9566ecadcbb 100644 --- a/libgo/go/encoding/json/stream.go +++ b/libgo/go/encoding/json/stream.go @@ -139,7 +139,6 @@ func nonSpace(b []byte) bool { // An Encoder writes JSON objects to 
an output stream. type Encoder struct { w io.Writer - e encodeState err error } diff --git a/libgo/go/encoding/xml/xml.go b/libgo/go/encoding/xml/xml.go index b473cb84584..8c15b98c3a9 100644 --- a/libgo/go/encoding/xml/xml.go +++ b/libgo/go/encoding/xml/xml.go @@ -196,6 +196,7 @@ type Decoder struct { ns map[string]string err error line int + offset int64 unmarshalDepth int } @@ -859,9 +860,17 @@ func (d *Decoder) getc() (b byte, ok bool) { if b == '\n' { d.line++ } + d.offset++ return b, true } +// InputOffset returns the input stream byte offset of the current decoder position. +// The offset gives the location of the end of the most recently returned token +// and the beginning of the next token. +func (d *Decoder) InputOffset() int64 { + return d.offset +} + // Return saved offset. // If we did ungetc (nextByte >= 0), have to back up one. func (d *Decoder) savedOffset() int { @@ -891,6 +900,7 @@ func (d *Decoder) ungetc(b byte) { d.line-- } d.nextByte = int(b) + d.offset-- } var entity = map[string]int{ diff --git a/libgo/go/encoding/xml/xml_test.go b/libgo/go/encoding/xml/xml_test.go index 7723ab1c9f0..be995c0d52c 100644 --- a/libgo/go/encoding/xml/xml_test.go +++ b/libgo/go/encoding/xml/xml_test.go @@ -170,7 +170,7 @@ var xmlInput = []string{ func TestRawToken(t *testing.T) { d := NewDecoder(strings.NewReader(testInput)) d.Entity = testEntity - testRawToken(t, d, rawTokens) + testRawToken(t, d, testInput, rawTokens) } const nonStrictInput = ` @@ -225,7 +225,7 @@ var nonStrictTokens = []Token{ func TestNonStrictRawToken(t *testing.T) { d := NewDecoder(strings.NewReader(nonStrictInput)) d.Strict = false - testRawToken(t, d, nonStrictTokens) + testRawToken(t, d, nonStrictInput, nonStrictTokens) } type downCaser struct { @@ -254,7 +254,7 @@ func TestRawTokenAltEncoding(t *testing.T) { } return &downCaser{t, input.(io.ByteReader)}, nil } - testRawToken(t, d, rawTokensAltEncoding) + testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding) } func TestRawTokenAltEncodingNoConverter(t *testing.T) { @@ -280,9 +280,12 @@ func TestRawTokenAltEncodingNoConverter(t *testing.T) { } } -func testRawToken(t *testing.T, d *Decoder, rawTokens []Token) { +func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) { + lastEnd := int64(0) for i, want := range rawTokens { + start := d.InputOffset() have, err := d.RawToken() + end := d.InputOffset() if err != nil { t.Fatalf("token %d: unexpected error: %s", i, err) } @@ -300,6 +303,26 @@ func testRawToken(t *testing.T, d *Decoder, rawTokens []Token) { } t.Errorf("token %d = %s, want %s", i, shave, swant) } + + // Check that InputOffset returned actual token. + switch { + case start < lastEnd: + t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have) + case start >= end: + // Special case: EndElement can be synthesized. 
+ if start == end && end == lastEnd { + break + } + t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have) + case end > int64(len(raw)): + t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have) + default: + text := raw[start:end] + if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) { + t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have) + } + } + lastEnd = end } } diff --git a/libgo/go/flag/flag.go b/libgo/go/flag/flag.go index cd2a165be19..60aef5d806c 100644 --- a/libgo/go/flag/flag.go +++ b/libgo/go/flag/flag.go @@ -73,7 +73,8 @@ import ( "time" ) -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. +// ErrHelp is the error returned if the -help or -h flag is invoked +// but no such flag is defined. var ErrHelp = errors.New("flag: help requested") // -- bool Value @@ -405,6 +406,7 @@ func defaultUsage(f *FlagSet) { // for how to write your own usage function. // Usage prints to standard error a usage message documenting all defined command-line flags. +// It is called when an error occurs while parsing flags. // The function is a variable that may be changed to point to a custom function. var Usage = func() { fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) @@ -628,18 +630,21 @@ func Float64(name string, value float64, usage string) *float64 { // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { f.Var(newDurationValue(value, p), name, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { CommandLine.Var(newDurationValue(value, p), name, usage) } // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { p := new(time.Duration) f.DurationVar(p, name, value, usage) @@ -648,6 +653,7 @@ func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. func Duration(name string, value time.Duration, usage string) *time.Duration { return CommandLine.Duration(name, value, usage) } @@ -697,13 +703,15 @@ func (f *FlagSet) failf(format string, a ...interface{}) error { return err } -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. +// usage calls the Usage method for the flag set if one is specified, +// or the appropriate default usage function otherwise. 
func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) + if f.Usage == nil { + if f == CommandLine { + Usage() + } else { + defaultUsage(f) + } } else { f.Usage() } @@ -752,6 +760,7 @@ func (f *FlagSet) parseOne() (bool, error) { } return false, f.failf("flag provided but not defined: -%s", name) } + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg if has_value { if err := fv.Set(value); err != nil { @@ -784,7 +793,7 @@ func (f *FlagSet) parseOne() (bool, error) { // Parse parses flag definitions from the argument list, which should not // include the command name. Must be called after all flags in the FlagSet // are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. +// The return value will be ErrHelp if -help or -h were set but not defined. func (f *FlagSet) Parse(arguments []string) error { f.parsed = true f.args = arguments @@ -826,7 +835,7 @@ func Parsed() bool { } // CommandLine is the default set of command-line flags, parsed from os.Args. -// The top-level functions such as BoolVar, Arg, and on are wrappers for the +// The top-level functions such as BoolVar, Arg, and so on are wrappers for the // methods of CommandLine. var CommandLine = NewFlagSet(os.Args[0], ExitOnError) diff --git a/libgo/go/flag/flag_test.go b/libgo/go/flag/flag_test.go index 2c038726979..8c88c8c2744 100644 --- a/libgo/go/flag/flag_test.go +++ b/libgo/go/flag/flag_test.go @@ -251,6 +251,16 @@ func TestUserDefined(t *testing.T) { } } +func TestUserDefinedForCommandLine(t *testing.T) { + const help = "HELP" + var result string + ResetForTesting(func() { result = help }) + Usage() + if result != help { + t.Fatalf("got %q; expected %q", result, help) + } +} + // Declare a user-defined boolean flag type. type boolFlagVar struct { count int diff --git a/libgo/go/fmt/doc.go b/libgo/go/fmt/doc.go index 02642d6ae77..ee54463e275 100644 --- a/libgo/go/fmt/doc.go +++ b/libgo/go/fmt/doc.go @@ -13,7 +13,7 @@ The verbs: General: - %v the value in a default format. + %v the value in a default format when printing structs, the plus flag (%+v) adds field names %#v a Go-syntax representation of the value %T a Go-syntax representation of the type of the value @@ -38,8 +38,8 @@ %E scientific notation, e.g. -1234.456E+78 %f decimal point but no exponent, e.g. 123.456 %F synonym for %f - %g whichever of %e or %f produces more compact output - %G whichever of %E or %f produces more compact output + %g %e for large exponents, %f otherwise + %G %E for large exponents, %F otherwise String and slice of bytes: %s the uninterpreted bytes of the string or slice %q a double-quoted string safely escaped with Go syntax @@ -51,6 +51,21 @@ There is no 'u' flag. Integers are printed unsigned if they have unsigned type. Similarly, there is no need to specify the size of the operand (int8, int64). + The default format for %v is: + bool: %t + int, int8 etc.: %d + uint, uint8 etc.: %d, %x if printed with %#v + float32, complex64, etc: %g + string: %s + chan: %p + pointer: %p + For compound objects, the elements are printed using these rules, recursively, + laid out like this: + struct: {field0 field1 ...} + array, slice: [elem0 elem1 ...] + maps: map[key1:value1 key2:value2] + pointer to above: &{}, &[], &map[] + Width is specified by an optional decimal number immediately following the verb. If absent, the width is whatever is necessary to represent the value. 
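As a brief illustration of the default %v formats and width padding described above, a minimal complete program (expected output shown in comments, following the rules listed here):

	package main

	import "fmt"

	func main() {
		type point struct{ X, Y int }
		p := point{1, 2}
		fmt.Printf("%v\n", p)                      // {1 2}
		fmt.Printf("%+v\n", p)                     // {X:1 Y:2}
		fmt.Printf("%v\n", []int{1, 2, 3})         // [1 2 3]
		fmt.Printf("%v\n", map[string]int{"a": 1}) // map[a:1]
		fmt.Printf("%v\n", &p)                     // &{1 2}
		fmt.Printf("%6d|\n", 42)                   // "    42|" (width 6, space padded)
	}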
Precision is specified after the (optional) width by a period followed by a @@ -63,16 +78,20 @@ %9.2f width 9, precision 2 %9.f width 9, precision 0 - Width and precision are measured in units of Unicode code points. - (This differs from C's printf where the units are numbers - of bytes.) Either or both of the flags may be replaced with the - character '*', causing their values to be obtained from the next - operand, which must be of type int. + Width and precision are measured in units of Unicode code points, + that is, runes. (This differs from C's printf where the + units are always measured in bytes.) Either or both of the flags + may be replaced with the character '*', causing their values to be + obtained from the next operand, which must be of type int. - For most values, width is the minimum number of characters to output, + For most values, width is the minimum number of runes to output, padding the formatted form with spaces if necessary. - For strings, precision is the maximum number of characters to output, - truncating if necessary. + + For strings, byte slices and byte arrays, however, precision + limits the length of the input to be formatted (not the size of + the output), truncating if necessary. Normally it is measured in + runes, but for these types when formatted with the %x or %X format + it is measured in bytes. For floating-point values, width sets the minimum width of the field and precision sets the number of places after the decimal, if appropriate, @@ -147,6 +166,10 @@ func (x X) String() string { return Sprintf("<%s>", x) } convert the value before recurring: func (x X) String() string { return Sprintf("<%s>", string(x)) } + Infinite recursion can also be triggered by self-referential data + structures, such as a slice that contains itself as an element, if + that type has a String method. Such pathologies are rare, however, + and the package does not protect against them. 
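A minimal sketch of the precision rules just described; the %x cases show the byte-counting behavior documented above (expected output in comments):

	package main

	import "fmt"

	func main() {
		fmt.Printf("%.3s\n", "日本語日本語") // 日本語 (precision counts runes)
		fmt.Printf("%.3x\n", "日本語")       // e697a5 (for %x it counts input bytes)
		fmt.Printf("%.5x\n", "abcdefgh")     // 6162636465
		fmt.Printf("%8.3f\n", 3.14159)       //    3.142 (width 8, 3 digits after the point)
	}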
Explicit argument indexes: @@ -160,7 +183,7 @@ For example, fmt.Sprintf("%[2]d %[1]d\n", 11, 22) - will yield "22, 11", while + will yield "22 11", while fmt.Sprintf("%[3]*.[2]*[1]f", 12.0, 2, 6), equivalent to fmt.Sprintf("%6.2f", 12.0), diff --git a/libgo/go/fmt/fmt_test.go b/libgo/go/fmt/fmt_test.go index 8e6923119de..ccd80904771 100644 --- a/libgo/go/fmt/fmt_test.go +++ b/libgo/go/fmt/fmt_test.go @@ -108,6 +108,20 @@ func (p *P) String() string { var barray = [5]renamedUint8{1, 2, 3, 4, 5} var bslice = barray[:] +type byteStringer byte + +func (byteStringer) String() string { return "X" } + +var byteStringerSlice = []byteStringer{97, 98, 99, 100} + +type byteFormatter byte + +func (byteFormatter) Format(f State, _ rune) { + Fprint(f, "X") +} + +var byteFormatterSlice = []byteFormatter{97, 98, 99, 100} + var b byte var fmtTests = []struct { @@ -125,13 +139,17 @@ var fmtTests = []struct { {"%x", "xyz", "78797a"}, {"%X", "xyz", "78797A"}, {"%q", "abc", `"abc"`}, + {"%#x", []byte("abc\xff"), "0x616263ff"}, + {"%#X", []byte("abc\xff"), "0X616263FF"}, + {"%# x", []byte("abc\xff"), "0x61 0x62 0x63 0xff"}, + {"%# X", []byte("abc\xff"), "0X61 0X62 0X63 0XFF"}, // basic bytes {"%s", []byte("abc"), "abc"}, {"%x", []byte("abc"), "616263"}, {"% x", []byte("abc\xff"), "61 62 63 ff"}, - {"%#x", []byte("abc\xff"), "0x610x620x630xff"}, - {"%#X", []byte("abc\xff"), "0X610X620X630XFF"}, + {"%#x", []byte("abc\xff"), "0x616263ff"}, + {"%#X", []byte("abc\xff"), "0X616263FF"}, {"%# x", []byte("abc\xff"), "0x61 0x62 0x63 0xff"}, {"%# X", []byte("abc\xff"), "0X61 0X62 0X63 0XFF"}, {"% X", []byte("abc\xff"), "61 62 63 FF"}, @@ -176,9 +194,18 @@ var fmtTests = []struct { {"%.5s", "日本語日本語", "日本語日本"}, {"%.5s", []byte("日本語日本語"), "日本語日本"}, {"%.5q", "abcdefghijklmnopqrstuvwxyz", `"abcde"`}, + {"%.5x", "abcdefghijklmnopqrstuvwxyz", `6162636465`}, + {"%.5q", []byte("abcdefghijklmnopqrstuvwxyz"), `"abcde"`}, + {"%.5x", []byte("abcdefghijklmnopqrstuvwxyz"), `6162636465`}, {"%.3q", "日本語日本語", `"日本語"`}, {"%.3q", []byte("日本語日本語"), `"日本語"`}, + {"%.1q", "日本語", `"日"`}, + {"%.1q", []byte("日本語"), `"日"`}, + {"%.1x", "日本語", `e6`}, + {"%.1X", []byte("日本語"), `E6`}, {"%10.1q", "日本語日本語", ` "日"`}, + {"%3c", '⌘', " ⌘"}, + {"%5q", '\u2026', ` '…'`}, {"%10v", nil, " <nil>"}, {"%-10v", nil, "<nil> "}, @@ -379,7 +406,7 @@ var fmtTests = []struct { {"%s", I(23), `<23>`}, {"%q", I(23), `"<23>"`}, {"%x", I(23), `3c32333e`}, - {"%#x", I(23), `0x3c0x320x330x3e`}, + {"%#x", I(23), `0x3c32333e`}, {"%# x", I(23), `0x3c 0x32 0x33 0x3e`}, {"%d", I(23), `23`}, // Stringer applies only to string formats. @@ -623,6 +650,21 @@ var fmtTests = []struct { {"%+010.2f", -104.66 + 440.51i, "(-000104.66+000440.51i)"}, {"%+010.2f", +104.66 - 440.51i, "(+000104.66-000440.51i)"}, {"%+010.2f", -104.66 - 440.51i, "(-000104.66-000440.51i)"}, + + // []T where type T is a byte with a Stringer method. + {"%v", byteStringerSlice, "[X X X X]"}, + {"%s", byteStringerSlice, "abcd"}, + {"%q", byteStringerSlice, "\"abcd\""}, + {"%x", byteStringerSlice, "61626364"}, + {"%#v", byteStringerSlice, "[]fmt_test.byteStringer{0x61, 0x62, 0x63, 0x64}"}, + + // And the same for Formatter. + {"%v", byteFormatterSlice, "[X X X X]"}, + {"%s", byteFormatterSlice, "abcd"}, + {"%q", byteFormatterSlice, "\"abcd\""}, + {"%x", byteFormatterSlice, "61626364"}, + // This next case seems wrong, but the docs say the Formatter wins here. + {"%#v", byteFormatterSlice, "[]fmt_test.byteFormatter{X, X, X, X}"}, } // zeroFill generates zero-filled strings of the specified width. 
The length @@ -672,7 +714,7 @@ func TestSprintf(t *testing.T) { // thing as if done by hand with two singleton prints. func TestComplexFormatting(t *testing.T) { var yesNo = []bool{true, false} - var signs = []float64{1, 0, -1} + var values = []float64{1, 0, -1, math.Inf(1), math.Inf(-1), math.NaN()} for _, plus := range yesNo { for _, zero := range yesNo { for _, space := range yesNo { @@ -697,10 +739,10 @@ func TestComplexFormatting(t *testing.T) { imagFmt += "+" imagFmt += "10.2" imagFmt += string(char) - for _, realSign := range signs { - for _, imagSign := range signs { - one := Sprintf(realFmt, complex(realSign, imagSign)) - two := Sprintf("("+realFmt+imagFmt+"i)", realSign, imagSign) + for _, realValue := range values { + for _, imagValue := range values { + one := Sprintf(realFmt, complex(realValue, imagValue)) + two := Sprintf("("+realFmt+imagFmt+"i)", realValue, imagValue) if one != two { t.Error(f, one, two) } @@ -819,7 +861,25 @@ func BenchmarkManyArgs(b *testing.B) { }) } +func BenchmarkFprintInt(b *testing.B) { + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + Fprint(&buf, 123456) + } +} + +func BenchmarkFprintIntNoAlloc(b *testing.B) { + var x interface{} = 123456 + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + buf.Reset() + Fprint(&buf, x) + } +} + var mallocBuf bytes.Buffer +var mallocPointer *int // A pointer so we know the interface value won't allocate. // gccgo numbers are different because gccgo does not have escape // analysis yet. @@ -833,11 +893,13 @@ var mallocTest = []struct { {5, `Sprintf("%x")`, func() { Sprintf("%x", 7) }}, {5, `Sprintf("%s")`, func() { Sprintf("%s", "hello") }}, {5, `Sprintf("%x %x")`, func() { Sprintf("%x %x", 7, 112) }}, - // For %g we use a float32, not float64, to guarantee passing the argument - // does not need to allocate memory to store the result in a pointer-sized word. - {20, `Sprintf("%g")`, func() { Sprintf("%g", float32(3.14159)) }}, - {5, `Fprintf(buf, "%x %x %x")`, func() { mallocBuf.Reset(); Fprintf(&mallocBuf, "%x %x %x", 7, 8, 9) }}, + {20, `Sprintf("%g")`, func() { Sprintf("%g", float32(3.14159)) }}, // TODO: Can this be 1? {5, `Fprintf(buf, "%s")`, func() { mallocBuf.Reset(); Fprintf(&mallocBuf, "%s", "hello") }}, + // If the interface value doesn't need to allocate, amortized allocation overhead should be zero. + {5, `Fprintf(buf, "%x %x %x")`, func() { + mallocBuf.Reset() + Fprintf(&mallocBuf, "%x %x %x", mallocPointer, mallocPointer, mallocPointer) + }}, } var _ bytes.Buffer @@ -859,7 +921,7 @@ func TestCountMallocs(t *testing.T) { type flagPrinter struct{} -func (*flagPrinter) Format(f State, c rune) { +func (flagPrinter) Format(f State, c rune) { s := "%" for i := 0; i < 128; i++ { if f.Flag(i) { @@ -905,11 +967,12 @@ func TestFlagParser(t *testing.T) { } func TestStructPrinter(t *testing.T) { - var s struct { + type T struct { a string b string c int } + var s T s.a = "abc" s.b = "def" s.c = 123 @@ -919,15 +982,38 @@ func TestStructPrinter(t *testing.T) { }{ {"%v", "{abc def 123}"}, {"%+v", "{a:abc b:def c:123}"}, + {"%#v", `fmt_test.T{a:"abc", b:"def", c:123}`}, } for _, tt := range tests { out := Sprintf(tt.fmt, s) if out != tt.out { - t.Errorf("Sprintf(%q, &s) = %q, want %q", tt.fmt, out, tt.out) + t.Errorf("Sprintf(%q, s) = %#q, want %#q", tt.fmt, out, tt.out) + } + // The same but with a pointer. 
+ out = Sprintf(tt.fmt, &s) + if out != "&"+tt.out { + t.Errorf("Sprintf(%q, &s) = %#q, want %#q", tt.fmt, out, "&"+tt.out) } } } +func TestSlicePrinter(t *testing.T) { + slice := []int{} + s := Sprint(slice) + if s != "[]" { + t.Errorf("empty slice printed as %q not %q", s, "[]") + } + slice = []int{1, 2, 3} + s = Sprint(slice) + if s != "[1 2 3]" { + t.Errorf("slice: got %q expected %q", s, "[1 2 3]") + } + s = Sprint(&slice) + if s != "&[1 2 3]" { + t.Errorf("&slice: got %q expected %q", s, "&[1 2 3]") + } +} + // presentInMap checks map printing using substrings so we don't depend on the // print order. func presentInMap(s string, a []string, t *testing.T) { @@ -954,6 +1040,12 @@ func TestMapPrinter(t *testing.T) { a := []string{"1:one", "2:two", "3:three"} presentInMap(Sprintf("%v", m1), a, t) presentInMap(Sprint(m1), a, t) + // Pointer to map prints the same but with initial &. + if !strings.HasPrefix(Sprint(&m1), "&") { + t.Errorf("no initial & for address of map") + } + presentInMap(Sprintf("%v", &m1), a, t) + presentInMap(Sprint(&m1), a, t) } func TestEmptyMap(t *testing.T) { @@ -1084,10 +1176,10 @@ var panictests = []struct { } func TestPanics(t *testing.T) { - for _, tt := range panictests { + for i, tt := range panictests { s := Sprintf(tt.fmt, tt.in) if s != tt.out { - t.Errorf("%q: got %q expected %q", tt.fmt, s, tt.out) + t.Errorf("%d: %q: got %q expected %q", i, tt.fmt, s, tt.out) } } } @@ -1147,3 +1239,74 @@ func TestNilDoesNotBecomeTyped(t *testing.T) { t.Errorf("expected:\n\t%q\ngot:\n\t%q", expect, got) } } + +var formatterFlagTests = []struct { + in string + val interface{} + out string +}{ + // scalar values with the (unused by fmt) 'a' verb. + {"%a", flagPrinter{}, "[%a]"}, + {"%-a", flagPrinter{}, "[%-a]"}, + {"%+a", flagPrinter{}, "[%+a]"}, + {"%#a", flagPrinter{}, "[%#a]"}, + {"% a", flagPrinter{}, "[% a]"}, + {"%0a", flagPrinter{}, "[%0a]"}, + {"%1.2a", flagPrinter{}, "[%1.2a]"}, + {"%-1.2a", flagPrinter{}, "[%-1.2a]"}, + {"%+1.2a", flagPrinter{}, "[%+1.2a]"}, + {"%-+1.2a", flagPrinter{}, "[%+-1.2a]"}, + {"%-+1.2abc", flagPrinter{}, "[%+-1.2a]bc"}, + {"%-1.2abc", flagPrinter{}, "[%-1.2a]bc"}, + + // composite values with the 'a' verb + {"%a", [1]flagPrinter{}, "[[%a]]"}, + {"%-a", [1]flagPrinter{}, "[[%-a]]"}, + {"%+a", [1]flagPrinter{}, "[[%+a]]"}, + {"%#a", [1]flagPrinter{}, "[[%#a]]"}, + {"% a", [1]flagPrinter{}, "[[% a]]"}, + {"%0a", [1]flagPrinter{}, "[[%0a]]"}, + {"%1.2a", [1]flagPrinter{}, "[[%1.2a]]"}, + {"%-1.2a", [1]flagPrinter{}, "[[%-1.2a]]"}, + {"%+1.2a", [1]flagPrinter{}, "[[%+1.2a]]"}, + {"%-+1.2a", [1]flagPrinter{}, "[[%+-1.2a]]"}, + {"%-+1.2abc", [1]flagPrinter{}, "[[%+-1.2a]]bc"}, + {"%-1.2abc", [1]flagPrinter{}, "[[%-1.2a]]bc"}, + + // simple values with the 'v' verb + {"%v", flagPrinter{}, "[%v]"}, + {"%-v", flagPrinter{}, "[%-v]"}, + {"%+v", flagPrinter{}, "[%+v]"}, + {"%#v", flagPrinter{}, "[%#v]"}, + {"% v", flagPrinter{}, "[% v]"}, + {"%0v", flagPrinter{}, "[%0v]"}, + {"%1.2v", flagPrinter{}, "[%1.2v]"}, + {"%-1.2v", flagPrinter{}, "[%-1.2v]"}, + {"%+1.2v", flagPrinter{}, "[%+1.2v]"}, + {"%-+1.2v", flagPrinter{}, "[%+-1.2v]"}, + {"%-+1.2vbc", flagPrinter{}, "[%+-1.2v]bc"}, + {"%-1.2vbc", flagPrinter{}, "[%-1.2v]bc"}, + + // composite values with the 'v' verb. 
+ {"%v", [1]flagPrinter{}, "[[%v]]"}, + {"%-v", [1]flagPrinter{}, "[[%-v]]"}, + {"%+v", [1]flagPrinter{}, "[[%+v]]"}, + {"%#v", [1]flagPrinter{}, "[1]fmt_test.flagPrinter{[%#v]}"}, + {"% v", [1]flagPrinter{}, "[[% v]]"}, + {"%0v", [1]flagPrinter{}, "[[%0v]]"}, + {"%1.2v", [1]flagPrinter{}, "[[%1.2v]]"}, + {"%-1.2v", [1]flagPrinter{}, "[[%-1.2v]]"}, + {"%+1.2v", [1]flagPrinter{}, "[[%+1.2v]]"}, + {"%-+1.2v", [1]flagPrinter{}, "[[%+-1.2v]]"}, + {"%-+1.2vbc", [1]flagPrinter{}, "[[%+-1.2v]]bc"}, + {"%-1.2vbc", [1]flagPrinter{}, "[[%-1.2v]]bc"}, +} + +func TestFormatterFlags(t *testing.T) { + for _, tt := range formatterFlagTests { + s := Sprintf(tt.in, tt.val) + if s != tt.out { + t.Errorf("Sprintf(%q, %T) = %q, want %q", tt.in, tt.val, s, tt.out) + } + } +} diff --git a/libgo/go/fmt/format.go b/libgo/go/fmt/format.go index a89c542cfb5..4d97d1443ed 100644 --- a/libgo/go/fmt/format.go +++ b/libgo/go/fmt/format.go @@ -34,15 +34,8 @@ func init() { } } -// A fmt is the raw formatter used by Printf etc. -// It prints into a buffer that must be set up separately. -type fmt struct { - intbuf [nByte]byte - buf *buffer - // width, precision - wid int - prec int - // flags +// flags placed in a separate struct for easy clearing. +type fmtFlags struct { widPresent bool precPresent bool minus bool @@ -52,20 +45,27 @@ type fmt struct { unicode bool uniQuote bool // Use 'x'= prefix for %U if printable. zero bool + + // For the formats %+v %#v, we set the plusV/sharpV flags + // and clear the plus/sharp flags since %+v and %#v are in effect + // different, flagless formats set at the top level. + plusV bool + sharpV bool +} + +// A fmt is the raw formatter used by Printf etc. +// It prints into a buffer that must be set up separately. +type fmt struct { + intbuf [nByte]byte + buf *buffer + // width, precision + wid int + prec int + fmtFlags } func (f *fmt) clearflags() { - f.wid = 0 - f.widPresent = false - f.prec = 0 - f.precPresent = false - f.minus = false - f.plus = false - f.sharp = false - f.space = false - f.unicode = false - f.uniQuote = false - f.zero = false + f.fmtFlags = fmtFlags{} } func (f *fmt) init(buf *buffer) { @@ -114,7 +114,7 @@ func (f *fmt) pad(b []byte) { f.buf.Write(b) return } - padding, left, right := f.computePadding(len(b)) + padding, left, right := f.computePadding(utf8.RuneCount(b)) if left > 0 { f.writePadding(left, padding) } @@ -199,10 +199,36 @@ func (f *fmt) integer(a int64, base uint64, signedness bool, digits string) { // block but it's not worth the duplication, so ua has 64 bits. i := len(buf) ua := uint64(a) - for ua >= base { - i-- - buf[i] = digits[ua%base] - ua /= base + // use constants for the division and modulo for more efficient code. + // switch cases ordered by popularity. + switch base { + case 10: + for ua >= 10 { + i-- + next := ua / 10 + buf[i] = byte('0' + ua - next*10) + ua = next + } + case 16: + for ua >= 16 { + i-- + buf[i] = digits[ua&0xF] + ua >>= 4 + } + case 8: + for ua >= 8 { + i-- + buf[i] = byte('0' + ua&7) + ua >>= 3 + } + case 2: + for ua >= 2 { + i-- + buf[i] = byte('0' + ua&1) + ua >>= 1 + } + default: + panic("fmt: unknown base; can't happen") } i-- buf[i] = digits[ua] @@ -298,7 +324,7 @@ func (f *fmt) fmt_sbx(s string, b []byte, digits string) { if i > 0 && f.space { buf = append(buf, ' ') } - if f.sharp { + if f.sharp && (f.space || i == 0) { buf = append(buf, '0', x) } var c byte @@ -314,11 +340,17 @@ func (f *fmt) fmt_sbx(s string, b []byte, digits string) { // fmt_sx formats a string as a hexadecimal encoding of its bytes. 
func (f *fmt) fmt_sx(s, digits string) { + if f.precPresent && f.prec < len(s) { + s = s[:f.prec] + } f.fmt_sbx(s, nil, digits) } // fmt_bx formats a byte slice as a hexadecimal encoding of its bytes. func (f *fmt) fmt_bx(b []byte, digits string) { + if f.precPresent && f.prec < len(b) { + b = b[:f.prec] + } f.fmt_sbx("", b, digits) } diff --git a/libgo/go/fmt/print.go b/libgo/go/fmt/print.go index 302661f4c85..59a30d221e9 100644 --- a/libgo/go/fmt/print.go +++ b/libgo/go/fmt/print.go @@ -128,7 +128,7 @@ var ppFree = sync.Pool{ New: func() interface{} { return new(pp) }, } -// newPrinter allocates a new pp struct or grab a cached one. +// newPrinter allocates a new pp struct or grabs a cached one. func newPrinter() *pp { p := ppFree.Get().(*pp) p.panicking = false @@ -297,13 +297,13 @@ func parsenum(s string, start, end int) (num int, isnum bool, newi int) { return } -func (p *pp) unknownType(v interface{}) { - if v == nil { +func (p *pp) unknownType(v reflect.Value) { + if !v.IsValid() { p.buf.Write(nilAngleBytes) return } p.buf.WriteByte('?') - p.buf.WriteString(reflect.TypeOf(v).String()) + p.buf.WriteString(v.Type().String()) p.buf.WriteByte('?') } @@ -317,11 +317,11 @@ func (p *pp) badVerb(verb rune) { case p.arg != nil: p.buf.WriteString(reflect.TypeOf(p.arg).String()) p.add('=') - p.printArg(p.arg, 'v', false, false, 0) + p.printArg(p.arg, 'v', 0) case p.value.IsValid(): p.buf.WriteString(p.value.Type().String()) p.add('=') - p.printValue(p.value, 'v', false, false, 0) + p.printValue(p.value, 'v', 0) default: p.buf.Write(nilAngleBytes) } @@ -406,7 +406,7 @@ func (p *pp) fmtUnicode(v int64) { p.fmt.sharp = sharp } -func (p *pp) fmtUint64(v uint64, verb rune, goSyntax bool) { +func (p *pp) fmtUint64(v uint64, verb rune) { switch verb { case 'b': p.fmt.integer(int64(v), 2, unsigned, ldigits) @@ -415,7 +415,7 @@ func (p *pp) fmtUint64(v uint64, verb rune, goSyntax bool) { case 'd': p.fmt.integer(int64(v), 10, unsigned, ldigits) case 'v': - if goSyntax { + if p.fmt.sharpV { p.fmt0x64(v, true) } else { p.fmt.integer(int64(v), 10, unsigned, ldigits) @@ -499,10 +499,10 @@ func (p *pp) fmtComplex128(v complex128, verb rune) { } } -func (p *pp) fmtString(v string, verb rune, goSyntax bool) { +func (p *pp) fmtString(v string, verb rune) { switch verb { case 'v': - if goSyntax { + if p.fmt.sharpV { p.fmt.fmt_q(v) } else { p.fmt.fmt_s(v) @@ -520,9 +520,9 @@ func (p *pp) fmtString(v string, verb rune, goSyntax bool) { } } -func (p *pp) fmtBytes(v []byte, verb rune, goSyntax bool, typ reflect.Type, depth int) { +func (p *pp) fmtBytes(v []byte, verb rune, typ reflect.Type, depth int) { if verb == 'v' || verb == 'd' { - if goSyntax { + if p.fmt.sharpV { if v == nil { if typ == nil { p.buf.WriteString("[]byte(nil)") @@ -543,15 +543,15 @@ func (p *pp) fmtBytes(v []byte, verb rune, goSyntax bool, typ reflect.Type, dept } for i, c := range v { if i > 0 { - if goSyntax { + if p.fmt.sharpV { p.buf.Write(commaSpaceBytes) } else { p.buf.WriteByte(' ') } } - p.printArg(c, 'v', p.fmt.plus, goSyntax, depth+1) + p.printArg(c, 'v', depth+1) } - if goSyntax { + if p.fmt.sharpV { p.buf.WriteByte('}') } else { p.buf.WriteByte(']') @@ -572,7 +572,7 @@ func (p *pp) fmtBytes(v []byte, verb rune, goSyntax bool, typ reflect.Type, dept } } -func (p *pp) fmtPointer(value reflect.Value, verb rune, goSyntax bool) { +func (p *pp) fmtPointer(value reflect.Value, verb rune) { use0x64 := true switch verb { case 'p', 'v': @@ -594,7 +594,7 @@ func (p *pp) fmtPointer(value reflect.Value, verb rune, goSyntax bool) { return } - if 
goSyntax { + if p.fmt.sharpV { p.add('(') p.buf.WriteString(value.Type().String()) p.add(')') @@ -611,7 +611,7 @@ func (p *pp) fmtPointer(value reflect.Value, verb rune, goSyntax bool) { if use0x64 { p.fmt0x64(uint64(u), !p.fmt.sharp) } else { - p.fmtUint64(uint64(u), verb, false) + p.fmtUint64(uint64(u), verb) } } } @@ -636,42 +636,65 @@ func (p *pp) catchPanic(arg interface{}, verb rune) { // Nested panics; the recursion in printArg cannot succeed. panic(err) } + p.fmt.clearflags() // We are done, and for this output we want default behavior. p.buf.Write(percentBangBytes) p.add(verb) p.buf.Write(panicBytes) p.panicking = true - p.printArg(err, 'v', false, false, 0) + p.printArg(err, 'v', 0) p.panicking = false p.buf.WriteByte(')') } } -func (p *pp) handleMethods(verb rune, plus, goSyntax bool, depth int) (wasString, handled bool) { +// clearSpecialFlags pushes %#v back into the regular flags and returns their old state. +func (p *pp) clearSpecialFlags() (plusV, sharpV bool) { + plusV = p.fmt.plusV + if plusV { + p.fmt.plus = true + p.fmt.plusV = false + } + sharpV = p.fmt.sharpV + if sharpV { + p.fmt.sharp = true + p.fmt.sharpV = false + } + return +} + +// restoreSpecialFlags, whose argument should be a call to clearSpecialFlags, +// restores the setting of the plusV and sharpV flags. +func (p *pp) restoreSpecialFlags(plusV, sharpV bool) { + if plusV { + p.fmt.plus = false + p.fmt.plusV = true + } + if sharpV { + p.fmt.sharp = false + p.fmt.sharpV = true + } +} + +func (p *pp) handleMethods(verb rune, depth int) (handled bool) { if p.erroring { return } // Is it a Formatter? if formatter, ok := p.arg.(Formatter); ok { handled = true - wasString = false + defer p.restoreSpecialFlags(p.clearSpecialFlags()) defer p.catchPanic(p.arg, verb) formatter.Format(p, verb) return } - // Must not touch flags before Formatter looks at them. - if plus { - p.fmt.plus = false - } // If we're doing Go syntax and the argument knows how to supply it, take care of it now. - if goSyntax { - p.fmt.sharp = false + if p.fmt.sharpV { if stringer, ok := p.arg.(GoStringer); ok { - wasString = false handled = true defer p.catchPanic(p.arg, verb) // Print the result of GoString unadorned. - p.fmtString(stringer.GoString(), 's', false) + p.fmt.fmt_s(stringer.GoString()) return } } else { @@ -682,30 +705,27 @@ func (p *pp) handleMethods(verb rune, plus, goSyntax bool, depth int) (wasString case 'v', 's', 'x', 'X', 'q': // Is it an error or Stringer? // The duplication in the bodies is necessary: - // setting wasString and handled, and deferring catchPanic, + // setting handled and deferring catchPanic // must happen before calling the method. switch v := p.arg.(type) { case error: - wasString = false handled = true defer p.catchPanic(p.arg, verb) - p.printArg(v.Error(), verb, plus, false, depth) + p.printArg(v.Error(), verb, depth) return case Stringer: - wasString = false handled = true defer p.catchPanic(p.arg, verb) - p.printArg(v.String(), verb, plus, false, depth) + p.printArg(v.String(), verb, depth) return } } } - handled = false - return + return false } -func (p *pp) printArg(arg interface{}, verb rune, plus, goSyntax bool, depth int) (wasString bool) { +func (p *pp) printArg(arg interface{}, verb rune, depth int) (wasString bool) { p.arg = arg p.value = reflect.Value{} @@ -722,26 +742,13 @@ func (p *pp) printArg(arg interface{}, verb rune, plus, goSyntax bool, depth int // %T (the value's type) and %p (its address) are special; we always do them first. 
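// Illustrative note, not part of the patch: because this dispatch runs first,
// Sprintf("%T", []int{1}) is "[]int" and Sprintf("%p", new(int)) prints an
// address such as 0xc208000000, before any Formatter or Stringer method is consulted.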
switch verb { case 'T': - p.printArg(reflect.TypeOf(arg).String(), 's', false, false, 0) + p.printArg(reflect.TypeOf(arg).String(), 's', 0) return false case 'p': - p.fmtPointer(reflect.ValueOf(arg), verb, goSyntax) + p.fmtPointer(reflect.ValueOf(arg), verb) return false } - // Clear flags for base formatters. - // handleMethods needs them, so we must restore them later. - // We could call handleMethods here and avoid this work, but - // handleMethods is expensive enough to be worth delaying. - oldPlus := p.fmt.plus - oldSharp := p.fmt.sharp - if plus { - p.fmt.plus = false - } - if goSyntax { - p.fmt.sharp = false - } - // Some types can be done without reflection. switch f := arg.(type) { case bool: @@ -765,40 +772,37 @@ func (p *pp) printArg(arg interface{}, verb rune, plus, goSyntax bool, depth int case int64: p.fmtInt64(f, verb) case uint: - p.fmtUint64(uint64(f), verb, goSyntax) + p.fmtUint64(uint64(f), verb) case uint8: - p.fmtUint64(uint64(f), verb, goSyntax) + p.fmtUint64(uint64(f), verb) case uint16: - p.fmtUint64(uint64(f), verb, goSyntax) + p.fmtUint64(uint64(f), verb) case uint32: - p.fmtUint64(uint64(f), verb, goSyntax) + p.fmtUint64(uint64(f), verb) case uint64: - p.fmtUint64(f, verb, goSyntax) + p.fmtUint64(f, verb) case uintptr: - p.fmtUint64(uint64(f), verb, goSyntax) + p.fmtUint64(uint64(f), verb) case string: - p.fmtString(f, verb, goSyntax) + p.fmtString(f, verb) wasString = verb == 's' || verb == 'v' case []byte: - p.fmtBytes(f, verb, goSyntax, nil, depth) + p.fmtBytes(f, verb, nil, depth) wasString = verb == 's' default: - // Restore flags in case handleMethods finds a Formatter. - p.fmt.plus = oldPlus - p.fmt.sharp = oldSharp // If the type is not simple, it might have methods. - if isString, handled := p.handleMethods(verb, plus, goSyntax, depth); handled { - return isString + if handled := p.handleMethods(verb, depth); handled { + return false } // Need to use reflection - return p.printReflectValue(reflect.ValueOf(arg), verb, plus, goSyntax, depth) + return p.printReflectValue(reflect.ValueOf(arg), verb, depth) } p.arg = nil return } // printValue is like printArg but starts with a reflect value, not an interface{} value. -func (p *pp) printValue(value reflect.Value, verb rune, plus, goSyntax bool, depth int) (wasString bool) { +func (p *pp) printValue(value reflect.Value, verb rune, depth int) (wasString bool) { if !value.IsValid() { if verb == 'T' || verb == 'v' { p.buf.Write(nilAngleBytes) @@ -812,10 +816,10 @@ func (p *pp) printValue(value reflect.Value, verb rune, plus, goSyntax bool, dep // %T (the value's type) and %p (its address) are special; we always do them first. switch verb { case 'T': - p.printArg(value.Type().String(), 's', false, false, 0) + p.printArg(value.Type().String(), 's', 0) return false case 'p': - p.fmtPointer(value, verb, goSyntax) + p.fmtPointer(value, verb) return false } @@ -825,16 +829,18 @@ func (p *pp) printValue(value reflect.Value, verb rune, plus, goSyntax bool, dep if value.CanInterface() { p.arg = value.Interface() } - if isString, handled := p.handleMethods(verb, plus, goSyntax, depth); handled { - return isString + if handled := p.handleMethods(verb, depth); handled { + return false } - return p.printReflectValue(value, verb, plus, goSyntax, depth) + return p.printReflectValue(value, verb, depth) } +var byteType = reflect.TypeOf(byte(0)) + // printReflectValue is the fallback for both printArg and printValue. // It uses reflect to print the value. 
-func (p *pp) printReflectValue(value reflect.Value, verb rune, plus, goSyntax bool, depth int) (wasString bool) { +func (p *pp) printReflectValue(value reflect.Value, verb rune, depth int) (wasString bool) { oldValue := p.value p.value = value BigSwitch: @@ -844,7 +850,7 @@ BigSwitch: case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: p.fmtInt64(f.Int(), verb) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p.fmtUint64(f.Uint(), verb, goSyntax) + p.fmtUint64(f.Uint(), verb) case reflect.Float32, reflect.Float64: if f.Type().Size() == 4 { p.fmtFloat32(float32(f.Float()), verb) @@ -858,9 +864,9 @@ BigSwitch: p.fmtComplex128(f.Complex(), verb) } case reflect.String: - p.fmtString(f.String(), verb, goSyntax) + p.fmtString(f.String(), verb) case reflect.Map: - if goSyntax { + if p.fmt.sharpV { p.buf.WriteString(f.Type().String()) if f.IsNil() { p.buf.WriteString("(nil)") @@ -873,23 +879,23 @@ BigSwitch: keys := f.MapKeys() for i, key := range keys { if i > 0 { - if goSyntax { + if p.fmt.sharpV { p.buf.Write(commaSpaceBytes) } else { p.buf.WriteByte(' ') } } - p.printValue(key, verb, plus, goSyntax, depth+1) + p.printValue(key, verb, depth+1) p.buf.WriteByte(':') - p.printValue(f.MapIndex(key), verb, plus, goSyntax, depth+1) + p.printValue(f.MapIndex(key), verb, depth+1) } - if goSyntax { + if p.fmt.sharpV { p.buf.WriteByte('}') } else { p.buf.WriteByte(']') } case reflect.Struct: - if goSyntax { + if p.fmt.sharpV { p.buf.WriteString(value.Type().String()) } p.add('{') @@ -897,36 +903,40 @@ BigSwitch: t := v.Type() for i := 0; i < v.NumField(); i++ { if i > 0 { - if goSyntax { + if p.fmt.sharpV { p.buf.Write(commaSpaceBytes) } else { p.buf.WriteByte(' ') } } - if plus || goSyntax { + if p.fmt.plusV || p.fmt.sharpV { if f := t.Field(i); f.Name != "" { p.buf.WriteString(f.Name) p.buf.WriteByte(':') } } - p.printValue(getField(v, i), verb, plus, goSyntax, depth+1) + p.printValue(getField(v, i), verb, depth+1) } p.buf.WriteByte('}') case reflect.Interface: value := f.Elem() if !value.IsValid() { - if goSyntax { + if p.fmt.sharpV { p.buf.WriteString(f.Type().String()) p.buf.Write(nilParenBytes) } else { p.buf.Write(nilAngleBytes) } } else { - wasString = p.printValue(value, verb, plus, goSyntax, depth+1) + wasString = p.printValue(value, verb, depth+1) } case reflect.Array, reflect.Slice: - // Byte slices are special. - if typ := f.Type(); typ.Elem().Kind() == reflect.Uint8 { + // Byte slices are special: + // - Handle []byte (== []uint8) with fmtBytes. + // - Handle []T, where T is a named byte type, with fmtBytes only + // for the s, q, an x verbs. For other verbs, T might be a + // Stringer, so we use printValue to print each element. 
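// Illustrative note, not part of the patch: for the byteStringer type in the tests
// above, %x of []byteStringer{97, 98, 99, 100} still hex-dumps the bytes ("61626364"),
// while %v prints each element through its String method ("[X X X X]").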
+ if typ := f.Type(); typ.Elem().Kind() == reflect.Uint8 && (typ.Elem() == byteType || verb == 's' || verb == 'q' || verb == 'x') { var bytes []byte if f.Kind() == reflect.Slice { bytes = f.Bytes() @@ -941,11 +951,11 @@ BigSwitch: bytes[i] = byte(f.Index(i).Uint()) } } - p.fmtBytes(bytes, verb, goSyntax, typ, depth) + p.fmtBytes(bytes, verb, typ, depth) wasString = verb == 's' break } - if goSyntax { + if p.fmt.sharpV { p.buf.WriteString(value.Type().String()) if f.Kind() == reflect.Slice && f.IsNil() { p.buf.WriteString("(nil)") @@ -957,15 +967,15 @@ BigSwitch: } for i := 0; i < f.Len(); i++ { if i > 0 { - if goSyntax { + if p.fmt.sharpV { p.buf.Write(commaSpaceBytes) } else { p.buf.WriteByte(' ') } } - p.printValue(f.Index(i), verb, plus, goSyntax, depth+1) + p.printValue(f.Index(i), verb, depth+1) } - if goSyntax { + if p.fmt.sharpV { p.buf.WriteByte('}') } else { p.buf.WriteByte(']') @@ -978,17 +988,21 @@ BigSwitch: switch a := f.Elem(); a.Kind() { case reflect.Array, reflect.Slice: p.buf.WriteByte('&') - p.printValue(a, verb, plus, goSyntax, depth+1) + p.printValue(a, verb, depth+1) break BigSwitch case reflect.Struct: p.buf.WriteByte('&') - p.printValue(a, verb, plus, goSyntax, depth+1) + p.printValue(a, verb, depth+1) + break BigSwitch + case reflect.Map: + p.buf.WriteByte('&') + p.printValue(a, verb, depth+1) break BigSwitch } } fallthrough case reflect.Chan, reflect.Func, reflect.UnsafePointer: - p.fmtPointer(value, verb, goSyntax) + p.fmtPointer(value, verb) default: p.unknownType(f) } @@ -1154,9 +1168,19 @@ func (p *pp) doPrintf(format string, a []interface{}) { arg := a[argNum] argNum++ - goSyntax := c == 'v' && p.fmt.sharp - plus := c == 'v' && p.fmt.plus - p.printArg(arg, c, plus, goSyntax, 0) + if c == 'v' { + if p.fmt.sharp { + // Go syntax. Set the flag in the fmt and clear the sharp flag. + p.fmt.sharp = false + p.fmt.sharpV = true + } + if p.fmt.plus { + // Struct-field syntax. Set the flag in the fmt and clear the plus flag. + p.fmt.plus = false + p.fmt.plusV = true + } + } + p.printArg(arg, c, 0) } // Check for extra arguments unless the call accessed the arguments @@ -1170,7 +1194,7 @@ func (p *pp) doPrintf(format string, a []interface{}) { p.buf.WriteString(reflect.TypeOf(arg).String()) p.buf.WriteByte('=') } - p.printArg(arg, 'v', false, false, 0) + p.printArg(arg, 'v', 0) if argNum+1 < len(a) { p.buf.Write(commaSpaceBytes) } @@ -1191,7 +1215,7 @@ func (p *pp) doPrint(a []interface{}, addspace, addnewline bool) { p.buf.WriteByte(' ') } } - prevString = p.printArg(arg, 'v', false, false, 0) + prevString = p.printArg(arg, 'v', 0) } if addnewline { p.buf.WriteByte('\n') diff --git a/libgo/go/fmt/scan.go b/libgo/go/fmt/scan.go index 8a337e479d0..d7befeae43e 100644 --- a/libgo/go/fmt/scan.go +++ b/libgo/go/fmt/scan.go @@ -360,6 +360,7 @@ func (r *readRune) ReadRune() (rr rune, size int, err error) { } if r.buf[0] < utf8.RuneSelf { // fast check for common ASCII case rr = rune(r.buf[0]) + size = 1 // Known to be 1. return } var n int diff --git a/libgo/go/fmt/scan_test.go b/libgo/go/fmt/scan_test.go index d903f0c3ff7..541e12df210 100644 --- a/libgo/go/fmt/scan_test.go +++ b/libgo/go/fmt/scan_test.go @@ -842,6 +842,38 @@ func TestLineByLineFscanf(t *testing.T) { } } +// TestScanStateCount verifies the correct byte count is returned. Issue 8512. + +// runeScanner implements the Scanner interface for TestScanStateCount. 
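// Illustrative note, not part of the patch: any type with a
// Scan(ScanState, rune) error method implements the Scanner interface, so
// Sscanf hands it each %c directly, which is what the test below relies on.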
+type runeScanner struct { + rune rune + size int +} + +func (rs *runeScanner) Scan(state ScanState, verb rune) error { + r, size, err := state.ReadRune() + rs.rune = r + rs.size = size + return err +} + +func TestScanStateCount(t *testing.T) { + var a, b, c runeScanner + n, err := Sscanf("12➂", "%c%c%c", &a, &b, &c) + if err != nil { + t.Fatal(err) + } + if n != 3 { + t.Fatalf("expected 3 items consumed, got %d") + } + if a.rune != '1' || b.rune != '2' || c.rune != '➂' { + t.Errorf("bad scan rune: %q %q %q should be '1' '2' '➂'", a.rune, b.rune, c.rune) + } + if a.size != 1 || b.size != 1 || c.size != 3 { + t.Errorf("bad scan size: %q %q %q should be 1 1 3", a.size, b.size, c.size) + } +} + // RecursiveInt accepts a string matching %d.%d.%d.... // and parses it into a linked list. // It allows us to benchmark recursive descent style scanners. diff --git a/libgo/go/go/ast/ast.go b/libgo/go/go/ast/ast.go index 6e635cd0166..312e3d1b989 100644 --- a/libgo/go/go/ast/ast.go +++ b/libgo/go/go/ast/ast.go @@ -699,9 +699,9 @@ type ( // A RangeStmt represents a for statement with a range clause. RangeStmt struct { For token.Pos // position of "for" keyword - Key, Value Expr // Value may be nil - TokPos token.Pos // position of Tok - Tok token.Token // ASSIGN, DEFINE + Key, Value Expr // Key, Value may be nil + TokPos token.Pos // position of Tok; invalid if Key == nil + Tok token.Token // ILLEGAL if Key == nil, ASSIGN, DEFINE X Expr // value to range over Body *BlockStmt } diff --git a/libgo/go/go/ast/scope.go b/libgo/go/go/ast/scope.go index 8df5b2c6565..df1529d1819 100644 --- a/libgo/go/go/ast/scope.go +++ b/libgo/go/go/ast/scope.go @@ -80,7 +80,7 @@ type Object struct { Name string // declared name Decl interface{} // corresponding Field, XxxSpec, FuncDecl, LabeledStmt, AssignStmt, Scope; or nil Data interface{} // object-specific data; or nil - Type interface{} // place holder for type information; may be nil + Type interface{} // placeholder for type information; may be nil } // NewObj creates a new object of a given kind and name. diff --git a/libgo/go/go/ast/walk.go b/libgo/go/go/ast/walk.go index fedffb3f22f..73ac38647a2 100644 --- a/libgo/go/go/ast/walk.go +++ b/libgo/go/go/ast/walk.go @@ -275,7 +275,9 @@ func Walk(v Visitor, node Node) { Walk(v, n.Body) case *RangeStmt: - Walk(v, n.Key) + if n.Key != nil { + Walk(v, n.Key) + } if n.Value != nil { Walk(v, n.Value) } diff --git a/libgo/go/go/build/build.go b/libgo/go/go/build/build.go index 5fe7dcb6b2f..2656aa06d68 100644 --- a/libgo/go/go/build/build.go +++ b/libgo/go/go/build/build.go @@ -23,6 +23,7 @@ import ( "strconv" "strings" "unicode" + "unicode/utf8" ) // A Context specifies the supporting context for a build. @@ -206,9 +207,7 @@ func (ctxt *Context) gopath() []string { if p == "" || p == ctxt.GOROOT { // Empty paths are uninteresting. // If the path is the GOROOT, ignore it. - // People sometimes set GOPATH=$GOROOT, which is useless - // but would cause us to find packages with import paths - // like "pkg/math". + // People sometimes set GOPATH=$GOROOT. // Do not get confused by this common mistake. 
continue } @@ -238,7 +237,7 @@ func (ctxt *Context) gopath() []string { func (ctxt *Context) SrcDirs() []string { var all []string if ctxt.GOROOT != "" { - dir := ctxt.joinPath(ctxt.GOROOT, "src", "pkg") + dir := ctxt.joinPath(ctxt.GOROOT, "src") if ctxt.isDir(dir) { all = append(all, dir) } @@ -272,6 +271,9 @@ var cgoEnabled = map[string]bool{ "linux/ppc64le": true, "linux/s390": true, "linux/s390x": true, + "android/386": true, + "android/amd64": true, + "android/arm": true, "netbsd/386": true, "netbsd/amd64": true, "netbsd/arm": true, @@ -296,10 +298,10 @@ func defaultContext() Context { // say "+build go1.x", and code that should only be built before Go 1.x // (perhaps it is the stub to use in that case) should say "+build !go1.x". // - // When we reach Go 1.4 the line will read - // c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3", "go1.4"} + // When we reach Go 1.5 the line will read + // c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3", "go1.4", "go1.5"} // and so on. - c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3"} + c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3", "go1.4"} switch os.Getenv("CGO_ENABLED") { case "1": @@ -338,22 +340,29 @@ const ( // If AllowBinary is set, Import can be satisfied by a compiled // package object without corresponding sources. AllowBinary + + // If ImportComment is set, parse import comments on package statements. + // Import returns an error if it finds a comment it cannot understand + // or finds conflicting comments in multiple source files. + // See golang.org/s/go14customimport for more information. + ImportComment ) // A Package describes the Go package found in a directory. type Package struct { - Dir string // directory containing package sources - Name string // package name - Doc string // documentation synopsis - ImportPath string // import path of package ("" if unknown) - Root string // root of Go tree where this package lives - SrcRoot string // package source root directory ("" if unknown) - PkgRoot string // package install root directory ("" if unknown) - BinDir string // command install directory ("" if unknown) - Goroot bool // package found in Go root - PkgObj string // installed .a file - AllTags []string // tags that can influence file selection in this directory - ConflictDir string // this directory shadows Dir in $GOPATH + Dir string // directory containing package sources + Name string // package name + ImportComment string // path in import comment on package statement + Doc string // documentation synopsis + ImportPath string // import path of package ("" if unknown) + Root string // root of Go tree where this package lives + SrcRoot string // package source root directory ("" if unknown) + PkgRoot string // package install root directory ("" if unknown) + BinDir string // command install directory ("" if unknown) + Goroot bool // package found in Go root + PkgObj string // installed .a file + AllTags []string // tags that can influence file selection in this directory + ConflictDir string // this directory shadows Dir in $GOPATH // Source files GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) @@ -412,6 +421,19 @@ func (e *NoGoError) Error() string { return "no buildable Go source files in " + e.Dir } +// MultiplePackageError describes a directory containing +// multiple buildable Go source files for multiple packages. 
+type MultiplePackageError struct { + Dir string // directory containing files + Packages []string // package names found + Files []string // corresponding files: Files[i] declares package Packages[i] +} + +func (e *MultiplePackageError) Error() string { + // Error string limited to two entries for compatibility. + return fmt.Sprintf("found packages %s (%s) and %s (%s) in %s", e.Packages[0], e.Files[0], e.Packages[1], e.Files[1], e.Dir) +} + func nameExt(name string) string { i := strings.LastIndex(name, ".") if i < 0 { @@ -472,7 +494,7 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa } // Determine canonical import path, if any. if ctxt.GOROOT != "" { - root := ctxt.joinPath(ctxt.GOROOT, "src", "pkg") + root := ctxt.joinPath(ctxt.GOROOT, "src") if sub, ok := ctxt.hasSubdir(root, p.Dir); ok { p.Goroot = true p.ImportPath = sub @@ -488,7 +510,7 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa // but check that using it wouldn't find something // else first. if ctxt.GOROOT != "" { - if dir := ctxt.joinPath(ctxt.GOROOT, "src", "pkg", sub); ctxt.isDir(dir) { + if dir := ctxt.joinPath(ctxt.GOROOT, "src", sub); ctxt.isDir(dir) { p.ConflictDir = dir goto Found } @@ -522,7 +544,7 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa // Determine directory from import path. if ctxt.GOROOT != "" { - dir := ctxt.joinPath(ctxt.GOROOT, "src", "pkg", path) + dir := ctxt.joinPath(ctxt.GOROOT, "src", path) isDir := ctxt.isDir(dir) binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga)) if isDir || binaryOnly { @@ -568,11 +590,7 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa Found: if p.Root != "" { - if p.Goroot { - p.SrcRoot = ctxt.joinPath(p.Root, "src", "pkg") - } else { - p.SrcRoot = ctxt.joinPath(p.Root, "src") - } + p.SrcRoot = ctxt.joinPath(p.Root, "src") p.PkgRoot = ctxt.joinPath(p.Root, "pkg") p.BinDir = ctxt.joinPath(p.Root, "bin") if pkga != "" { @@ -593,7 +611,7 @@ Found: } var Sfiles []string // files with ".S" (capital S) - var firstFile string + var firstFile, firstCommentFile string imported := make(map[string][]token.Position) testImported := make(map[string][]token.Position) xTestImported := make(map[string][]token.Position) @@ -674,12 +692,28 @@ Found: p.Name = pkg firstFile = name } else if pkg != p.Name { - return p, fmt.Errorf("found packages %s (%s) and %s (%s) in %s", p.Name, firstFile, pkg, name, p.Dir) + return p, &MultiplePackageError{p.Dir, []string{firstFile, name}, []string{p.Name, pkg}} } if pf.Doc != nil && p.Doc == "" { p.Doc = doc.Synopsis(pf.Doc.Text()) } + if mode&ImportComment != 0 { + qcom, line := findImportComment(data) + if line != 0 { + com, err := strconv.Unquote(qcom) + if err != nil { + return p, fmt.Errorf("%s:%d: cannot parse import comment", filename, line) + } + if p.ImportComment == "" { + p.ImportComment = com + firstCommentFile = name + } else if p.ImportComment != com { + return p, fmt.Errorf("found import comments %q (%s) and %q (%s) in %s", p.ImportComment, firstCommentFile, com, name, p.Dir) + } + } + } + // Record imports and information about cgo. 
isCgo := false for _, decl := range pf.Decls { @@ -760,6 +794,117 @@ Found: return p, pkgerr } +func findImportComment(data []byte) (s string, line int) { + // expect keyword package + word, data := parseWord(data) + if string(word) != "package" { + return "", 0 + } + + // expect package name + _, data = parseWord(data) + + // now ready for import comment, a // or /* */ comment + // beginning and ending on the current line. + for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') { + data = data[1:] + } + + var comment []byte + switch { + case bytes.HasPrefix(data, slashSlash): + i := bytes.Index(data, newline) + if i < 0 { + i = len(data) + } + comment = data[2:i] + case bytes.HasPrefix(data, slashStar): + data = data[2:] + i := bytes.Index(data, starSlash) + if i < 0 { + // malformed comment + return "", 0 + } + comment = data[:i] + if bytes.Contains(comment, newline) { + return "", 0 + } + } + comment = bytes.TrimSpace(comment) + + // split comment into `import`, `"pkg"` + word, arg := parseWord(comment) + if string(word) != "import" { + return "", 0 + } + + line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline) + return strings.TrimSpace(string(arg)), line +} + +var ( + slashSlash = []byte("//") + slashStar = []byte("/*") + starSlash = []byte("*/") + newline = []byte("\n") +) + +// skipSpaceOrComment returns data with any leading spaces or comments removed. +func skipSpaceOrComment(data []byte) []byte { + for len(data) > 0 { + switch data[0] { + case ' ', '\t', '\r', '\n': + data = data[1:] + continue + case '/': + if bytes.HasPrefix(data, slashSlash) { + i := bytes.Index(data, newline) + if i < 0 { + return nil + } + data = data[i+1:] + continue + } + if bytes.HasPrefix(data, slashStar) { + data = data[2:] + i := bytes.Index(data, starSlash) + if i < 0 { + return nil + } + data = data[i+2:] + continue + } + } + break + } + return data +} + +// parseWord skips any leading spaces or comments in data +// and then parses the beginning of data as an identifier or keyword, +// returning that word and what remains after the word. +func parseWord(data []byte) (word, rest []byte) { + data = skipSpaceOrComment(data) + + // Parse past leading word characters. + rest = data + for { + r, size := utf8.DecodeRune(rest) + if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' { + rest = rest[size:] + continue + } + break + } + + word = data[:len(data)-len(rest)] + if len(word) == 0 { + return nil, nil + } + + return word, rest +} + // MatchFile reports whether the file with the given name in the given directory // matches the context and would be included in a Package created by ImportDir // of that directory. @@ -1128,6 +1273,9 @@ func (ctxt *Context) match(name string, allTags map[string]bool) bool { if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler { return true } + if ctxt.GOOS == "android" && name == "linux" { + return true + } // other tags for _, tag := range ctxt.BuildTags { @@ -1155,10 +1303,25 @@ func (ctxt *Context) match(name string, allTags map[string]bool) bool { // name_$(GOARCH)_test.* // name_$(GOOS)_$(GOARCH)_test.* // +// An exception: if GOOS=android, then files with GOOS=linux are also matched. func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool { if dot := strings.Index(name, "."); dot != -1 { name = name[:dot] } + + // Before Go 1.4, a file called "linux.go" would be equivalent to having a + // build tag "linux" in that file. 
For Go 1.4 and beyond, we require this + // auto-tagging to apply only to files with a non-empty prefix, so + // "foo_linux.go" is tagged but "linux.go" is not. This allows new operating + // sytems, such as android, to arrive without breaking existing code with + // innocuous source code in "android.go". The easiest fix: cut everything + // in the name before the initial _. + i := strings.Index(name, "_") + if i < 0 { + return true + } + name = name[i:] // ignore everything before first _ + l := strings.Split(name, "_") if n := len(l); n > 0 && l[n-1] == "test" { l = l[:n-1] @@ -1169,12 +1332,21 @@ func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool { allTags[l[n-2]] = true allTags[l[n-1]] = true } - return l[n-2] == ctxt.GOOS && l[n-1] == ctxt.GOARCH + if l[n-1] != ctxt.GOARCH { + return false + } + if ctxt.GOOS == "android" && l[n-2] == "linux" { + return true + } + return l[n-2] == ctxt.GOOS } if n >= 1 && knownOS[l[n-1]] { if allTags != nil { allTags[l[n-1]] = true } + if ctxt.GOOS == "android" && l[n-1] == "linux" { + return true + } return l[n-1] == ctxt.GOOS } if n >= 1 && knownArch[l[n-1]] { diff --git a/libgo/go/go/build/build_test.go b/libgo/go/go/build/build_test.go index fca8d4bdb27..a40def0fa0e 100644 --- a/libgo/go/go/build/build_test.go +++ b/libgo/go/go/build/build_test.go @@ -85,6 +85,20 @@ func TestEmptyImport(t *testing.T) { } } +func TestEmptyFolderImport(t *testing.T) { + _, err := Import(".", "testdata/empty", 0) + if _, ok := err.(*NoGoError); !ok { + t.Fatal(`Import("testdata/empty") did not return NoGoError.`) + } +} + +func TestMultiplePackageImport(t *testing.T) { + _, err := Import(".", "testdata/multi", 0) + if _, ok := err.(*MultiplePackageError); !ok { + t.Fatal(`Import("testdata/multi") did not return MultiplePackageError.`) + } +} + func TestLocalDirectory(t *testing.T) { cwd, err := os.Getwd() if err != nil { @@ -153,22 +167,36 @@ func (r readNopCloser) Close() error { return nil } +var ( + ctxtP9 = Context{GOARCH: "arm", GOOS: "plan9"} + ctxtAndroid = Context{GOARCH: "arm", GOOS: "android"} +) + var matchFileTests = []struct { + ctxt Context name string data string match bool }{ - {"foo_arm.go", "", true}, - {"foo1_arm.go", "// +build linux\n\npackage main\n", false}, - {"foo_darwin.go", "", false}, - {"foo.go", "", true}, - {"foo1.go", "// +build linux\n\npackage main\n", false}, - {"foo.badsuffix", "", false}, + {ctxtP9, "foo_arm.go", "", true}, + {ctxtP9, "foo1_arm.go", "// +build linux\n\npackage main\n", false}, + {ctxtP9, "foo_darwin.go", "", false}, + {ctxtP9, "foo.go", "", true}, + {ctxtP9, "foo1.go", "// +build linux\n\npackage main\n", false}, + {ctxtP9, "foo.badsuffix", "", false}, + {ctxtAndroid, "foo_linux.go", "", true}, + {ctxtAndroid, "foo_android.go", "", true}, + {ctxtAndroid, "foo_plan9.go", "", false}, + {ctxtAndroid, "android.go", "", true}, + {ctxtAndroid, "plan9.go", "", true}, + {ctxtAndroid, "plan9_test.go", "", true}, + {ctxtAndroid, "arm.s", "", true}, + {ctxtAndroid, "amd64.s", "", true}, } func TestMatchFile(t *testing.T) { for _, tt := range matchFileTests { - ctxt := Context{GOARCH: "arm", GOOS: "plan9"} + ctxt := tt.ctxt ctxt.OpenFile = func(path string) (r io.ReadCloser, err error) { if path != "x+"+tt.name { t.Fatalf("OpenFile asked for %q, expected %q", path, "x+"+tt.name) @@ -184,3 +212,13 @@ func TestMatchFile(t *testing.T) { } } } + +func TestImportCmd(t *testing.T) { + p, err := Import("cmd/internal/objfile", "", 0) + if err != nil { + t.Fatal(err) + } + if 
!strings.HasSuffix(filepath.ToSlash(p.Dir), "src/cmd/internal/objfile") { + t.Fatalf("Import cmd/internal/objfile returned Dir=%q, want %q", filepath.ToSlash(p.Dir), ".../src/cmd/internal/objfile") + } +} diff --git a/libgo/go/go/build/deps_test.go b/libgo/go/go/build/deps_test.go index 3a795fdcc4b..a335effec3a 100644 --- a/libgo/go/go/build/deps_test.go +++ b/libgo/go/go/build/deps_test.go @@ -279,12 +279,12 @@ var pkgDeps = map[string][]string{ // Random byte, number generation. // This would be part of core crypto except that it imports // math/big, which imports fmt. - "crypto/rand": {"L4", "CRYPTO", "OS", "math/big", "syscall"}, + "crypto/rand": {"L4", "CRYPTO", "OS", "math/big", "syscall", "internal/syscall"}, // Mathematical crypto: dependencies on fmt (L4) and math/big. // We could avoid some of the fmt, but math/big imports fmt anyway. "crypto/dsa": {"L4", "CRYPTO", "math/big"}, - "crypto/ecdsa": {"L4", "CRYPTO", "crypto/elliptic", "math/big"}, + "crypto/ecdsa": {"L4", "CRYPTO", "crypto/elliptic", "math/big", "encoding/asn1"}, "crypto/elliptic": {"L4", "CRYPTO", "math/big"}, "crypto/rsa": {"L4", "CRYPTO", "crypto/rand", "math/big"}, @@ -318,6 +318,7 @@ var pkgDeps = map[string][]string{ "net/http": { "L4", "NET", "OS", "compress/gzip", "crypto/tls", "mime/multipart", "runtime/debug", + "net/http/internal", }, // HTTP-using packages. @@ -325,9 +326,9 @@ var pkgDeps = map[string][]string{ "net/http/cgi": {"L4", "NET", "OS", "crypto/tls", "net/http", "regexp"}, "net/http/fcgi": {"L4", "NET", "OS", "net/http", "net/http/cgi"}, "net/http/httptest": {"L4", "NET", "OS", "crypto/tls", "flag", "net/http"}, - "net/http/httputil": {"L4", "NET", "OS", "net/http"}, + "net/http/httputil": {"L4", "NET", "OS", "net/http", "net/http/internal"}, "net/http/pprof": {"L4", "OS", "html/template", "net/http", "runtime/pprof"}, - "net/rpc": {"L4", "NET", "encoding/gob", "net/http", "text/template"}, + "net/rpc": {"L4", "NET", "encoding/gob", "html/template", "net/http"}, "net/rpc/jsonrpc": {"L4", "NET", "encoding/json", "net/rpc"}, } @@ -360,7 +361,7 @@ func allowed(pkg string) map[string]bool { } var bools = []bool{false, true} -var geese = []string{"darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows"} +var geese = []string{"android", "darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows"} var goarches = []string{"386", "amd64", "arm", "arm64"} type osPkg struct { diff --git a/libgo/go/go/build/doc.go b/libgo/go/go/build/doc.go index f17f76ccc7b..75a827bb91f 100644 --- a/libgo/go/go/build/doc.go +++ b/libgo/go/go/build/doc.go @@ -100,6 +100,7 @@ // - "go1.1", from Go version 1.1 onward // - "go1.2", from Go version 1.2 onward // - "go1.3", from Go version 1.3 onward +// - "go1.4", from Go version 1.4 onward // - any additional words listed in ctxt.BuildTags // // If a file's name, after stripping the extension and a possible _test suffix, @@ -107,12 +108,10 @@ // *_GOOS // *_GOARCH // *_GOOS_GOARCH -// (example: source_windows_amd64.go) or the literals: -// GOOS -// GOARCH -// (example: windows.go) where GOOS and GOARCH represent any known operating -// system and architecture values respectively, then the file is considered to -// have an implicit build constraint requiring those terms. 
+// (example: source_windows_amd64.go) where GOOS and GOARCH represent +// any known operating system and architecture values respectively, then +// the file is considered to have an implicit build constraint requiring +// those terms. // // To keep a file from being considered for the build: // @@ -134,4 +133,7 @@ // building the package for Windows; similarly, math_386.s will be included // only when building the package for 32-bit x86. // +// Using GOOS=android matches build tags and files as for GOOS=linux +// in addition to android tags and files. +// package build diff --git a/libgo/go/go/build/syslist.go b/libgo/go/go/build/syslist.go index bde12a55344..0bf4b1573c9 100644 --- a/libgo/go/go/build/syslist.go +++ b/libgo/go/go/build/syslist.go @@ -4,5 +4,5 @@ package build -const goosList = "darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows " +const goosList = "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows " const goarchList = "386 amd64 amd64p32 arm arm64 alpha m68k mipso32 mipsn32 mipsn64 mipso64 ppc ppc64 ppc64le s390 s390x sparc sparc64 " diff --git a/libgo/go/go/build/testdata/multi/file.go b/libgo/go/go/build/testdata/multi/file.go new file mode 100644 index 00000000000..ee946eb2a2d --- /dev/null +++ b/libgo/go/go/build/testdata/multi/file.go @@ -0,0 +1,5 @@ +// Test data - not compiled. + +package main + +func main() {} diff --git a/libgo/go/go/build/testdata/multi/file_appengine.go b/libgo/go/go/build/testdata/multi/file_appengine.go new file mode 100644 index 00000000000..4ea31e7031e --- /dev/null +++ b/libgo/go/go/build/testdata/multi/file_appengine.go @@ -0,0 +1,5 @@ +// Test data - not compiled. + +package test_package + +func init() {} diff --git a/libgo/go/go/doc/exports.go b/libgo/go/go/doc/exports.go index ff01285d4c3..1d3b466d8c7 100644 --- a/libgo/go/go/doc/exports.go +++ b/libgo/go/go/doc/exports.go @@ -6,15 +6,19 @@ package doc -import "go/ast" +import ( + "go/ast" + "go/token" +) // filterIdentList removes unexported names from list in place -// and returns the resulting list. +// and returns the resulting list. If blankOk is set, blank +// identifiers are considered exported names. 
// -func filterIdentList(list []*ast.Ident) []*ast.Ident { +func filterIdentList(list []*ast.Ident, blankOk bool) []*ast.Ident { j := 0 for _, x := range list { - if ast.IsExported(x.Name) { + if ast.IsExported(x.Name) || (blankOk && x.Name == "_") { list[j] = x j++ } @@ -74,7 +78,7 @@ func (r *reader) filterFieldList(parent *namedType, fields *ast.FieldList, ityp r.remember(ityp) } } else { - field.Names = filterIdentList(field.Names) + field.Names = filterIdentList(field.Names, false) if len(field.Names) < n { removedFields = true } @@ -136,13 +140,15 @@ func (r *reader) filterType(parent *namedType, typ ast.Expr) { } } -func (r *reader) filterSpec(spec ast.Spec) bool { +func (r *reader) filterSpec(spec ast.Spec, tok token.Token) bool { switch s := spec.(type) { case *ast.ImportSpec: // always keep imports so we can collect them return true case *ast.ValueSpec: - s.Names = filterIdentList(s.Names) + // special case: consider blank constants as exported + // (work-around for issue 5397) + s.Names = filterIdentList(s.Names, tok == token.CONST) if len(s.Names) > 0 { r.filterType(nil, s.Type) return true @@ -159,10 +165,10 @@ func (r *reader) filterSpec(spec ast.Spec) bool { return false } -func (r *reader) filterSpecList(list []ast.Spec) []ast.Spec { +func (r *reader) filterSpecList(list []ast.Spec, tok token.Token) []ast.Spec { j := 0 for _, s := range list { - if r.filterSpec(s) { + if r.filterSpec(s, tok) { list[j] = s j++ } @@ -173,7 +179,7 @@ func (r *reader) filterSpecList(list []ast.Spec) []ast.Spec { func (r *reader) filterDecl(decl ast.Decl) bool { switch d := decl.(type) { case *ast.GenDecl: - d.Specs = r.filterSpecList(d.Specs) + d.Specs = r.filterSpecList(d.Specs, d.Tok) return len(d.Specs) > 0 case *ast.FuncDecl: // ok to filter these methods early because any diff --git a/libgo/go/go/doc/headscan.go b/libgo/go/go/doc/headscan.go index f5593476382..1ccaa158194 100644 --- a/libgo/go/go/doc/headscan.go +++ b/libgo/go/go/doc/headscan.go @@ -24,6 +24,7 @@ import ( "go/token" "os" "path/filepath" + "regexp" "runtime" "strings" ) @@ -33,10 +34,10 @@ var ( verbose = flag.Bool("v", false, "verbose mode") ) -const ( - html_h = "<h3>" - html_endh = "</h3>\n" -) +// ToHTML in comment.go assigns a (possibly blank) ID to each heading +var html_h = regexp.MustCompile(`<h3 id="[^"]*">`) + +const html_endh = "</h3>\n" func isGoFile(fi os.FileInfo) bool { return strings.HasSuffix(fi.Name(), ".go") && @@ -47,11 +48,11 @@ func appendHeadings(list []string, comment string) []string { var buf bytes.Buffer doc.ToHTML(&buf, comment, nil) for s := buf.String(); ; { - i := strings.Index(s, html_h) - if i < 0 { + loc := html_h.FindStringIndex(s) + if len(loc) == 0 { break } - i += len(html_h) + i := loc[1] j := strings.Index(s, html_endh) if j < 0 { list = append(list, s[i:]) // incorrect HTML diff --git a/libgo/go/go/doc/testdata/blank.0.golden b/libgo/go/go/doc/testdata/blank.0.golden new file mode 100644 index 00000000000..dae3ab2affa --- /dev/null +++ b/libgo/go/go/doc/testdata/blank.0.golden @@ -0,0 +1,37 @@ +// Package blank is a go/doc test for the handling of _. See issue ... +PACKAGE blank + +IMPORTPATH + testdata/blank + +FILENAMES + testdata/blank.go + +CONSTANTS + // Package constants. + const ( + _ int = iota + I1 + I2 + ) + + +TYPES + // S has a padding field. + type S struct { + H uint32 + + A uint8 + // contains filtered or unexported fields + } + + // + type T int + + // T constants. 
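Since ToHTML now attaches an id to each heading, a fixed "<h3>" marker no longer matches; the same regexp-based scan can be sketched on its own like this (the input string is invented for illustration):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var heading = regexp.MustCompile(`<h3 id="[^"]*">`)

func main() {
	s := `<p>intro</p><h3 id="hdr-Example">Example heading</h3><p>body</p>`
	loc := heading.FindStringIndex(s)
	if loc == nil {
		return
	}
	rest := s[loc[1]:]
	fmt.Println(rest[:strings.Index(rest, "</h3>")]) // Example heading
}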
+ const ( + _ T = iota + T1 + T2 + ) + diff --git a/libgo/go/go/doc/testdata/blank.1.golden b/libgo/go/go/doc/testdata/blank.1.golden new file mode 100644 index 00000000000..333d7e5b040 --- /dev/null +++ b/libgo/go/go/doc/testdata/blank.1.golden @@ -0,0 +1,46 @@ +// Package blank is a go/doc test for the handling of _. See issue ... +PACKAGE blank + +IMPORTPATH + testdata/blank + +FILENAMES + testdata/blank.go + +CONSTANTS + // Package constants. + const ( + _ int = iota + I1 + I2 + ) + + +VARIABLES + // + var _ = T(55) + + +FUNCTIONS + // + func _() + + +TYPES + // S has a padding field. + type S struct { + H uint32 + _ uint8 + A uint8 + } + + // + type T int + + // T constants. + const ( + _ T = iota + T1 + T2 + ) + diff --git a/libgo/go/go/doc/testdata/blank.2.golden b/libgo/go/go/doc/testdata/blank.2.golden new file mode 100644 index 00000000000..dae3ab2affa --- /dev/null +++ b/libgo/go/go/doc/testdata/blank.2.golden @@ -0,0 +1,37 @@ +// Package blank is a go/doc test for the handling of _. See issue ... +PACKAGE blank + +IMPORTPATH + testdata/blank + +FILENAMES + testdata/blank.go + +CONSTANTS + // Package constants. + const ( + _ int = iota + I1 + I2 + ) + + +TYPES + // S has a padding field. + type S struct { + H uint32 + + A uint8 + // contains filtered or unexported fields + } + + // + type T int + + // T constants. + const ( + _ T = iota + T1 + T2 + ) + diff --git a/libgo/go/go/doc/testdata/blank.go b/libgo/go/go/doc/testdata/blank.go new file mode 100644 index 00000000000..f812c77b777 --- /dev/null +++ b/libgo/go/go/doc/testdata/blank.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blank is a go/doc test for the handling of _. +// See issue 5397. +package blank + +type T int + +// T constants. +const ( + _ T = iota + T1 + T2 +) + +// Package constants. +const ( + _ int = iota + I1 + I2 +) + +// Blanks not in doc output: + +// S has a padding field. +type S struct { + H uint32 + _ uint8 + A uint8 +} + +func _() {} + +type _ T + +var _ = T(55) diff --git a/libgo/go/go/format/format.go b/libgo/go/go/format/format.go index 3d00a645dbe..668a42df2df 100644 --- a/libgo/go/go/format/format.go +++ b/libgo/go/go/format/format.go @@ -18,6 +18,8 @@ import ( var config = printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 8} +const parserMode = parser.ParseComments + // Node formats node in canonical gofmt style and writes the result to dst. // // The node type must be *ast.File, *printer.CommentedNode, []ast.Decl, @@ -52,7 +54,7 @@ func Node(dst io.Writer, fset *token.FileSet, node interface{}) error { if err != nil { return err } - file, err = parser.ParseFile(fset, "", buf.Bytes(), parser.ParseComments) + file, err = parser.ParseFile(fset, "", buf.Bytes(), parserMode) if err != nil { // We should never get here. If we do, provide good diagnostic. return fmt.Errorf("format.Node internal error (%s)", err) @@ -80,66 +82,18 @@ func Node(dst io.Writer, fset *token.FileSet, node interface{}) error { // func Source(src []byte) ([]byte, error) { fset := token.NewFileSet() - node, err := parse(fset, src) + file, sourceAdj, indentAdj, err := parse(fset, "", src, true) if err != nil { return nil, err } - var buf bytes.Buffer - if file, ok := node.(*ast.File); ok { + if sourceAdj == nil { // Complete source file. + // TODO(gri) consider doing this always. 
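After this rewrite go/format shares gofmt's fragment handling, so Source accepts a whole file, a declaration list, or a bare statement list. For example:

package main

import (
	"fmt"
	"go/format"
	"log"
)

func main() {
	// A statement fragment, not a complete file.
	src := []byte("x:=1\nif x==1{fmt.Println( x )}")
	out, err := format.Source(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)
	// Output:
	// x := 1
	// if x == 1 {
	// 	fmt.Println(x)
	// }
}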
ast.SortImports(fset, file) - err := config.Fprint(&buf, fset, file) - if err != nil { - return nil, err - } - - } else { - // Partial source file. - // Determine and prepend leading space. - i, j := 0, 0 - for j < len(src) && isSpace(src[j]) { - if src[j] == '\n' { - i = j + 1 // index of last line in leading space - } - j++ - } - buf.Write(src[:i]) - - // Determine indentation of first code line. - // Spaces are ignored unless there are no tabs, - // in which case spaces count as one tab. - indent := 0 - hasSpace := false - for _, b := range src[i:j] { - switch b { - case ' ': - hasSpace = true - case '\t': - indent++ - } - } - if indent == 0 && hasSpace { - indent = 1 - } - - // Format the source. - cfg := config - cfg.Indent = indent - err := cfg.Fprint(&buf, fset, node) - if err != nil { - return nil, err - } - - // Determine and append trailing space. - i = len(src) - for i > 0 && isSpace(src[i-1]) { - i-- - } - buf.Write(src[i:]) } - return buf.Bytes(), nil + return format(fset, file, sourceAdj, indentAdj, src, config) } func hasUnsortedImports(file *ast.File) bool { @@ -160,40 +114,153 @@ func hasUnsortedImports(file *ast.File) bool { return false } -func isSpace(b byte) bool { - return b == ' ' || b == '\t' || b == '\n' || b == '\r' -} +// ---------------------------------------------------------------------------- +// Support functions +// +// The functions parse, format, and isSpace below are identical to the +// respective functions in cmd/gofmt/gofmt.go - keep them in sync! +// +// TODO(gri) Factor out this functionality, eventually. -func parse(fset *token.FileSet, src []byte) (interface{}, error) { - // Try as a complete source file. - file, err := parser.ParseFile(fset, "", src, parser.ParseComments) - if err == nil { - return file, nil - } - // If the source is missing a package clause, try as a source fragment; otherwise fail. - if !strings.Contains(err.Error(), "expected 'package'") { - return nil, err +// parse parses src, which was read from the named file, +// as a Go source file, declaration, or statement list. +func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) ( + file *ast.File, + sourceAdj func(src []byte, indent int) []byte, + indentAdj int, + err error, +) { + // Try as whole source file. + file, err = parser.ParseFile(fset, filename, src, parserMode) + // If there's no error, return. If the error is that the source file didn't begin with a + // package line and source fragments are ok, fall through to + // try as a source fragment. Stop and return on any other error. + if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") { + return } - // Try as a declaration list by prepending a package clause in front of src. - // Use ';' not '\n' to keep line numbers intact. + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ;, not a newline, so that the line numbers + // in psrc match the ones in src. psrc := append([]byte("package p;"), src...) - file, err = parser.ParseFile(fset, "", psrc, parser.ParseComments) + file, err = parser.ParseFile(fset, filename, psrc, parserMode) if err == nil { - return file.Decls, nil + sourceAdj = func(src []byte, indent int) []byte { + // Remove the package clause. + // Gofmt has turned the ; into a \n. + src = src[indent+len("package p\n"):] + return bytes.TrimSpace(src) + } + return } - // If the source is missing a declaration, try as a statement list; otherwise fail. 
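The same prefix trick works directly with go/parser whenever only a declaration list is at hand; a short sketch:

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"
)

func main() {
	// A declaration list, not a complete file.
	src := []byte("const a = 1\nvar b string\nfunc f() {}")
	// Prepend a package clause with ";" so line numbers in psrc match src.
	psrc := append([]byte("package p;"), src...)
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "", psrc, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(file.Decls)) // 3
}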
+ // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. if !strings.Contains(err.Error(), "expected declaration") { - return nil, err + return } - // Try as statement list by wrapping a function around src. - fsrc := append(append([]byte("package p; func _() {"), src...), '}') - file, err = parser.ParseFile(fset, "", fsrc, parser.ParseComments) + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ;, not a newline, so that the line numbers + // in fsrc match the ones in src. + fsrc := append(append([]byte("package p; func _() {"), src...), '\n', '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) if err == nil { - return file.Decls[0].(*ast.FuncDecl).Body.List, nil + sourceAdj = func(src []byte, indent int) []byte { + // Cap adjusted indent to zero. + if indent < 0 { + indent = 0 + } + // Remove the wrapping. + // Gofmt has turned the ; into a \n\n. + // There will be two non-blank lines with indent, hence 2*indent. + src = src[2*indent+len("package p\n\nfunc _() {"):] + src = src[:len(src)-(indent+len("\n}\n"))] + return bytes.TrimSpace(src) + } + // Gofmt has also indented the function body one level. + // Adjust that with indentAdj. + indentAdj = -1 + } + + // Succeeded, or out of options. + return +} + +// format formats the given package file originally obtained from src +// and adjusts the result based on the original source via sourceAdj +// and indentAdj. +func format( + fset *token.FileSet, + file *ast.File, + sourceAdj func(src []byte, indent int) []byte, + indentAdj int, + src []byte, + cfg printer.Config, +) ([]byte, error) { + if sourceAdj == nil { + // Complete source file. + var buf bytes.Buffer + err := cfg.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + + // Partial source file. + // Determine and prepend leading space. + i, j := 0, 0 + for j < len(src) && isSpace(src[j]) { + if src[j] == '\n' { + i = j + 1 // byte offset of last line in leading space + } + j++ + } + var res []byte + res = append(res, src[:i]...) + + // Determine and prepend indentation of first code line. + // Spaces are ignored unless there are no tabs, + // in which case spaces count as one tab. + indent := 0 + hasSpace := false + for _, b := range src[i:j] { + switch b { + case ' ': + hasSpace = true + case '\t': + indent++ + } + } + if indent == 0 && hasSpace { + indent = 1 + } + for i := 0; i < indent; i++ { + res = append(res, '\t') + } + + // Format the source. + // Write it without any leading and trailing space. + cfg.Indent = indent + indentAdj + var buf bytes.Buffer + err := cfg.Fprint(&buf, fset, file) + if err != nil { + return nil, err } + res = append(res, sourceAdj(buf.Bytes(), cfg.Indent)...) - // Failed, and out of options. - return nil, err + // Determine and append trailing space. 
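Likewise, a bare statement list can be parsed by wrapping it in a throwaway function and unwrapping the body afterwards, which is the technique the code above relies on. A minimal sketch:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"log"
)

func main() {
	// A bare statement list.
	src := []byte("x := 1; y := x + 2; _ = y")
	// Wrap it in a throwaway function so ParseFile accepts it,
	// then unwrap the function body to get the statements back.
	fsrc := append(append([]byte("package p; func _() {"), src...), '\n', '}')
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "", fsrc, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	stmts := file.Decls[0].(*ast.FuncDecl).Body.List
	fmt.Println(len(stmts)) // 3
}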
+ i = len(src) + for i > 0 && isSpace(src[i-1]) { + i-- + } + return append(res, src[i:]...), nil +} + +func isSpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' } diff --git a/libgo/go/go/format/format_test.go b/libgo/go/go/format/format_test.go index 93f0992477c..d7846bec652 100644 --- a/libgo/go/go/format/format_test.go +++ b/libgo/go/go/format/format_test.go @@ -87,7 +87,11 @@ var tests = []string{ "\tx := 0\n\tgo f()\n\n\n", "\n\t\t\n\n\tx := 0\n\tgo f()\n\n\n", "\n\t\t\n\n\t\t\tx := 0\n\t\t\tgo f()\n\n\n", - "\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\nfoo\n`\n\n\n", // no indentation inside raw strings + "\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\nfoo\n`\n\n\n", // no indentation added inside raw strings + "\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\n\t\tfoo\n`\n\n\n", // no indentation removed inside raw strings + + // comments + "i := 5 /* Comment */", // Issue 5551. // erroneous programs "ERROR1 + 2 +", diff --git a/libgo/go/go/parser/error_test.go b/libgo/go/go/parser/error_test.go index 8506077cee6..48fb53e5b0d 100644 --- a/libgo/go/go/parser/error_test.go +++ b/libgo/go/go/parser/error_test.go @@ -34,7 +34,7 @@ import ( const testdata = "testdata" -var fsetErrs *token.FileSet +var fsetErrs = token.NewFileSet() // getFile assumes that each filename occurs at most once func getFile(filename string) (file *token.File) { @@ -169,7 +169,6 @@ func checkErrors(t *testing.T, filename string, input interface{}) { } func TestErrors(t *testing.T) { - fsetErrs = token.NewFileSet() list, err := ioutil.ReadDir(testdata) if err != nil { t.Fatal(err) diff --git a/libgo/go/go/parser/interface.go b/libgo/go/go/parser/interface.go index 57da4ddcd93..49103058b59 100644 --- a/libgo/go/go/parser/interface.go +++ b/libgo/go/go/parser/interface.go @@ -184,7 +184,7 @@ func ParseExpr(x string) (ast.Expr, error) { // If a semicolon was inserted, consume it; // report an error if there's more tokens. - if p.tok == token.SEMICOLON { + if p.tok == token.SEMICOLON && p.lit == "\n" { p.next() } p.expect(token.EOF) diff --git a/libgo/go/go/parser/parser.go b/libgo/go/go/parser/parser.go index 00dd532b239..4a005d8ffa3 100644 --- a/libgo/go/go/parser/parser.go +++ b/libgo/go/go/parser/parser.go @@ -641,6 +641,7 @@ func (p *parser) parseArrayType() ast.Expr { } lbrack := p.expect(token.LBRACK) + p.exprLev++ var len ast.Expr // always permit ellipsis for more fault-tolerant parsing if p.tok == token.ELLIPSIS { @@ -649,6 +650,7 @@ func (p *parser) parseArrayType() ast.Expr { } else if p.tok != token.RBRACK { len = p.parseRhs() } + p.exprLev-- p.expect(token.RBRACK) elt := p.parseType() @@ -823,9 +825,10 @@ func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params [ // parameter or result variable is the function body. p.declare(field, nil, scope, ast.Var, idents...) 
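The interface.go change above means ParseExpr now accepts a trailing semicolon only when it was inserted automatically for a newline; an explicit one is an error. Roughly:

package main

import (
	"fmt"
	"go/parser"
)

func main() {
	_, err := parser.ParseExpr("a + b\n") // ok: the ; is auto-inserted for the newline
	fmt.Println(err)                      // <nil>
	_, err = parser.ParseExpr("a + b;") // explicit semicolon: rejected
	fmt.Println(err != nil)             // true
}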
p.resolve(typ) - if p.tok == token.COMMA { - p.next() + if !p.atComma("parameter list") { + return } + p.next() for p.tok != token.RPAREN && p.tok != token.EOF { idents := p.parseIdentList() typ := p.parseVarType(ellipsisOk) @@ -840,15 +843,15 @@ func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params [ } p.next() } - } else { - // Type { "," Type } (anonymous parameters) - params = make([]*ast.Field, len(list)) - for i, typ := range list { - p.resolve(typ) - params[i] = &ast.Field{Type: typ} - } + return } + // Type { "," Type } (anonymous parameters) + params = make([]*ast.Field, len(list)) + for i, typ := range list { + p.resolve(typ) + params[i] = &ast.Field{Type: typ} + } return } @@ -2041,7 +2044,16 @@ func (p *parser) parseForStmt() ast.Stmt { prevLev := p.exprLev p.exprLev = -1 if p.tok != token.SEMICOLON { - s2, isRange = p.parseSimpleStmt(rangeOk) + if p.tok == token.RANGE { + // "for range x" (nil lhs in assignment) + pos := p.pos + p.next() + y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}} + s2 = &ast.AssignStmt{Rhs: y} + isRange = true + } else { + s2, isRange = p.parseSimpleStmt(rangeOk) + } } if !isRange && p.tok == token.SEMICOLON { p.next() @@ -2066,12 +2078,14 @@ func (p *parser) parseForStmt() ast.Stmt { // check lhs var key, value ast.Expr switch len(as.Lhs) { - case 2: - key, value = as.Lhs[0], as.Lhs[1] + case 0: + // nothing to do case 1: key = as.Lhs[0] + case 2: + key, value = as.Lhs[0], as.Lhs[1] default: - p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions") + p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions") return &ast.BadStmt{From: pos, To: p.safePos(body.End())} } // parseSimpleStmt returned a right-hand side that @@ -2296,36 +2310,6 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.Gen } } -func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList { - if p.trace { - defer un(trace(p, "Receiver")) - } - - par := p.parseParameters(scope, false) - - // must have exactly one receiver - if par.NumFields() != 1 { - p.errorExpected(par.Opening, "exactly one receiver") - par.List = []*ast.Field{{Type: &ast.BadExpr{From: par.Opening, To: par.Closing + 1}}} - return par - } - - // recv type must be of the form ["*"] identifier - recv := par.List[0] - base := deref(recv.Type) - if _, isIdent := base.(*ast.Ident); !isIdent { - if _, isBad := base.(*ast.BadExpr); !isBad { - // only report error if it's a new one - p.errorExpected(base.Pos(), "(unqualified) identifier") - } - par.List = []*ast.Field{ - {Type: &ast.BadExpr{From: recv.Pos(), To: p.safePos(recv.End())}}, - } - } - - return par -} - func (p *parser) parseFuncDecl() *ast.FuncDecl { if p.trace { defer un(trace(p, "FunctionDecl")) @@ -2337,7 +2321,7 @@ func (p *parser) parseFuncDecl() *ast.FuncDecl { var recv *ast.FieldList if p.tok == token.LPAREN { - recv = p.parseReceiver(scope) + recv = p.parseParameters(scope, false) } ident := p.parseIdent() diff --git a/libgo/go/go/parser/parser_test.go b/libgo/go/go/parser/parser_test.go index 2797ea518bd..85065fd1829 100644 --- a/libgo/go/go/parser/parser_test.go +++ b/libgo/go/go/parser/parser_test.go @@ -74,36 +74,54 @@ func TestParseExpr(t *testing.T) { src := "a + b" x, err := ParseExpr(src) if err != nil { - t.Fatalf("ParseExpr(%s): %v", src, err) + t.Errorf("ParseExpr(%q): %v", src, err) } // sanity check if _, ok := x.(*ast.BinaryExpr); !ok { - t.Errorf("ParseExpr(%s): got %T, want *ast.BinaryExpr", src, x) + t.Errorf("ParseExpr(%q): got %T, want 
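The parser now also accepts a range clause with no assignment at all, so a loop that only counts iterations or drains a channel can drop the blank identifiers entirely. For example:

package main

import "fmt"

func main() {
	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	ch <- 3
	close(ch)

	n := 0
	for range ch { // previously written as: for _ = range ch
		n++
	}
	fmt.Println(n) // 3
}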
*ast.BinaryExpr", src, x) } // a valid type expression src = "struct{x *int}" x, err = ParseExpr(src) if err != nil { - t.Fatalf("ParseExpr(%s): %v", src, err) + t.Errorf("ParseExpr(%q): %v", src, err) } // sanity check if _, ok := x.(*ast.StructType); !ok { - t.Errorf("ParseExpr(%s): got %T, want *ast.StructType", src, x) + t.Errorf("ParseExpr(%q): got %T, want *ast.StructType", src, x) } // an invalid expression src = "a + *" - _, err = ParseExpr(src) - if err == nil { - t.Fatalf("ParseExpr(%s): got no error", src) + if _, err := ParseExpr(src); err == nil { + t.Errorf("ParseExpr(%q): got no error", src) } // a valid expression followed by extra tokens is invalid src = "a[i] := x" - _, err = ParseExpr(src) - if err == nil { - t.Fatalf("ParseExpr(%s): got no error", src) + if _, err := ParseExpr(src); err == nil { + t.Errorf("ParseExpr(%q): got no error", src) + } + + // a semicolon is not permitted unless automatically inserted + src = "a + b\n" + if _, err := ParseExpr(src); err != nil { + t.Errorf("ParseExpr(%q): got error %s", src, err) + } + src = "a + b;" + if _, err := ParseExpr(src); err == nil { + t.Errorf("ParseExpr(%q): got no error", src) + } + + // various other stuff following a valid expression + const validExpr = "a + b" + const anything = "dh3*#D)#_" + for _, c := range "!)]};," { + src := validExpr + string(c) + anything + if _, err := ParseExpr(src); err == nil { + t.Errorf("ParseExpr(%q): got no error", src) + } } // ParseExpr must not crash diff --git a/libgo/go/go/parser/short_test.go b/libgo/go/go/parser/short_test.go index b794060998f..05e44de28a7 100644 --- a/libgo/go/go/parser/short_test.go +++ b/libgo/go/go/parser/short_test.go @@ -35,6 +35,11 @@ var valids = []string{ `package p; func f() { for _ = range "foo" + "bar" {} };`, `package p; func f() { var s []int; g(s[:], s[i:], s[:j], s[i:j], s[i:j:k], s[:j:k]) };`, `package p; var ( _ = (struct {*T}).m; _ = (interface {T}).m )`, + `package p; func ((T),) m() {}`, + `package p; func ((*T),) m() {}`, + `package p; func (*(T),) m() {}`, + `package p; func _(x []int) { for range x {} }`, + `package p; func _() { if [T{}.n]int{} {} }`, } func TestValid(t *testing.T) { @@ -89,6 +94,7 @@ var invalids = []string{ `package p; func f() { go f /* ERROR HERE "function must be invoked" */ }`, `package p; func f() { defer func() {} /* ERROR HERE "function must be invoked" */ }`, `package p; func f() { go func() { func() { f(x func /* ERROR "expected '\)'" */ (){}) } } }`, + `package p; func f() (a b string /* ERROR "expected '\)'" */ , ok bool) // issue 8656`, } func TestInvalid(t *testing.T) { diff --git a/libgo/go/go/printer/nodes.go b/libgo/go/go/printer/nodes.go index 04b5f1a76a9..d5a69349beb 100644 --- a/libgo/go/go/printer/nodes.go +++ b/libgo/go/go/printer/nodes.go @@ -163,8 +163,8 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp size := 0 // print all list elements + prevLine := prev.Line for i, x := range list { - prevLine := line line = p.lineFor(x.Pos()) // determine if the next linebreak, if any, needs to use formfeed: @@ -207,8 +207,8 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp } } + needsLinebreak := 0 < prevLine && prevLine < line if i > 0 { - needsLinebreak := prevLine < line && prevLine > 0 && line > 0 // use position of expression following the comma as // comma position for correct comment placement, but // only if the expression is on the same line @@ -232,16 +232,20 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, 
depth int, mode exp } } - if isPair && size > 0 && len(list) > 1 { - // we have a key:value expression that fits onto one line and - // is in a list with more then one entry: use a column for the - // key such that consecutive entries can align if possible + if len(list) > 1 && isPair && size > 0 && needsLinebreak { + // we have a key:value expression that fits onto one line + // and it's not on the same line as the prior expression: + // use a column for the key such that consecutive entries + // can align if possible + // (needsLinebreak is set if we started a new line before) p.expr(pair.Key) p.print(pair.Colon, token.COLON, vtab) p.expr(pair.Value) } else { p.expr0(x, depth) } + + prevLine = line } if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line { @@ -732,7 +736,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int) { if _, hasParens := x.X.(*ast.ParenExpr); hasParens { // don't print parentheses around an already parenthesized expression // TODO(gri) consider making this more general and incorporate precedence levels - p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth + p.expr0(x.X, depth) } else { p.print(token.LPAREN) p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth @@ -1216,14 +1220,17 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool) { case *ast.RangeStmt: p.print(token.FOR, blank) - p.expr(s.Key) - if s.Value != nil { - // use position of value following the comma as - // comma position for correct comment placement - p.print(s.Value.Pos(), token.COMMA, blank) - p.expr(s.Value) - } - p.print(blank, s.TokPos, s.Tok, blank, token.RANGE, blank) + if s.Key != nil { + p.expr(s.Key) + if s.Value != nil { + // use position of value following the comma as + // comma position for correct comment placement + p.print(s.Value.Pos(), token.COMMA, blank) + p.expr(s.Value) + } + p.print(blank, s.TokPos, s.Tok, blank) + } + p.print(token.RANGE, blank) p.expr(stripParens(s.X)) p.print(blank) p.block(s.Body, 1) diff --git a/libgo/go/go/printer/printer_test.go b/libgo/go/go/printer/printer_test.go index 306928a69a2..3b0570e5b50 100644 --- a/libgo/go/go/printer/printer_test.go +++ b/libgo/go/go/printer/printer_test.go @@ -159,13 +159,6 @@ func runcheck(t *testing.T, source, golden string, mode checkMode) { } func check(t *testing.T, source, golden string, mode checkMode) { - // start a timer to produce a time-out signal - tc := make(chan int) - go func() { - time.Sleep(10 * time.Second) // plenty of a safety margin, even for very slow machines - tc <- 0 - }() - // run the test cc := make(chan int) go func() { @@ -173,9 +166,9 @@ func check(t *testing.T, source, golden string, mode checkMode) { cc <- 0 }() - // wait for the first finisher + // wait with timeout select { - case <-tc: + case <-time.After(10 * time.Second): // plenty of a safety margin, even for very slow machines // test running past time out t.Errorf("%s: running too slowly", source) case <-cc: @@ -357,7 +350,7 @@ func idents(f *ast.File) <-chan *ast.Ident { // identCount returns the number of identifiers found in f. 
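The printer test above swaps its hand-rolled timer goroutine for the usual select on time.After; the same shape works anywhere a bounded wait is needed. A generic sketch (the helper name is made up):

package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithTimeout runs f in a goroutine and waits at most d for it to finish.
func runWithTimeout(d time.Duration, f func()) error {
	done := make(chan struct{})
	go func() {
		f()
		close(done)
	}()
	select {
	case <-time.After(d):
		return errors.New("timed out")
	case <-done:
		return nil
	}
}

func main() {
	err := runWithTimeout(100*time.Millisecond, func() { time.Sleep(10 * time.Millisecond) })
	fmt.Println(err) // <nil>
}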
func identCount(f *ast.File) int { n := 0 - for _ = range idents(f) { + for range idents(f) { n++ } return n diff --git a/libgo/go/go/printer/testdata/declarations.golden b/libgo/go/go/printer/testdata/declarations.golden index a27f21fc8ce..9acd41b7d28 100644 --- a/libgo/go/go/printer/testdata/declarations.golden +++ b/libgo/go/go/printer/testdata/declarations.golden @@ -593,7 +593,7 @@ var ( ) func _() { - var privateKey2 = &Block{Type: "RSA PRIVATE KEY", + var privateKey2 = &Block{Type: "RSA PRIVATE KEY", Headers: map[string]string{}, Bytes: []uint8{0x30, 0x82, 0x1, 0x3a, 0x2, 0x1, 0x0, 0x2, 0x41, 0x0, 0xb2, 0x99, 0xf, 0x49, 0xc4, 0x7d, 0xfa, 0x8c, @@ -698,6 +698,29 @@ var _ = T4{ c: z, } +// no alignment of map composite entries if they are not the first entry on a line +var _ = T{0: 0} // not aligned +var _ = T{0: 0, // not aligned + 1: 1, // aligned + 22: 22, // aligned + 333: 333, 1234: 12, 12345: 0, // first on line aligned +} + +// test cases form issue 8685 +// not aligned +var _ = map[int]string{1: "spring", 2: "summer", + 3: "autumn", 4: "winter"} + +// not aligned +var _ = map[string]string{"a": "spring", "b": "summer", + "c": "autumn", "d": "winter"} + +// aligned +var _ = map[string]string{"a": "spring", + "b": "summer", + "c": "autumn", + "d": "winter"} + func _() { var _ = T{ a, // must introduce trailing comma diff --git a/libgo/go/go/printer/testdata/declarations.input b/libgo/go/go/printer/testdata/declarations.input index d9951d3865f..45beec25fc7 100644 --- a/libgo/go/go/printer/testdata/declarations.input +++ b/libgo/go/go/printer/testdata/declarations.input @@ -715,6 +715,31 @@ var _ = T4{ } +// no alignment of map composite entries if they are not the first entry on a line +var _ = T{0: 0} // not aligned +var _ = T{0: 0, // not aligned + 1: 1, // aligned + 22: 22, // aligned + 333: 333, 1234: 12, 12345: 0, // first on line aligned +} + + +// test cases form issue 8685 +// not aligned +var _ = map[int]string{1: "spring", 2: "summer", + 3: "autumn", 4: "winter"} + +// not aligned +var _ = map[string]string{"a": "spring", "b": "summer", + "c": "autumn", "d": "winter"} + +// aligned +var _ = map[string]string{"a": "spring", +"b": "summer", + "c": "autumn", +"d": "winter"} + + func _() { var _ = T{ a, // must introduce trailing comma diff --git a/libgo/go/go/printer/testdata/expressions.golden b/libgo/go/go/printer/testdata/expressions.golden index fbe8275b3a5..e3d17a4653f 100644 --- a/libgo/go/go/printer/testdata/expressions.golden +++ b/libgo/go/go/printer/testdata/expressions.golden @@ -94,6 +94,11 @@ func _() { _ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666) _ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx) + // test case for issue 8021 + // want: + // ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]] + _ = ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]] + // the parser does not restrict expressions that may appear as statements true 42 diff --git a/libgo/go/go/printer/testdata/expressions.input b/libgo/go/go/printer/testdata/expressions.input index f4d20fa0f77..d20a59350ed 100644 --- a/libgo/go/go/printer/testdata/expressions.input +++ b/libgo/go/go/printer/testdata/expressions.input @@ -95,6 +95,11 @@ func _() { _ = Open(dpath + "/file", O_WRONLY | O_CREAT, 0666) _ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx) + // test case for issue 8021 + // want: + // ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]] + _ = 
([]bool{})[([]int{})[((1) + (((((1) + (((((((1) * (((((1) + (1))) + (1))))) + (1))) * (1))))) + (1))))]] + // the parser does not restrict expressions that may appear as statements true 42 diff --git a/libgo/go/go/printer/testdata/expressions.raw b/libgo/go/go/printer/testdata/expressions.raw index 97bc81dad87..2357336957f 100644 --- a/libgo/go/go/printer/testdata/expressions.raw +++ b/libgo/go/go/printer/testdata/expressions.raw @@ -94,6 +94,11 @@ func _() { _ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666) _ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx) + // test case for issue 8021 + // want: + // ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]] + _ = ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]] + // the parser does not restrict expressions that may appear as statements true 42 diff --git a/libgo/go/go/printer/testdata/statements.golden b/libgo/go/go/printer/testdata/statements.golden index 3b298f95ef1..4b134604735 100644 --- a/libgo/go/go/printer/testdata/statements.golden +++ b/libgo/go/go/printer/testdata/statements.golden @@ -309,6 +309,9 @@ func _() { for x := expr; expr; expr = false { use(x) } + for range []int{} { + println("foo") + } for x := range []int{} { use(x) } @@ -338,6 +341,12 @@ func _() { a[i] = i } // multiple lines + for range a { + } + for _ = range a { + } + for _, _ = range a { + } for i := range a { } for i := range a { @@ -435,7 +444,7 @@ func _() { } if x == a+b*(T{42}[0]) { } - if x == a+(b * (T{42}[0])) { + if x == a+(b*(T{42}[0])) { } if x == a+b*(T{42}[0]) { } diff --git a/libgo/go/go/printer/testdata/statements.input b/libgo/go/go/printer/testdata/statements.input index e7fcc0e5409..cade1576bf7 100644 --- a/libgo/go/go/printer/testdata/statements.input +++ b/libgo/go/go/printer/testdata/statements.input @@ -269,6 +269,8 @@ func _() { for x := expr;expr;expr = false { use(x) } + for range []int{} { + println("foo")} for x := range []int{} { use(x) } for x := range (([]int{})) { @@ -289,6 +291,9 @@ func _() { for i := 0; i < len(a); 1++ { a[i] = i } // multiple lines + for range a{} + for _ = range a{} + for _, _ = range a{} for i := range a {} for i := range a { a[i] = i } for i := range a { a[i] = i diff --git a/libgo/go/go/token/position.go b/libgo/go/go/token/position.go index e6f0ae6a673..82d90eeb728 100644 --- a/libgo/go/go/token/position.go +++ b/libgo/go/go/token/position.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// TODO(gri) consider making this a separate package outside the go directory. - package token import ( @@ -184,6 +182,7 @@ func (f *File) SetLines(lines []int) bool { } // SetLinesForContent sets the line offsets for the given file content. +// It ignores position-altering //line comments. func (f *File) SetLinesForContent(content []byte) { var lines []int line := 0 @@ -255,7 +254,6 @@ func (f *File) Offset(p Pos) int { // p must be a Pos value in that file or NoPos. // func (f *File) Line(p Pos) int { - // TODO(gri) this can be implemented much more efficiently return f.Position(p).Line } @@ -263,13 +261,16 @@ func searchLineInfos(a []lineInfo, x int) int { return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 } -// info returns the file name, line, and column number for a file offset. -func (f *File) info(offset int) (filename string, line, column int) { +// unpack returns the filename and line and column number for a file offset. 
+// If adjusted is set, unpack will return the filename and line information +// possibly adjusted by //line comments; otherwise those comments are ignored. +// +func (f *File) unpack(offset int, adjusted bool) (filename string, line, column int) { filename = f.name if i := searchInts(f.lines, offset); i >= 0 { line, column = i+1, offset-f.lines[i]+1 } - if len(f.infos) > 0 { + if adjusted && len(f.infos) > 0 { // almost no files have extra line infos if i := searchLineInfos(f.infos, offset); i >= 0 { alt := &f.infos[i] @@ -282,26 +283,35 @@ func (f *File) info(offset int) (filename string, line, column int) { return } -func (f *File) position(p Pos) (pos Position) { +func (f *File) position(p Pos, adjusted bool) (pos Position) { offset := int(p) - f.base pos.Offset = offset - pos.Filename, pos.Line, pos.Column = f.info(offset) + pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted) return } -// Position returns the Position value for the given file position p; -// p must be a Pos value in that file or NoPos. +// PositionFor returns the Position value for the given file position p. +// If adjusted is set, the position may be adjusted by position-altering +// //line comments; otherwise those comments are ignored. +// p must be a Pos value in f or NoPos. // -func (f *File) Position(p Pos) (pos Position) { +func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) { if p != NoPos { if int(p) < f.base || int(p) > f.base+f.size { panic("illegal Pos value") } - pos = f.position(p) + pos = f.position(p, adjusted) } return } +// Position returns the Position value for the given file position p. +// Calling f.Position(p) is equivalent to calling f.PositionFor(p, true). +// +func (f *File) Position(p Pos) (pos Position) { + return f.PositionFor(p, true) +} + // ----------------------------------------------------------------------------- // FileSet @@ -427,16 +437,27 @@ func (s *FileSet) File(p Pos) (f *File) { return } -// Position converts a Pos in the fileset into a general Position. -func (s *FileSet) Position(p Pos) (pos Position) { +// PositionFor converts a Pos p in the fileset into a Position value. +// If adjusted is set, the position may be adjusted by position-altering +// //line comments; otherwise those comments are ignored. +// p must be a Pos value in s or NoPos. +// +func (s *FileSet) PositionFor(p Pos, adjusted bool) (pos Position) { if p != NoPos { if f := s.file(p); f != nil { - pos = f.position(p) + pos = f.position(p, adjusted) } } return } +// Position converts a Pos p in the fileset into a Position value. +// Calling s.Position(p) is equivalent to calling s.PositionFor(p, true). 
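PositionFor makes the effect of //line comments optional when mapping a Pos back to a source location, while Position keeps the old, adjusted behaviour. A small sketch:

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"
)

func main() {
	src := "package p\n//line gen.go:100\nvar x int\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "orig.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	p := f.Decls[0].Pos()                   // position of "var x int"
	fmt.Println(fset.Position(p))           // adjusted by the //line comment: gen.go:100:1
	fmt.Println(fset.PositionFor(p, false)) // raw location in the real file: orig.go:3:1
}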
+// +func (s *FileSet) Position(p Pos) (pos Position) { + return s.PositionFor(p, true) +} + // ----------------------------------------------------------------------------- // Helper functions diff --git a/libgo/go/go/token/position_test.go b/libgo/go/go/token/position_test.go index ef6cfd93c25..d26939ce277 100644 --- a/libgo/go/go/token/position_test.go +++ b/libgo/go/go/token/position_test.go @@ -11,18 +11,18 @@ import ( "testing" ) -func checkPos(t *testing.T, msg string, p, q Position) { - if p.Filename != q.Filename { - t.Errorf("%s: expected filename = %q; got %q", msg, q.Filename, p.Filename) +func checkPos(t *testing.T, msg string, got, want Position) { + if got.Filename != want.Filename { + t.Errorf("%s: got filename = %q; want %q", msg, got.Filename, want.Filename) } - if p.Offset != q.Offset { - t.Errorf("%s: expected offset = %d; got %d", msg, q.Offset, p.Offset) + if got.Offset != want.Offset { + t.Errorf("%s: got offset = %d; want %d", msg, got.Offset, want.Offset) } - if p.Line != q.Line { - t.Errorf("%s: expected line = %d; got %d", msg, q.Line, p.Line) + if got.Line != want.Line { + t.Errorf("%s: got line = %d; want %d", msg, got.Line, want.Line) } - if p.Column != q.Column { - t.Errorf("%s: expected column = %d; got %d", msg, q.Column, p.Column) + if got.Column != want.Column { + t.Errorf("%s: got column = %d; want %d", msg, got.Column, want.Column) } } @@ -68,7 +68,7 @@ func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) { p := f.Pos(offs) offs2 := f.Offset(p) if offs2 != offs { - t.Errorf("%s, Offset: expected offset %d; got %d", f.Name(), offs, offs2) + t.Errorf("%s, Offset: got offset %d; want %d", f.Name(), offs2, offs) } line, col := linecol(lines, offs) msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p) @@ -93,16 +93,16 @@ func TestPositions(t *testing.T) { for _, test := range tests { // verify consistency of test case if test.source != nil && len(test.source) != test.size { - t.Errorf("%s: inconsistent test case: expected file size %d; got %d", test.filename, test.size, len(test.source)) + t.Errorf("%s: inconsistent test case: got file size %d; want %d", test.filename, len(test.source), test.size) } // add file and verify name and size f := fset.AddFile(test.filename, fset.Base()+delta, test.size) if f.Name() != test.filename { - t.Errorf("expected filename %q; got %q", test.filename, f.Name()) + t.Errorf("got filename %q; want %q", f.Name(), test.filename) } if f.Size() != test.size { - t.Errorf("%s: expected file size %d; got %d", f.Name(), test.size, f.Size()) + t.Errorf("%s: got file size %d; want %d", f.Name(), f.Size(), test.size) } if fset.File(f.Pos(0)) != f { t.Errorf("%s: f.Pos(0) was not found in f", f.Name()) @@ -112,12 +112,12 @@ func TestPositions(t *testing.T) { for i, offset := range test.lines { f.AddLine(offset) if f.LineCount() != i+1 { - t.Errorf("%s, AddLine: expected line count %d; got %d", f.Name(), i+1, f.LineCount()) + t.Errorf("%s, AddLine: got line count %d; want %d", f.Name(), f.LineCount(), i+1) } // adding the same offset again should be ignored f.AddLine(offset) if f.LineCount() != i+1 { - t.Errorf("%s, AddLine: expected unchanged line count %d; got %d", f.Name(), i+1, f.LineCount()) + t.Errorf("%s, AddLine: got unchanged line count %d; want %d", f.Name(), f.LineCount(), i+1) } verifyPositions(t, fset, f, test.lines[0:i+1]) } @@ -127,7 +127,7 @@ func TestPositions(t *testing.T) { t.Errorf("%s: SetLines failed", f.Name()) } if f.LineCount() != len(test.lines) { - t.Errorf("%s, SetLines: expected line 
count %d; got %d", f.Name(), len(test.lines), f.LineCount()) + t.Errorf("%s, SetLines: got line count %d; want %d", f.Name(), f.LineCount(), len(test.lines)) } verifyPositions(t, fset, f, test.lines) @@ -139,7 +139,7 @@ func TestPositions(t *testing.T) { } f.SetLinesForContent(src) if f.LineCount() != len(test.lines) { - t.Errorf("%s, SetLinesForContent: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount()) + t.Errorf("%s, SetLinesForContent: got line count %d; want %d", f.Name(), f.LineCount(), len(test.lines)) } verifyPositions(t, fset, f, test.lines) } @@ -177,13 +177,13 @@ func TestFiles(t *testing.T) { j := 0 fset.Iterate(func(f *File) bool { if f.Name() != tests[j].filename { - t.Errorf("expected filename = %s; got %s", tests[j].filename, f.Name()) + t.Errorf("got filename = %s; want %s", f.Name(), tests[j].filename) } j++ return true }) if j != i+1 { - t.Errorf("expected %d files; got %d", i+1, j) + t.Errorf("got %d files; want %d", j, i+1) } } } @@ -195,7 +195,7 @@ func TestFileSetPastEnd(t *testing.T) { fset.AddFile(test.filename, fset.Base(), test.size) } if f := fset.File(Pos(fset.Base())); f != nil { - t.Errorf("expected nil, got %v", f) + t.Errorf("got %v, want nil", f) } } @@ -209,7 +209,7 @@ func TestFileSetCacheUnlikely(t *testing.T) { for file, pos := range offsets { f := fset.File(Pos(pos)) if f.Name() != file { - t.Errorf("expecting %q at position %d, got %q", file, pos, f.Name()) + t.Errorf("got %q at position %d, want %q", f.Name(), pos, file) } } } @@ -236,3 +236,62 @@ func TestFileSetRace(t *testing.T) { } stop.Wait() } + +func TestPositionFor(t *testing.T) { + src := []byte(` +foo +b +ar +//line :100 +foobar +//line bar:3 +done +`) + + const filename = "foo" + fset := NewFileSet() + f := fset.AddFile(filename, fset.Base(), len(src)) + f.SetLinesForContent(src) + + // verify position info + for i, offs := range f.lines { + got1 := f.PositionFor(f.Pos(offs), false) + got2 := f.PositionFor(f.Pos(offs), true) + got3 := f.Position(f.Pos(offs)) + want := Position{filename, offs, i + 1, 1} + checkPos(t, "1. PositionFor unadjusted", got1, want) + checkPos(t, "1. PositionFor adjusted", got2, want) + checkPos(t, "1. Position", got3, want) + } + + // manually add //line info on lines l1, l2 + const l1, l2 = 5, 7 + f.AddLineInfo(f.lines[l1-1], "", 100) + f.AddLineInfo(f.lines[l2-1], "bar", 3) + + // unadjusted position info must remain unchanged + for i, offs := range f.lines { + got1 := f.PositionFor(f.Pos(offs), false) + want := Position{filename, offs, i + 1, 1} + checkPos(t, "2. PositionFor unadjusted", got1, want) + } + + // adjusted position info should have changed + for i, offs := range f.lines { + got2 := f.PositionFor(f.Pos(offs), true) + got3 := f.Position(f.Pos(offs)) + want := Position{filename, offs, i + 1, 1} + // manually compute wanted filename and line + line := want.Line + if i+1 >= l1 { + want.Filename = "" + want.Line = line - l1 + 100 + } + if i+1 >= l2 { + want.Filename = "bar" + want.Line = line - l2 + 3 + } + checkPos(t, "3. PositionFor adjusted", got2, want) + checkPos(t, "3. Position", got3, want) + } +} diff --git a/libgo/go/hash/crc32/crc32.go b/libgo/go/hash/crc32/crc32.go index a2a21a06f95..6a6b9473bea 100644 --- a/libgo/go/hash/crc32/crc32.go +++ b/libgo/go/hash/crc32/crc32.go @@ -17,8 +17,8 @@ const Size = 4 // Predefined polynomials. const ( - // Far and away the most common CRC-32 polynomial. - // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, mpeg-2, ... 
+ // IEEE is by far and away the most common CRC-32 polynomial. + // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ... IEEE = 0xedb88320 // Castagnoli's polynomial, used in iSCSI. diff --git a/libgo/go/html/template/error.go b/libgo/go/html/template/error.go index 46e49ccf830..8f99e1b9624 100644 --- a/libgo/go/html/template/error.go +++ b/libgo/go/html/template/error.go @@ -6,12 +6,16 @@ package template import ( "fmt" + "text/template/parse" ) // Error describes a problem encountered during template Escaping. type Error struct { // ErrorCode describes the kind of error. ErrorCode ErrorCode + // Node is the node that caused the problem, if known. + // If not nil, it overrides Name and Line. + Node parse.Node // Name is the name of the template in which the error was encountered. Name string // Line is the line number of the error in the template source or 0. @@ -182,9 +186,13 @@ const ( ) func (e *Error) Error() string { - if e.Line != 0 { + switch { + case e.Node != nil: + loc, _ := (*parse.Tree)(nil).ErrorContext(e.Node) + return fmt.Sprintf("html/template:%s: %s", loc, e.Description) + case e.Line != 0: return fmt.Sprintf("html/template:%s:%d: %s", e.Name, e.Line, e.Description) - } else if e.Name != "" { + case e.Name != "": return fmt.Sprintf("html/template:%s: %s", e.Name, e.Description) } return "html/template: " + e.Description @@ -192,6 +200,6 @@ func (e *Error) Error() string { // errorf creates an error given a format string f and args. // The template Name still needs to be supplied. -func errorf(k ErrorCode, line int, f string, args ...interface{}) *Error { - return &Error{k, "", line, fmt.Sprintf(f, args...)} +func errorf(k ErrorCode, node parse.Node, line int, f string, args ...interface{}) *Error { + return &Error{k, node, "", line, fmt.Sprintf(f, args...)} } diff --git a/libgo/go/html/template/escape.go b/libgo/go/html/template/escape.go index 4e379828d4c..ee01fb12ab8 100644 --- a/libgo/go/html/template/escape.go +++ b/libgo/go/html/template/escape.go @@ -13,40 +13,33 @@ import ( "text/template/parse" ) -// escapeTemplates rewrites the named templates, which must be +// escapeTemplate rewrites the named template, which must be // associated with t, to guarantee that the output of any of the named -// templates is properly escaped. Names should include the names of -// all templates that might be Executed but need not include helper -// templates. If no error is returned, then the named templates have +// templates is properly escaped. If no error is returned, then the named templates have // been modified. Otherwise the named templates have been rendered // unusable. -func escapeTemplates(tmpl *Template, names ...string) error { +func escapeTemplate(tmpl *Template, node parse.Node, name string) error { e := newEscaper(tmpl) - for _, name := range names { - c, _ := e.escapeTree(context{}, name, 0) - var err error - if c.err != nil { - err, c.err.Name = c.err, name - } else if c.state != stateText { - err = &Error{ErrEndContext, name, 0, fmt.Sprintf("ends in a non-text context: %v", c)} - } - if err != nil { - // Prevent execution of unsafe templates. 
- for _, name := range names { - if t := tmpl.set[name]; t != nil { - t.text.Tree = nil - t.Tree = nil - } - } - return err + c, _ := e.escapeTree(context{}, node, name, 0) + var err error + if c.err != nil { + err, c.err.Name = c.err, name + } else if c.state != stateText { + err = &Error{ErrEndContext, nil, name, 0, fmt.Sprintf("ends in a non-text context: %v", c)} + } + if err != nil { + // Prevent execution of unsafe templates. + if t := tmpl.set[name]; t != nil { + t.escapeErr = err + t.text.Tree = nil + t.Tree = nil } + return err } e.commit() - for _, name := range names { - if t := tmpl.set[name]; t != nil { - t.escaped = true - t.Tree = t.text.Tree - } + if t := tmpl.set[name]; t != nil { + t.escapeErr = escapeOK + t.Tree = t.text.Tree } return nil } @@ -168,7 +161,7 @@ func (e *escaper) escapeAction(c context, n *parse.ActionNode) context { case urlPartUnknown: return context{ state: stateError, - err: errorf(ErrAmbigContext, n.Line, "%s appears in an ambiguous URL context", n), + err: errorf(ErrAmbigContext, n, n.Line, "%s appears in an ambiguous URL context", n), } default: panic(c.urlPart.String()) @@ -338,7 +331,7 @@ func escFnsEq(a, b string) bool { func newIdentCmd(identifier string, pos parse.Pos) *parse.CommandNode { return &parse.CommandNode{ NodeType: parse.NodeCommand, - Args: []parse.Node{parse.NewIdentifier(identifier).SetPos(pos)}, + Args: []parse.Node{parse.NewIdentifier(identifier).SetTree(nil).SetPos(pos)}, // TODO: SetTree. } } @@ -372,7 +365,7 @@ func nudge(c context) context { // join joins the two contexts of a branch template node. The result is an // error context if either of the input contexts are error contexts, or if the // the input contexts differ. -func join(a, b context, line int, nodeName string) context { +func join(a, b context, node parse.Node, nodeName string) context { if a.state == stateError { return a } @@ -405,14 +398,14 @@ func join(a, b context, line int, nodeName string) context { // ends in an unquoted value state even though the else branch // ends in stateBeforeValue. if c, d := nudge(a), nudge(b); !(c.eq(a) && d.eq(b)) { - if e := join(c, d, line, nodeName); e.state != stateError { + if e := join(c, d, node, nodeName); e.state != stateError { return e } } return context{ state: stateError, - err: errorf(ErrBranchEnd, line, "{{%s}} branches end in different contexts: %v, %v", nodeName, a, b), + err: errorf(ErrBranchEnd, node, 0, "{{%s}} branches end in different contexts: %v, %v", nodeName, a, b), } } @@ -424,7 +417,7 @@ func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) // We check that executing n.List once results in the same context // as executing n.List twice. c1, _ := e.escapeListConditionally(c0, n.List, nil) - c0 = join(c0, c1, n.Line, nodeName) + c0 = join(c0, c1, n, nodeName) if c0.state == stateError { // Make clear that this is a problem on loop re-entry // since developers tend to overlook that branch when @@ -435,7 +428,7 @@ func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) } } c1 := e.escapeList(c, n.ElseList) - return join(c0, c1, n.Line, nodeName) + return join(c0, c1, n, nodeName) } // escapeList escapes a list template node. @@ -487,7 +480,7 @@ func (e *escaper) escapeListConditionally(c context, n *parse.ListNode, filter f // escapeTemplate escapes a {{template}} call node. 
func (e *escaper) escapeTemplate(c context, n *parse.TemplateNode) context { - c, name := e.escapeTree(c, n.Name, n.Line) + c, name := e.escapeTree(c, n, n.Name, n.Line) if name != n.Name { e.editTemplateNode(n, name) } @@ -496,7 +489,7 @@ func (e *escaper) escapeTemplate(c context, n *parse.TemplateNode) context { // escapeTree escapes the named template starting in the given context as // necessary and returns its output context. -func (e *escaper) escapeTree(c context, name string, line int) (context, string) { +func (e *escaper) escapeTree(c context, node parse.Node, name string, line int) (context, string) { // Mangle the template name with the input context to produce a reliable // identifier. dname := c.mangle(name) @@ -512,12 +505,12 @@ func (e *escaper) escapeTree(c context, name string, line int) (context, string) if e.tmpl.set[name] != nil { return context{ state: stateError, - err: errorf(ErrNoSuchTemplate, line, "%q is an incomplete or empty template", name), + err: errorf(ErrNoSuchTemplate, node, line, "%q is an incomplete or empty template", name), }, dname } return context{ state: stateError, - err: errorf(ErrNoSuchTemplate, line, "no such template %q", name), + err: errorf(ErrNoSuchTemplate, node, line, "no such template %q", name), }, dname } if dname != name { @@ -549,8 +542,7 @@ func (e *escaper) computeOutCtx(c context, t *template.Template) context { if !ok && c1.state != stateError { return context{ state: stateError, - // TODO: Find the first node with a line in t.text.Tree.Root - err: errorf(ErrOutputContext, 0, "cannot compute output context for template %s", t.Name()), + err: errorf(ErrOutputContext, t.Tree.Root, 0, "cannot compute output context for template %s", t.Name()), } } return c1 @@ -694,7 +686,7 @@ func contextAfterText(c context, s []byte) (context, int) { if j := bytes.IndexAny(s[:i], "\"'<=`"); j >= 0 { return context{ state: stateError, - err: errorf(ErrBadHTML, 0, "%q in unquoted attr: %q", s[j:j+1], s[:i]), + err: errorf(ErrBadHTML, nil, 0, "%q in unquoted attr: %q", s[j:j+1], s[:i]), }, len(s) } } diff --git a/libgo/go/html/template/escape_test.go b/libgo/go/html/template/escape_test.go index 3ccf93ece01..ef7b8774845 100644 --- a/libgo/go/html/template/escape_test.go +++ b/libgo/go/html/template/escape_test.go @@ -861,29 +861,29 @@ func TestErrors(t *testing.T) { // Error cases. { "{{if .Cond}}<a{{end}}", - "z:1: {{if}} branches", + "z:1:5: {{if}} branches", }, { "{{if .Cond}}\n{{else}}\n<a{{end}}", - "z:1: {{if}} branches", + "z:1:5: {{if}} branches", }, { // Missing quote in the else branch. `{{if .Cond}}<a href="foo">{{else}}<a href="bar>{{end}}`, - "z:1: {{if}} branches", + "z:1:5: {{if}} branches", }, { // Different kind of attribute: href implies a URL. 
"<a {{if .Cond}}href='{{else}}title='{{end}}{{.X}}'>", - "z:1: {{if}} branches", + "z:1:8: {{if}} branches", }, { "\n{{with .X}}<a{{end}}", - "z:2: {{with}} branches", + "z:2:7: {{with}} branches", }, { "\n{{with .X}}<a>{{else}}<a{{end}}", - "z:2: {{with}} branches", + "z:2:7: {{with}} branches", }, { "{{range .Items}}<a{{end}}", @@ -891,7 +891,7 @@ func TestErrors(t *testing.T) { }, { "\n{{range .Items}} x='<a{{end}}", - "z:2: on range loop re-entry: {{range}} branches", + "z:2:8: on range loop re-entry: {{range}} branches", }, { "<a b=1 c={{.H}}", @@ -903,7 +903,7 @@ func TestErrors(t *testing.T) { }, { `<a href="{{if .F}}/foo?a={{else}}/bar/{{end}}{{.H}}">`, - "z:1: {{.H}} appears in an ambiguous URL context", + "z:1:47: {{.H}} appears in an ambiguous URL context", }, { `<a onclick="alert('Hello \`, @@ -932,7 +932,7 @@ func TestErrors(t *testing.T) { }, { `{{template "foo"}}`, - "z:1: no such template \"foo\"", + "z:1:11: no such template \"foo\"", }, { `<div{{template "y"}}>` + @@ -994,6 +994,11 @@ func TestErrors(t *testing.T) { t.Errorf("input=%q: error\n\t%q\ndoes not contain expected string\n\t%q", test.input, got, test.err) continue } + // Check that we get the same error if we call Execute again. + if err := tmpl.Execute(buf, nil); err == nil || err.Error() != got { + t.Errorf("input=%q: unexpected error on second call %q", test.input, err) + + } } } diff --git a/libgo/go/html/template/js_test.go b/libgo/go/html/template/js_test.go index 311e1d2c4ea..7af7997de9b 100644 --- a/libgo/go/html/template/js_test.go +++ b/libgo/go/html/template/js_test.go @@ -138,7 +138,7 @@ func TestJSValEscaper(t *testing.T) { // Newlines. {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`}, // "\v" == "v" on IE 6 so use "\x0b" instead. - {"\t\x0b", `"\u0009\u000b"`}, + {"\t\x0b", `"\t\u000b"`}, {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`}, {[]interface{}{}, "[]"}, {[]interface{}{42, "foo", nil}, `[42,"foo",null]`}, diff --git a/libgo/go/html/template/template.go b/libgo/go/html/template/template.go index d389658979a..ce6170105cc 100644 --- a/libgo/go/html/template/template.go +++ b/libgo/go/html/template/template.go @@ -17,7 +17,8 @@ import ( // Template is a specialized Template from "text/template" that produces a safe // HTML document fragment. type Template struct { - escaped bool + // Sticky error if escaping fails. + escapeErr error // We could embed the text/template field, but it's safer not to because // we need to keep our version of the name space and the underlying // template's in sync. @@ -27,6 +28,9 @@ type Template struct { *nameSpace // common to all associated templates } +// escapeOK is a sentinel value used to indicate valid escaping. +var escapeOK = fmt.Errorf("template escaped correctly") + // nameSpace is the data structure shared by all templates in an association. 
type nameSpace struct { mu sync.Mutex @@ -51,11 +55,12 @@ func (t *Template) Templates() []*Template { func (t *Template) escape() error { t.nameSpace.mu.Lock() defer t.nameSpace.mu.Unlock() - if !t.escaped { - if err := escapeTemplates(t, t.Name()); err != nil { + if t.escapeErr == nil { + if err := escapeTemplate(t, t.text.Root, t.Name()); err != nil { return err } - t.escaped = true + } else if t.escapeErr != escapeOK { + return t.escapeErr } return nil } @@ -97,14 +102,17 @@ func (t *Template) lookupAndEscapeTemplate(name string) (tmpl *Template, err err if tmpl == nil { return nil, fmt.Errorf("html/template: %q is undefined", name) } + if tmpl.escapeErr != nil && tmpl.escapeErr != escapeOK { + return nil, tmpl.escapeErr + } if tmpl.text.Tree == nil || tmpl.text.Root == nil { return nil, fmt.Errorf("html/template: %q is an incomplete template", name) } if t.text.Lookup(name) == nil { panic("html/template internal error: template escaping out of sync") } - if tmpl != nil && !tmpl.escaped { - err = escapeTemplates(tmpl, name) + if tmpl.escapeErr == nil { + err = escapeTemplate(tmpl, tmpl.text.Root, name) } return tmpl, err } @@ -119,7 +127,7 @@ func (t *Template) lookupAndEscapeTemplate(name string) (tmpl *Template, err err // other than space, comments, and template definitions.) func (t *Template) Parse(src string) (*Template, error) { t.nameSpace.mu.Lock() - t.escaped = false + t.escapeErr = nil t.nameSpace.mu.Unlock() ret, err := t.text.Parse(src) if err != nil { @@ -137,7 +145,7 @@ func (t *Template) Parse(src string) (*Template, error) { tmpl = t.new(name) } // Restore our record of this text/template to its unescaped original state. - tmpl.escaped = false + tmpl.escapeErr = nil tmpl.text = v tmpl.Tree = v.Tree } @@ -151,7 +159,7 @@ func (t *Template) Parse(src string) (*Template, error) { func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { t.nameSpace.mu.Lock() defer t.nameSpace.mu.Unlock() - if t.escaped { + if t.escapeErr != nil { return nil, fmt.Errorf("html/template: cannot AddParseTree to %q after it has executed", t.Name()) } text, err := t.text.AddParseTree(name, tree) @@ -159,7 +167,7 @@ func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error return nil, err } ret := &Template{ - false, + nil, text, text.Tree, t.nameSpace, @@ -179,7 +187,7 @@ func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error func (t *Template) Clone() (*Template, error) { t.nameSpace.mu.Lock() defer t.nameSpace.mu.Unlock() - if t.escaped { + if t.escapeErr != nil { return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name()) } textClone, err := t.text.Clone() @@ -187,7 +195,7 @@ func (t *Template) Clone() (*Template, error) { return nil, err } ret := &Template{ - false, + nil, textClone, textClone.Tree, &nameSpace{ @@ -197,12 +205,12 @@ func (t *Template) Clone() (*Template, error) { for _, x := range textClone.Templates() { name := x.Name() src := t.set[name] - if src == nil || src.escaped { + if src == nil || src.escapeErr != nil { return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name()) } x.Tree = x.Tree.Copy() ret.set[name] = &Template{ - false, + nil, x, x.Tree, ret.nameSpace, @@ -214,7 +222,7 @@ func (t *Template) Clone() (*Template, error) { // New allocates a new HTML template with the given name. 
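Because escapeErr is sticky, a template that fails escaping stays failed: the first Execute reports an error that now carries the offending node's line and column, and a second Execute reports the same error instead of running a half-escaped tree. Roughly (error text abbreviated):

package main

import (
	"fmt"
	"html/template"
	"io/ioutil"
)

func main() {
	// The two {{if}} branches end in different HTML contexts, so escaping fails.
	t := template.Must(template.New("z").Parse(`{{if .Cond}}<a{{end}}`))

	err1 := t.Execute(ioutil.Discard, nil)
	fmt.Println(err1) // html/template:z:1:5: {{if}} branches end in different contexts: ...

	// The failure is sticky: a second Execute reports the same error rather
	// than running the half-escaped template.
	err2 := t.Execute(ioutil.Discard, nil)
	fmt.Println(err1.Error() == err2.Error()) // true
}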
func New(name string) *Template { tmpl := &Template{ - false, + nil, template.New(name), nil, &nameSpace{ @@ -237,7 +245,7 @@ func (t *Template) New(name string) *Template { // new is the implementation of New, without the lock. func (t *Template) new(name string) *Template { tmpl := &Template{ - false, + nil, t.text.New(name), nil, t.nameSpace, diff --git a/libgo/go/html/template/transition.go b/libgo/go/html/template/transition.go index 7f30a7ab8de..b486fcd2854 100644 --- a/libgo/go/html/template/transition.go +++ b/libgo/go/html/template/transition.go @@ -102,7 +102,7 @@ func tTag(c context, s []byte) (context, int) { if i == j { return context{ state: stateError, - err: errorf(ErrBadHTML, 0, "expected space, attr name, or end of tag, but got %q", s[i:]), + err: errorf(ErrBadHTML, nil, 0, "expected space, attr name, or end of tag, but got %q", s[i:]), }, len(s) } switch attrType(string(s[i:j])) { @@ -245,7 +245,7 @@ func tJS(c context, s []byte) (context, int) { default: return context{ state: stateError, - err: errorf(ErrSlashAmbig, 0, "'/' could start a division or regexp: %.32q", s[i:]), + err: errorf(ErrSlashAmbig, nil, 0, "'/' could start a division or regexp: %.32q", s[i:]), }, len(s) } default: @@ -277,7 +277,7 @@ func tJSDelimited(c context, s []byte) (context, int) { if i == len(s) { return context{ state: stateError, - err: errorf(ErrPartialEscape, 0, "unfinished escape sequence in JS string: %q", s), + err: errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in JS string: %q", s), }, len(s) } case '[': @@ -299,7 +299,7 @@ func tJSDelimited(c context, s []byte) (context, int) { // into charsets is desired. return context{ state: stateError, - err: errorf(ErrPartialCharset, 0, "unfinished JS regexp charset: %q", s), + err: errorf(ErrPartialCharset, nil, 0, "unfinished JS regexp charset: %q", s), }, len(s) } @@ -459,7 +459,7 @@ func tCSSStr(c context, s []byte) (context, int) { if i == len(s) { return context{ state: stateError, - err: errorf(ErrPartialEscape, 0, "unfinished escape sequence in CSS string: %q", s), + err: errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in CSS string: %q", s), }, len(s) } } else { @@ -489,7 +489,7 @@ func eatAttrName(s []byte, i int) (int, *Error) { // These result in a parse warning in HTML5 and are // indicative of serious problems if seen in an attr // name in a template. - return -1, errorf(ErrBadHTML, 0, "%q in attribute name: %.32q", s[j:j+1], s) + return -1, errorf(ErrBadHTML, nil, 0, "%q in attribute name: %.32q", s[j:j+1], s) default: // No-op. } diff --git a/libgo/go/image/color/palette/gen.go b/libgo/go/image/color/palette/gen.go index 4f4d88345a8..2b5fdaaf2b3 100644 --- a/libgo/go/image/color/palette/gen.go +++ b/libgo/go/image/color/palette/gen.go @@ -7,29 +7,49 @@ package main // This program generates palette.go. Invoke it as -// go run gen.go | gofmt > palette.go +// go run gen.go -output palette.go import ( + "bytes" + "flag" "fmt" + "go/format" + "io" + "io/ioutil" + "log" ) +var filename = flag.String("output", "palette.go", "output file name") + func main() { - fmt.Println(`// Copyright 2013 The Go Authors. All rights reserved. + flag.Parse() + + var buf bytes.Buffer + + fmt.Fprintln(&buf, `// Copyright 2013 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file.`) - fmt.Println() - fmt.Println("// generated by go run gen.go; DO NOT EDIT") - fmt.Println() - fmt.Println("// Package palette provides standard color palettes.") - fmt.Println("package palette") - fmt.Println() - fmt.Println(`import "image/color"`) - fmt.Println() - printPlan9() - printWebSafe() + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "// generated by go run gen.go -output palette.go; DO NOT EDIT") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "package palette") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, `import "image/color"`) + fmt.Fprintln(&buf) + printPlan9(&buf) + printWebSafe(&buf) + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { + log.Fatal(err) + } } -func printPlan9() { +func printPlan9(w io.Writer) { c, lines := [3]int{}, [256]string{} for r, i := 0, 0; r != 4; r++ { for v := 0; v != 4; v, i = v+1, i+16 { @@ -58,27 +78,27 @@ func printPlan9() { } } } - fmt.Println("// Plan9 is a 256-color palette that partitions the 24-bit RGB space") - fmt.Println("// into 4×4×4 subdivision, with 4 shades in each subcube. Compared to the") - fmt.Println("// WebSafe, the idea is to reduce the color resolution by dicing the") - fmt.Println("// color cube into fewer cells, and to use the extra space to increase the") - fmt.Println("// intensity resolution. This results in 16 gray shades (4 gray subcubes with") - fmt.Println("// 4 samples in each), 13 shades of each primary and secondary color (3") - fmt.Println("// subcubes with 4 samples plus black) and a reasonable selection of colors") - fmt.Println("// covering the rest of the color cube. The advantage is better representation") - fmt.Println("// of continuous tones.") - fmt.Println("//") - fmt.Println("// This palette was used in the Plan 9 Operating System, described at") - fmt.Println("// http://plan9.bell-labs.com/magic/man2html/6/color") - fmt.Println("var Plan9 = []color.Color{") + fmt.Fprintln(w, "// Plan9 is a 256-color palette that partitions the 24-bit RGB space") + fmt.Fprintln(w, "// into 4×4×4 subdivision, with 4 shades in each subcube. Compared to the") + fmt.Fprintln(w, "// WebSafe, the idea is to reduce the color resolution by dicing the") + fmt.Fprintln(w, "// color cube into fewer cells, and to use the extra space to increase the") + fmt.Fprintln(w, "// intensity resolution. This results in 16 gray shades (4 gray subcubes with") + fmt.Fprintln(w, "// 4 samples in each), 13 shades of each primary and secondary color (3") + fmt.Fprintln(w, "// subcubes with 4 samples plus black) and a reasonable selection of colors") + fmt.Fprintln(w, "// covering the rest of the color cube. 
The advantage is better representation") + fmt.Fprintln(w, "// of continuous tones.") + fmt.Fprintln(w, "//") + fmt.Fprintln(w, "// This palette was used in the Plan 9 Operating System, described at") + fmt.Fprintln(w, "// http://plan9.bell-labs.com/magic/man2html/6/color") + fmt.Fprintln(w, "var Plan9 = []color.Color{") for _, line := range lines { - fmt.Println(line) + fmt.Fprintln(w, line) } - fmt.Println("}") - fmt.Println() + fmt.Fprintln(w, "}") + fmt.Fprintln(w) } -func printWebSafe() { +func printWebSafe(w io.Writer) { lines := [6 * 6 * 6]string{} for r := 0; r < 6; r++ { for g := 0; g < 6; g++ { @@ -88,14 +108,14 @@ func printWebSafe() { } } } - fmt.Println("// WebSafe is a 216-color palette that was popularized by early versions") - fmt.Println("// of Netscape Navigator. It is also known as the Netscape Color Cube.") - fmt.Println("//") - fmt.Println("// See http://en.wikipedia.org/wiki/Web_colors#Web-safe_colors for details.") - fmt.Println("var WebSafe = []color.Color{") + fmt.Fprintln(w, "// WebSafe is a 216-color palette that was popularized by early versions") + fmt.Fprintln(w, "// of Netscape Navigator. It is also known as the Netscape Color Cube.") + fmt.Fprintln(w, "//") + fmt.Fprintln(w, "// See http://en.wikipedia.org/wiki/Web_colors#Web-safe_colors for details.") + fmt.Fprintln(w, "var WebSafe = []color.Color{") for _, line := range lines { - fmt.Println(line) + fmt.Fprintln(w, line) } - fmt.Println("}") - fmt.Println() + fmt.Fprintln(w, "}") + fmt.Fprintln(w) } diff --git a/libgo/go/image/color/palette/generate.go b/libgo/go/image/color/palette/generate.go new file mode 100644 index 00000000000..64c2ec0d9ab --- /dev/null +++ b/libgo/go/image/color/palette/generate.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go -output palette.go + +// Package palette provides standard color palettes. +package palette diff --git a/libgo/go/image/color/palette/palette.go b/libgo/go/image/color/palette/palette.go index f761e5368d9..0bf2c8e1aa5 100644 --- a/libgo/go/image/color/palette/palette.go +++ b/libgo/go/image/color/palette/palette.go @@ -2,9 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// generated by go run gen.go; DO NOT EDIT +// generated by go run gen.go -output palette.go; DO NOT EDIT -// Package palette provides standard color palettes. package palette import "image/color" diff --git a/libgo/go/image/gif/reader.go b/libgo/go/image/gif/reader.go index 926710a4562..5a863e204f3 100644 --- a/libgo/go/image/gif/reader.go +++ b/libgo/go/image/gif/reader.go @@ -171,7 +171,8 @@ func (d *decoder) decode(r io.Reader, configOnly bool) error { if err != nil { return err } - if d.imageFields&fColorMapFollows != 0 { + useLocalColorMap := d.imageFields&fColorMapFollows != 0 + if useLocalColorMap { m.Palette, err = d.readColorMap() if err != nil { return err @@ -180,6 +181,10 @@ func (d *decoder) decode(r io.Reader, configOnly bool) error { m.Palette = d.globalColorMap } if d.hasTransparentIndex && int(d.transparentIndex) < len(m.Palette) { + if !useLocalColorMap { + // Clone the global color map. + m.Palette = append(color.Palette(nil), d.globalColorMap...) 
+ } m.Palette[d.transparentIndex] = color.RGBA{} } litWidth, err := d.r.ReadByte() diff --git a/libgo/go/image/gif/reader_test.go b/libgo/go/image/gif/reader_test.go index fc2041e9970..7b6f504367c 100644 --- a/libgo/go/image/gif/reader_test.go +++ b/libgo/go/image/gif/reader_test.go @@ -22,16 +22,16 @@ const ( trailerStr = "\x3b" ) -func TestDecode(t *testing.T) { - // lzwEncode returns an LZW encoding (with 2-bit literals) of n zeroes. - lzwEncode := func(n int) []byte { - b := &bytes.Buffer{} - w := lzw.NewWriter(b, lzw.LSB, 2) - w.Write(make([]byte, n)) - w.Close() - return b.Bytes() - } +// lzwEncode returns an LZW encoding (with 2-bit literals) of n zeroes. +func lzwEncode(n int) []byte { + b := &bytes.Buffer{} + w := lzw.NewWriter(b, lzw.LSB, 2) + w.Write(make([]byte, n)) + w.Close() + return b.Bytes() +} +func TestDecode(t *testing.T) { testCases := []struct { nPix int // The number of pixels in the image data. extra bool // Whether to write an extra block after the LZW-encoded data. @@ -90,6 +90,52 @@ func TestDecode(t *testing.T) { } } +func TestTransparentIndex(t *testing.T) { + b := &bytes.Buffer{} + b.WriteString(headerStr) + b.WriteString(paletteStr) + for transparentIndex := 0; transparentIndex < 3; transparentIndex++ { + if transparentIndex < 2 { + // Write the graphic control for the transparent index. + b.WriteString("\x21\xf9\x00\x01\x00\x00") + b.WriteByte(byte(transparentIndex)) + b.WriteByte(0) + } + // Write an image with bounds 2x1, as per TestDecode. + b.WriteString("\x2c\x00\x00\x00\x00\x02\x00\x01\x00\x00\x02") + enc := lzwEncode(2) + if len(enc) > 0xff { + t.Fatalf("compressed length %d is too large", len(enc)) + } + b.WriteByte(byte(len(enc))) + b.Write(enc) + b.WriteByte(0x00) + } + b.WriteString(trailerStr) + + g, err := DecodeAll(b) + if err != nil { + t.Fatalf("DecodeAll: %v", err) + } + c0 := color.RGBA{paletteStr[0], paletteStr[1], paletteStr[2], 0xff} + c1 := color.RGBA{paletteStr[3], paletteStr[4], paletteStr[5], 0xff} + cz := color.RGBA{} + wants := []color.Palette{ + {cz, c1}, + {c0, cz}, + {c0, c1}, + } + if len(g.Image) != len(wants) { + t.Fatalf("got %d images, want %d", len(g.Image), len(wants)) + } + for i, want := range wants { + got := g.Image[i].Palette + if !reflect.DeepEqual(got, want) { + t.Errorf("palette #%d:\ngot %v\nwant %v", i, got, want) + } + } +} + // testGIF is a simple GIF that we can modify to test different scenarios. var testGIF = []byte{ 'G', 'I', 'F', '8', '9', 'a', diff --git a/libgo/go/image/gif/writer.go b/libgo/go/image/gif/writer.go index 15cd40fadf6..49abde704c8 100644 --- a/libgo/go/image/gif/writer.go +++ b/libgo/go/image/gif/writer.go @@ -233,10 +233,20 @@ func (e *encoder) writeImageBlock(pm *image.Paletted, delay int) { e.writeByte(uint8(litWidth)) // LZW Minimum Code Size. lzww := lzw.NewWriter(blockWriter{e: e}, lzw.LSB, litWidth) - _, e.err = lzww.Write(pm.Pix) - if e.err != nil { - lzww.Close() - return + if dx := b.Dx(); dx == pm.Stride { + _, e.err = lzww.Write(pm.Pix) + if e.err != nil { + lzww.Close() + return + } + } else { + for i, y := 0, b.Min.Y; y < b.Max.Y; i, y = i+pm.Stride, y+1 { + _, e.err = lzww.Write(pm.Pix[i : i+dx]) + if e.err != nil { + lzww.Close() + return + } + } } lzww.Close() e.writeByte(0x00) // Block Terminator. 
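Note: the writeImageBlock change above matters for images whose Pix rows are wider than the drawn rectangle, which is exactly what SubImage produces: the sub-image shares the parent's Pix and Stride, so pm.Stride can exceed b.Dx() and the encoder has to emit the pixel data row by row. A short sketch of that situation with the standard image and image/gif packages:

    package main

    import (
        "bytes"
        "fmt"
        "image"
        "image/color/palette"
        "image/gif"
    )

    func main() {
        full := image.NewPaletted(image.Rect(0, 0, 100, 60), palette.Plan9)
        sub := full.SubImage(image.Rect(0, 0, 50, 30)).(*image.Paletted)
        // The sub-image still uses the parent's row stride.
        fmt.Println(sub.Stride, sub.Bounds().Dx()) // 100 50

        var buf bytes.Buffer
        if err := gif.Encode(&buf, sub, nil); err != nil {
            panic(err)
        }
    }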
diff --git a/libgo/go/image/gif/writer_test.go b/libgo/go/image/gif/writer_test.go index c1ada769c2c..93306ffdb34 100644 --- a/libgo/go/image/gif/writer_test.go +++ b/libgo/go/image/gif/writer_test.go @@ -102,6 +102,29 @@ func TestWriter(t *testing.T) { } } +func TestSubImage(t *testing.T) { + m0, err := readImg("../testdata/video-001.gif") + if err != nil { + t.Fatalf("readImg: %v", err) + } + m0 = m0.(*image.Paletted).SubImage(image.Rect(0, 0, 50, 30)) + var buf bytes.Buffer + err = Encode(&buf, m0, nil) + if err != nil { + t.Fatalf("Encode: %v", err) + } + m1, err := Decode(&buf) + if err != nil { + t.Fatalf("Decode: %v", err) + } + if m0.Bounds() != m1.Bounds() { + t.Fatalf("bounds differ: %v and %v", m0.Bounds(), m1.Bounds()) + } + if averageDelta(m0, m1) != 0 { + t.Fatalf("images differ") + } +} + var frames = []string{ "../testdata/video-001.gif", "../testdata/video-005.gray.gif", @@ -116,7 +139,7 @@ func TestEncodeAll(t *testing.T) { for i, f := range frames { m, err := readGIF(f) if err != nil { - t.Error(f, err) + t.Fatal(f, err) } g0.Image[i] = m.Image[0] } diff --git a/libgo/go/image/image.go b/libgo/go/image/image.go index 32a89ef34ca..6b8e5c4877e 100644 --- a/libgo/go/image/image.go +++ b/libgo/go/image/image.go @@ -72,6 +72,10 @@ func (p *RGBA) ColorModel() color.Model { return color.RGBAModel } func (p *RGBA) Bounds() Rectangle { return p.Rect } func (p *RGBA) At(x, y int) color.Color { + return p.RGBAAt(x, y) +} + +func (p *RGBA) RGBAAt(x, y int) color.RGBA { if !(Point{x, y}.In(p.Rect)) { return color.RGBA{} } @@ -167,6 +171,10 @@ func (p *RGBA64) ColorModel() color.Model { return color.RGBA64Model } func (p *RGBA64) Bounds() Rectangle { return p.Rect } func (p *RGBA64) At(x, y int) color.Color { + return p.RGBA64At(x, y) +} + +func (p *RGBA64) RGBA64At(x, y int) color.RGBA64 { if !(Point{x, y}.In(p.Rect)) { return color.RGBA64{} } @@ -275,6 +283,10 @@ func (p *NRGBA) ColorModel() color.Model { return color.NRGBAModel } func (p *NRGBA) Bounds() Rectangle { return p.Rect } func (p *NRGBA) At(x, y int) color.Color { + return p.NRGBAAt(x, y) +} + +func (p *NRGBA) NRGBAAt(x, y int) color.NRGBA { if !(Point{x, y}.In(p.Rect)) { return color.NRGBA{} } @@ -370,6 +382,10 @@ func (p *NRGBA64) ColorModel() color.Model { return color.NRGBA64Model } func (p *NRGBA64) Bounds() Rectangle { return p.Rect } func (p *NRGBA64) At(x, y int) color.Color { + return p.NRGBA64At(x, y) +} + +func (p *NRGBA64) NRGBA64At(x, y int) color.NRGBA64 { if !(Point{x, y}.In(p.Rect)) { return color.NRGBA64{} } @@ -478,6 +494,10 @@ func (p *Alpha) ColorModel() color.Model { return color.AlphaModel } func (p *Alpha) Bounds() Rectangle { return p.Rect } func (p *Alpha) At(x, y int) color.Color { + return p.AlphaAt(x, y) +} + +func (p *Alpha) AlphaAt(x, y int) color.Alpha { if !(Point{x, y}.In(p.Rect)) { return color.Alpha{} } @@ -566,6 +586,10 @@ func (p *Alpha16) ColorModel() color.Model { return color.Alpha16Model } func (p *Alpha16) Bounds() Rectangle { return p.Rect } func (p *Alpha16) At(x, y int) color.Color { + return p.Alpha16At(x, y) +} + +func (p *Alpha16) Alpha16At(x, y int) color.Alpha16 { if !(Point{x, y}.In(p.Rect)) { return color.Alpha16{} } @@ -657,6 +681,10 @@ func (p *Gray) ColorModel() color.Model { return color.GrayModel } func (p *Gray) Bounds() Rectangle { return p.Rect } func (p *Gray) At(x, y int) color.Color { + return p.GrayAt(x, y) +} + +func (p *Gray) GrayAt(x, y int) color.Gray { if !(Point{x, y}.In(p.Rect)) { return color.Gray{} } @@ -732,6 +760,10 @@ func (p *Gray16) ColorModel() 
color.Model { return color.Gray16Model } func (p *Gray16) Bounds() Rectangle { return p.Rect } func (p *Gray16) At(x, y int) color.Color { + return p.Gray16At(x, y) +} + +func (p *Gray16) Gray16At(x, y int) color.Gray16 { if !(Point{x, y}.In(p.Rect)) { return color.Gray16{} } diff --git a/libgo/go/image/jpeg/huffman.go b/libgo/go/image/jpeg/huffman.go index f53d873a538..d4ff4cfa0ce 100644 --- a/libgo/go/image/jpeg/huffman.go +++ b/libgo/go/image/jpeg/huffman.go @@ -4,94 +4,96 @@ package jpeg -import "io" +import ( + "io" +) -// Each code is at most 16 bits long. +// maxCodeLength is the maximum (inclusive) number of bits in a Huffman code. const maxCodeLength = 16 -// Each decoded value is a uint8, so there are at most 256 such values. -const maxNumValues = 256 +// maxNCodes is the maximum (inclusive) number of codes in a Huffman tree. +const maxNCodes = 256 -// Bit stream for the Huffman decoder. -// The n least significant bits of a form the unread bits, to be read in MSB to LSB order. -type bits struct { - a uint32 // accumulator. - m uint32 // mask. m==1<<(n-1) when n>0, with m==0 when n==0. - n int // the number of unread bits in a. -} +// lutSize is the log-2 size of the Huffman decoder's look-up table. +const lutSize = 8 -// Huffman table decoder, specified in section C. +// huffman is a Huffman decoder, specified in section C. type huffman struct { - l [maxCodeLength]int - length int // sum of l[i]. - val [maxNumValues]uint8 // the decoded values, as sorted by their encoding. - size [maxNumValues]int // size[i] is the number of bits to encode val[i]. - code [maxNumValues]int // code[i] is the encoding of val[i]. - minCode [maxCodeLength]int // min codes of length i, or -1 if no codes of that length. - maxCode [maxCodeLength]int // max codes of length i, or -1 if no codes of that length. - valIndex [maxCodeLength]int // index into val of minCode[i]. + // length is the number of codes in the tree. + nCodes int32 + // lut is the look-up table for the next lutSize bits in the bit-stream. + // The high 8 bits of the uint16 are the encoded value. The low 8 bits + // are 1 plus the code length, or 0 if the value is too large to fit in + // lutSize bits. + lut [1 << lutSize]uint16 + // vals are the decoded values, sorted by their encoding. + vals [maxNCodes]uint8 + // minCodes[i] is the minimum code of length i, or -1 if there are no + // codes of that length. + minCodes [maxCodeLength]int32 + // maxCodes[i] is the maximum code of length i, or -1 if there are no + // codes of that length. + maxCodes [maxCodeLength]int32 + // valsIndices[i] is the index into vals of minCodes[i]. + valsIndices [maxCodeLength]int32 } -// Reads bytes from the io.Reader to ensure that bits.n is at least n. -func (d *decoder) ensureNBits(n int) error { - for d.b.n < n { - c, err := d.r.ReadByte() +// errShortHuffmanData means that an unexpected EOF occurred while decoding +// Huffman data. +var errShortHuffmanData = FormatError("short Huffman data") + +// ensureNBits reads bytes from the byte buffer to ensure that d.bits.n is at +// least n. For best performance (avoiding function calls inside hot loops), +// the caller is the one responsible for first checking that d.bits.n < n. 
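Note: the image.go hunk above adds concrete accessors (RGBAAt, GrayAt, and so on) alongside the generic At methods; returning the struct type directly avoids boxing each pixel into a color.Color interface value in tight loops. Illustrative use, assuming only the standard image package:

    package main

    import (
        "fmt"
        "image"
        "image/color"
    )

    func main() {
        m := image.NewRGBA(image.Rect(0, 0, 4, 4))
        m.SetRGBA(1, 2, color.RGBA{R: 0x80, A: 0xff})

        c := m.RGBAAt(1, 2) // concrete color.RGBA, no interface allocation
        fmt.Println(c.R, c.A)

        // The generic form still works, but goes through color.Color.
        r, _, _, a := m.At(1, 2).RGBA()
        fmt.Println(r>>8, a>>8)
    }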
+func (d *decoder) ensureNBits(n int32) error { + for { + c, err := d.readByteStuffedByte() if err != nil { if err == io.EOF { - return FormatError("short Huffman data") + return errShortHuffmanData } return err } - d.b.a = d.b.a<<8 | uint32(c) - d.b.n += 8 - if d.b.m == 0 { - d.b.m = 1 << 7 + d.bits.a = d.bits.a<<8 | uint32(c) + d.bits.n += 8 + if d.bits.m == 0 { + d.bits.m = 1 << 7 } else { - d.b.m <<= 8 - } - // Byte stuffing, specified in section F.1.2.3. - if c == 0xff { - c, err = d.r.ReadByte() - if err != nil { - if err == io.EOF { - return FormatError("short Huffman data") - } - return err - } - if c != 0x00 { - return FormatError("missing 0xff00 sequence") - } + d.bits.m <<= 8 + } + if d.bits.n >= n { + break } } return nil } -// The composition of RECEIVE and EXTEND, specified in section F.2.2.1. +// receiveExtend is the composition of RECEIVE and EXTEND, specified in section +// F.2.2.1. func (d *decoder) receiveExtend(t uint8) (int32, error) { - if d.b.n < int(t) { - if err := d.ensureNBits(int(t)); err != nil { + if d.bits.n < int32(t) { + if err := d.ensureNBits(int32(t)); err != nil { return 0, err } } - d.b.n -= int(t) - d.b.m >>= t + d.bits.n -= int32(t) + d.bits.m >>= t s := int32(1) << t - x := int32(d.b.a>>uint8(d.b.n)) & (s - 1) + x := int32(d.bits.a>>uint8(d.bits.n)) & (s - 1) if x < s>>1 { x += ((-1) << t) + 1 } return x, nil } -// Processes a Define Huffman Table marker, and initializes a huffman struct from its contents. -// Specified in section B.2.4.2. +// processDHT processes a Define Huffman Table marker, and initializes a huffman +// struct from its contents. Specified in section B.2.4.2. func (d *decoder) processDHT(n int) error { for n > 0 { if n < 17 { return FormatError("DHT has wrong length") } - _, err := io.ReadFull(d.r, d.tmp[0:17]) - if err != nil { + if err := d.readFull(d.tmp[:17]); err != nil { return err } tc := d.tmp[0] >> 4 @@ -104,89 +106,112 @@ func (d *decoder) processDHT(n int) error { } h := &d.huff[tc][th] - // Read l and val (and derive length). - h.length = 0 - for i := 0; i < maxCodeLength; i++ { - h.l[i] = int(d.tmp[i+1]) - h.length += h.l[i] + // Read nCodes and h.vals (and derive h.nCodes). + // nCodes[i] is the number of codes with code length i. + // h.nCodes is the total number of codes. + h.nCodes = 0 + var nCodes [maxCodeLength]int32 + for i := range nCodes { + nCodes[i] = int32(d.tmp[i+1]) + h.nCodes += nCodes[i] } - if h.length == 0 { + if h.nCodes == 0 { return FormatError("Huffman table has zero length") } - if h.length > maxNumValues { + if h.nCodes > maxNCodes { return FormatError("Huffman table has excessive length") } - n -= h.length + 17 + n -= int(h.nCodes) + 17 if n < 0 { return FormatError("DHT has wrong length") } - _, err = io.ReadFull(d.r, h.val[0:h.length]) - if err != nil { + if err := d.readFull(h.vals[:h.nCodes]); err != nil { return err } - // Derive size. - k := 0 - for i := 0; i < maxCodeLength; i++ { - for j := 0; j < h.l[i]; j++ { - h.size[k] = i + 1 - k++ + // Derive the look-up table. + for i := range h.lut { + h.lut[i] = 0 + } + var x, code uint32 + for i := uint32(0); i < lutSize; i++ { + code <<= 1 + for j := int32(0); j < nCodes[i]; j++ { + // The codeLength is 1+i, so shift code by 8-(1+i) to + // calculate the high bits for every 8-bit sequence + // whose codeLength's high bits matches code. + // The high 8 bits of lutValue are the encoded value. + // The low 8 bits are 1 plus the codeLength. 
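Note: as the comment above describes, each look-up table entry packs the decoded value into the high byte and 1 plus the code length into the low byte, with a zero entry meaning the code is longer than lutSize (8) bits. A stand-alone sketch of that packing with hypothetical values, not the decoder's own tables:

    package main

    import "fmt"

    func main() {
        // Hypothetical symbol: value 0x2a, canonical code length 3 bits.
        value, codeLen := uint16(0x2a), uint16(3)
        entry := value<<8 | (codeLen + 1) // high byte: value, low byte: 1+length

        // A zero entry would mean the code is longer than the table covers.
        if entry != 0 {
            fmt.Printf("value=%#x length=%d\n", uint8(entry>>8), (entry&0xff)-1)
        }
    }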
+ base := uint8(code << (7 - i)) + lutValue := uint16(h.vals[x])<<8 | uint16(2+i) + for k := uint8(0); k < 1<<(7-i); k++ { + h.lut[base|k] = lutValue + } + code++ + x++ } } - // Derive code. - code := 0 - size := h.size[0] - for i := 0; i < h.length; i++ { - if size != h.size[i] { - code <<= uint8(h.size[i] - size) - size = h.size[i] - } - h.code[i] = code - code++ - } - - // Derive minCode, maxCode, and valIndex. - k = 0 - index := 0 - for i := 0; i < maxCodeLength; i++ { - if h.l[i] == 0 { - h.minCode[i] = -1 - h.maxCode[i] = -1 - h.valIndex[i] = -1 + // Derive minCodes, maxCodes, and valsIndices. + var c, index int32 + for i, n := range nCodes { + if n == 0 { + h.minCodes[i] = -1 + h.maxCodes[i] = -1 + h.valsIndices[i] = -1 } else { - h.minCode[i] = k - h.maxCode[i] = k + h.l[i] - 1 - h.valIndex[i] = index - k += h.l[i] - index += h.l[i] + h.minCodes[i] = c + h.maxCodes[i] = c + n - 1 + h.valsIndices[i] = index + c += n + index += n } - k <<= 1 + c <<= 1 } } return nil } -// Returns the next Huffman-coded value from the bit stream, decoded according to h. -// TODO(nigeltao): This decoding algorithm is simple, but slow. A lookahead table, instead of always -// peeling off only 1 bit at time, ought to be faster. +// decodeHuffman returns the next Huffman-coded value from the bit-stream, +// decoded according to h. func (d *decoder) decodeHuffman(h *huffman) (uint8, error) { - if h.length == 0 { + if h.nCodes == 0 { return 0, FormatError("uninitialized Huffman table") } - for i, code := 0, 0; i < maxCodeLength; i++ { - if d.b.n == 0 { + + if d.bits.n < 8 { + if err := d.ensureNBits(8); err != nil { + if err != errMissingFF00 && err != errShortHuffmanData { + return 0, err + } + // There are no more bytes of data in this segment, but we may still + // be able to read the next symbol out of the previously read bits. + // First, undo the readByte that the ensureNBits call made. 
+ d.unreadByteStuffedByte() + goto slowPath + } + } + if v := h.lut[(d.bits.a>>uint32(d.bits.n-lutSize))&0xff]; v != 0 { + n := (v & 0xff) - 1 + d.bits.n -= int32(n) + d.bits.m >>= n + return uint8(v >> 8), nil + } + +slowPath: + for i, code := 0, int32(0); i < maxCodeLength; i++ { + if d.bits.n == 0 { if err := d.ensureNBits(1); err != nil { return 0, err } } - if d.b.a&d.b.m != 0 { + if d.bits.a&d.bits.m != 0 { code |= 1 } - d.b.n-- - d.b.m >>= 1 - if code <= h.maxCode[i] { - return h.val[h.valIndex[i]+code-h.minCode[i]], nil + d.bits.n-- + d.bits.m >>= 1 + if code <= h.maxCodes[i] { + return h.vals[h.valsIndices[i]+code-h.minCodes[i]], nil } code <<= 1 } @@ -194,26 +219,26 @@ func (d *decoder) decodeHuffman(h *huffman) (uint8, error) { } func (d *decoder) decodeBit() (bool, error) { - if d.b.n == 0 { + if d.bits.n == 0 { if err := d.ensureNBits(1); err != nil { return false, err } } - ret := d.b.a&d.b.m != 0 - d.b.n-- - d.b.m >>= 1 + ret := d.bits.a&d.bits.m != 0 + d.bits.n-- + d.bits.m >>= 1 return ret, nil } -func (d *decoder) decodeBits(n int) (uint32, error) { - if d.b.n < n { +func (d *decoder) decodeBits(n int32) (uint32, error) { + if d.bits.n < n { if err := d.ensureNBits(n); err != nil { return 0, err } } - ret := d.b.a >> uint(d.b.n-n) - ret &= (1 << uint(n)) - 1 - d.b.n -= n - d.b.m >>= uint(n) + ret := d.bits.a >> uint32(d.bits.n-n) + ret &= (1 << uint32(n)) - 1 + d.bits.n -= n + d.bits.m >>= uint32(n) return ret, nil } diff --git a/libgo/go/image/jpeg/reader.go b/libgo/go/image/jpeg/reader.go index 356d56220a7..6d8b1d1d036 100644 --- a/libgo/go/image/jpeg/reader.go +++ b/libgo/go/image/jpeg/reader.go @@ -8,7 +8,6 @@ package jpeg import ( - "bufio" "image" "image/color" "io" @@ -84,15 +83,36 @@ var unzig = [blockSize]int{ 53, 60, 61, 54, 47, 55, 62, 63, } -// If the passed in io.Reader does not also have ReadByte, then Decode will introduce its own buffering. +// Reader is deprecated. type Reader interface { + io.ByteReader io.Reader - ReadByte() (c byte, err error) +} + +// bits holds the unprocessed bits that have been taken from the byte-stream. +// The n least significant bits of a form the unread bits, to be read in MSB to +// LSB order. +type bits struct { + a uint32 // accumulator. + m uint32 // mask. m==1<<(n-1) when n>0, with m==0 when n==0. + n int32 // the number of unread bits in a. } type decoder struct { - r Reader - b bits + r io.Reader + bits bits + // bytes is a byte buffer, similar to a bufio.Reader, except that it + // has to be able to unread more than 1 byte, due to byte stuffing. + // Byte stuffing is specified in section F.1.2.3. + bytes struct { + // buf[i:j] are the buffered bytes read from the underlying + // io.Reader that haven't yet been passed further on. + buf [4096]byte + i, j int + // nUnreadable is the number of bytes to back up i after + // overshooting. It can be 0, 1 or 2. + nUnreadable int + } width, height int img1 *image.Gray img3 *image.YCbCr @@ -104,21 +124,160 @@ type decoder struct { progCoeffs [nColorComponent][]block // Saved state between progressive-mode scans. huff [maxTc + 1][maxTh + 1]huffman quant [maxTq + 1]block // Quantization tables, in zig-zag order. - tmp [1024]byte + tmp [blockSize + 1]byte +} + +// fill fills up the d.bytes.buf buffer from the underlying io.Reader. It +// should only be called when there are no unread bytes in d.bytes. 
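Note: readByteStuffedByte implements JPEG byte stuffing (section F.1.2.3): inside entropy-coded data a literal 0xFF data byte is stored as the pair 0xFF 0x00, so any other byte after 0xFF is a marker. A minimal stand-alone sketch of the unstuffing rule, not the decoder's buffered implementation (the helper name is made up):

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    // unstuff reads one entropy-coded data byte, collapsing 0xFF 0x00 into 0xFF.
    func unstuff(r io.ByteReader) (byte, error) {
        b, err := r.ReadByte()
        if err != nil || b != 0xff {
            return b, err
        }
        next, err := r.ReadByte()
        if err != nil {
            return 0, err
        }
        if next != 0x00 {
            return 0, fmt.Errorf("missing 0xff00 sequence (found marker byte %#x)", next)
        }
        return 0xff, nil
    }

    func main() {
        r := bytes.NewReader([]byte{0x12, 0xff, 0x00, 0x34})
        for {
            b, err := unstuff(r)
            if err != nil {
                break
            }
            fmt.Printf("%#02x ", b) // 0x12 0xff 0x34
        }
        fmt.Println()
    }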
+func (d *decoder) fill() error { + if d.bytes.i != d.bytes.j { + panic("jpeg: fill called when unread bytes exist") + } + // Move the last 2 bytes to the start of the buffer, in case we need + // to call unreadByteStuffedByte. + if d.bytes.j > 2 { + d.bytes.buf[0] = d.bytes.buf[d.bytes.j-2] + d.bytes.buf[1] = d.bytes.buf[d.bytes.j-1] + d.bytes.i, d.bytes.j = 2, 2 + } + // Fill in the rest of the buffer. + n, err := d.r.Read(d.bytes.buf[d.bytes.j:]) + d.bytes.j += n + if n > 0 { + err = nil + } + return err +} + +// unreadByteStuffedByte undoes the most recent readByteStuffedByte call, +// giving a byte of data back from d.bits to d.bytes. The Huffman look-up table +// requires at least 8 bits for look-up, which means that Huffman decoding can +// sometimes overshoot and read one or two too many bytes. Two-byte overshoot +// can happen when expecting to read a 0xff 0x00 byte-stuffed byte. +func (d *decoder) unreadByteStuffedByte() { + if d.bytes.nUnreadable == 0 { + panic("jpeg: unreadByteStuffedByte call cannot be fulfilled") + } + d.bytes.i -= d.bytes.nUnreadable + d.bytes.nUnreadable = 0 + if d.bits.n >= 8 { + d.bits.a >>= 8 + d.bits.n -= 8 + d.bits.m >>= 8 + } +} + +// readByte returns the next byte, whether buffered or not buffered. It does +// not care about byte stuffing. +func (d *decoder) readByte() (x byte, err error) { + for d.bytes.i == d.bytes.j { + if err = d.fill(); err != nil { + return 0, err + } + } + x = d.bytes.buf[d.bytes.i] + d.bytes.i++ + d.bytes.nUnreadable = 0 + return x, nil +} + +// errMissingFF00 means that readByteStuffedByte encountered an 0xff byte (a +// marker byte) that wasn't the expected byte-stuffed sequence 0xff, 0x00. +var errMissingFF00 = FormatError("missing 0xff00 sequence") + +// readByteStuffedByte is like readByte but is for byte-stuffed Huffman data. +func (d *decoder) readByteStuffedByte() (x byte, err error) { + // Take the fast path if d.bytes.buf contains at least two bytes. + if d.bytes.i+2 <= d.bytes.j { + x = d.bytes.buf[d.bytes.i] + d.bytes.i++ + d.bytes.nUnreadable = 1 + if x != 0xff { + return x, err + } + if d.bytes.buf[d.bytes.i] != 0x00 { + return 0, errMissingFF00 + } + d.bytes.i++ + d.bytes.nUnreadable = 2 + return 0xff, nil + } + + x, err = d.readByte() + if err != nil { + return 0, err + } + if x != 0xff { + d.bytes.nUnreadable = 1 + return x, nil + } + + x, err = d.readByte() + if err != nil { + d.bytes.nUnreadable = 1 + return 0, err + } + d.bytes.nUnreadable = 2 + if x != 0x00 { + return 0, errMissingFF00 + } + return 0xff, nil } -// Reads and ignores the next n bytes. +// readFull reads exactly len(p) bytes into p. It does not care about byte +// stuffing. +func (d *decoder) readFull(p []byte) error { + // Unread the overshot bytes, if any. + if d.bytes.nUnreadable != 0 { + if d.bits.n >= 8 { + d.unreadByteStuffedByte() + } + d.bytes.nUnreadable = 0 + } + + for { + n := copy(p, d.bytes.buf[d.bytes.i:d.bytes.j]) + p = p[n:] + d.bytes.i += n + if len(p) == 0 { + break + } + if err := d.fill(); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// ignore ignores the next n bytes. func (d *decoder) ignore(n int) error { - for n > 0 { - m := len(d.tmp) + // Unread the overshot bytes, if any. 
+ if d.bytes.nUnreadable != 0 { + if d.bits.n >= 8 { + d.unreadByteStuffedByte() + } + d.bytes.nUnreadable = 0 + } + + for { + m := d.bytes.j - d.bytes.i if m > n { m = n } - _, err := io.ReadFull(d.r, d.tmp[0:m]) - if err != nil { + d.bytes.i += m + n -= m + if n == 0 { + break + } + if err := d.fill(); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return err } - n -= m } return nil } @@ -133,8 +292,7 @@ func (d *decoder) processSOF(n int) error { default: return UnsupportedError("SOF has wrong length") } - _, err := io.ReadFull(d.r, d.tmp[:n]) - if err != nil { + if err := d.readFull(d.tmp[:n]); err != nil { return err } // We only support 8-bit precision. @@ -187,8 +345,7 @@ func (d *decoder) processSOF(n int) error { func (d *decoder) processDQT(n int) error { const qtLength = 1 + blockSize for ; n >= qtLength; n -= qtLength { - _, err := io.ReadFull(d.r, d.tmp[0:qtLength]) - if err != nil { + if err := d.readFull(d.tmp[:qtLength]); err != nil { return err } pq := d.tmp[0] >> 4 @@ -214,8 +371,7 @@ func (d *decoder) processDRI(n int) error { if n != 2 { return FormatError("DRI has wrong length") } - _, err := io.ReadFull(d.r, d.tmp[0:2]) - if err != nil { + if err := d.readFull(d.tmp[:2]); err != nil { return err } d.ri = int(d.tmp[0])<<8 + int(d.tmp[1]) @@ -224,15 +380,10 @@ func (d *decoder) processDRI(n int) error { // decode reads a JPEG image from r and returns it as an image.Image. func (d *decoder) decode(r io.Reader, configOnly bool) (image.Image, error) { - if rr, ok := r.(Reader); ok { - d.r = rr - } else { - d.r = bufio.NewReader(r) - } + d.r = r // Check for the Start Of Image marker. - _, err := io.ReadFull(d.r, d.tmp[0:2]) - if err != nil { + if err := d.readFull(d.tmp[:2]); err != nil { return nil, err } if d.tmp[0] != 0xff || d.tmp[1] != soiMarker { @@ -241,7 +392,7 @@ func (d *decoder) decode(r io.Reader, configOnly bool) (image.Image, error) { // Process the remaining segments until the End Of Image marker. for { - _, err := io.ReadFull(d.r, d.tmp[0:2]) + err := d.readFull(d.tmp[:2]) if err != nil { return nil, err } @@ -267,7 +418,7 @@ func (d *decoder) decode(r io.Reader, configOnly bool) (image.Image, error) { // Note that extraneous 0xff bytes in e.g. SOS data are escaped as // "\xff\x00", and so are detected a little further down below. d.tmp[0] = d.tmp[1] - d.tmp[1], err = d.r.ReadByte() + d.tmp[1], err = d.readByte() if err != nil { return nil, err } @@ -280,7 +431,7 @@ func (d *decoder) decode(r io.Reader, configOnly bool) (image.Image, error) { for marker == 0xff { // Section B.1.1.2 says, "Any marker may optionally be preceded by any // number of fill bytes, which are bytes assigned code X'FF'". - marker, err = d.r.ReadByte() + marker, err = d.readByte() if err != nil { return nil, err } @@ -300,8 +451,7 @@ func (d *decoder) decode(r io.Reader, configOnly bool) (image.Image, error) { // Read the 16-bit length of the segment. The value includes the 2 bytes for the // length itself, so we subtract 2 to get the number of remaining bytes. 
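Note: the comment above is the usual JPEG framing rule: every marker segment with a payload carries a 16-bit big-endian length that counts the two length bytes themselves. Worked example, assuming the two bytes just read are 0x00 and 0x11:

    package main

    import "fmt"

    func main() {
        b0, b1 := byte(0x00), byte(0x11)
        n := int(b0)<<8 + int(b1) - 2 // bytes remaining after the length field
        fmt.Println(n)                // 15
    }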
- _, err = io.ReadFull(d.r, d.tmp[0:2]) - if err != nil { + if err = d.readFull(d.tmp[:2]); err != nil { return nil, err } n := int(d.tmp[0])<<8 + int(d.tmp[1]) - 2 diff --git a/libgo/go/image/jpeg/reader_test.go b/libgo/go/image/jpeg/reader_test.go index 926bb043448..4de2e8ee737 100644 --- a/libgo/go/image/jpeg/reader_test.go +++ b/libgo/go/image/jpeg/reader_test.go @@ -9,6 +9,7 @@ import ( "fmt" "image" "image/color" + "io" "io/ioutil" "math/rand" "os" @@ -86,7 +87,51 @@ func decodeFile(filename string) (image.Image, error) { } defer f.Close() return Decode(f) +} + +type eofReader struct { + data []byte // deliver from Read without EOF + dataEOF []byte // then deliver from Read with EOF on last chunk + lenAtEOF int +} + +func (r *eofReader) Read(b []byte) (n int, err error) { + if len(r.data) > 0 { + n = copy(b, r.data) + r.data = r.data[n:] + } else { + n = copy(b, r.dataEOF) + r.dataEOF = r.dataEOF[n:] + if len(r.dataEOF) == 0 { + err = io.EOF + if r.lenAtEOF == -1 { + r.lenAtEOF = n + } + } + } + return +} +func TestDecodeEOF(t *testing.T) { + // Check that if reader returns final data and EOF at same time, jpeg handles it. + data, err := ioutil.ReadFile("../testdata/video-001.jpeg") + if err != nil { + t.Fatal(err) + } + + n := len(data) + for i := 0; i < n; { + r := &eofReader{data[:n-i], data[n-i:], -1} + _, err := Decode(r) + if err != nil { + t.Errorf("Decode with Read() = %d, EOF: %v", r.lenAtEOF, err) + } + if i == 0 { + i = 1 + } else { + i *= 2 + } + } } // check checks that the two pix data are equal, within the given bounds. diff --git a/libgo/go/image/jpeg/scan.go b/libgo/go/image/jpeg/scan.go index 559235d5127..2bd1d9d531d 100644 --- a/libgo/go/image/jpeg/scan.go +++ b/libgo/go/image/jpeg/scan.go @@ -6,7 +6,6 @@ package jpeg import ( "image" - "io" ) // makeImg allocates and initializes the destination image. @@ -41,8 +40,7 @@ func (d *decoder) processSOS(n int) error { if n < 6 || 4+2*d.nComp < n || n%2 != 0 { return FormatError("SOS has wrong length") } - _, err := io.ReadFull(d.r, d.tmp[:n]) - if err != nil { + if err := d.readFull(d.tmp[:n]); err != nil { return err } nComp := int(d.tmp[0]) @@ -67,7 +65,13 @@ func (d *decoder) processSOS(n int) error { } scan[i].compIndex = uint8(compIndex) scan[i].td = d.tmp[2+2*i] >> 4 + if scan[i].td > maxTh { + return FormatError("bad Td value") + } scan[i].ta = d.tmp[2+2*i] & 0x0f + if scan[i].ta > maxTh { + return FormatError("bad Ta value") + } } // zigStart and zigEnd are the spectral selection bounds. @@ -119,18 +123,17 @@ func (d *decoder) processSOS(n int) error { } } - d.b = bits{} + d.bits = bits{} mcu, expectedRST := 0, uint8(rst0Marker) var ( // b is the decoded coefficients, in natural (not zig-zag) order. b block dc [nColorComponent]int32 - // mx0 and my0 are the location of the current (in terms of 8x8 blocks). + // bx and by are the location of the current (in terms of 8x8 blocks). // For example, with 4:2:0 chroma subsampling, the block whose top left // pixel co-ordinates are (16, 8) is the third block in the first row: - // mx0 is 2 and my0 is 0, even though the pixel is in the second MCU. - // TODO(nigeltao): rename mx0 and my0 to bx and by? - mx0, my0 int + // bx is 2 and by is 0, even though the pixel is in the second MCU. 
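Note: TestDecodeEOF above leans on the io.Reader contract: a Read call may return n > 0 together with io.EOF, and callers must consume those final bytes before acting on the error. A small reader with that behaviour, similar in spirit to the test's eofReader (the type name here is made up):

    package main

    import (
        "fmt"
        "io"
    )

    // lastChunkReader returns its data and io.EOF from the same Read call,
    // which the io.Reader contract explicitly allows.
    type lastChunkReader struct{ data string }

    func (r lastChunkReader) Read(p []byte) (int, error) {
        n := copy(p, r.data)
        return n, io.EOF
    }

    func main() {
        buf := make([]byte, 8)
        n, err := lastChunkReader{"abc"}.Read(buf)
        fmt.Printf("n=%d err=%v data=%q\n", n, err, buf[:n]) // n=3 err=EOF data="abc"
    }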
+ bx, by int blockCount int ) for my := 0; my < myy; my++ { @@ -165,26 +168,26 @@ func (d *decoder) processSOS(n int) error { // 0 1 2 // 3 4 5 if nComp != 1 { - mx0, my0 = d.comp[compIndex].h*mx, d.comp[compIndex].v*my + bx, by = d.comp[compIndex].h*mx, d.comp[compIndex].v*my if h0 == 1 { - my0 += j + by += j } else { - mx0 += j % 2 - my0 += j / 2 + bx += j % 2 + by += j / 2 } } else { q := mxx * d.comp[compIndex].h - mx0 = blockCount % q - my0 = blockCount / q + bx = blockCount % q + by = blockCount / q blockCount++ - if mx0*8 >= d.width || my0*8 >= d.height { + if bx*8 >= d.width || by*8 >= d.height { continue } } // Load the previous partially decoded coefficients, if applicable. if d.progressive { - b = d.progCoeffs[compIndex][my0*mxx*d.comp[compIndex].h+mx0] + b = d.progCoeffs[compIndex][by*mxx*d.comp[compIndex].h+bx] } else { b = block{} } @@ -217,8 +220,9 @@ func (d *decoder) processSOS(n int) error { d.eobRun-- } else { // Decode the AC coefficients, as specified in section F.2.2.2. + huff := &d.huff[acTable][scan[i].ta] for ; zig <= zigEnd; zig++ { - value, err := d.decodeHuffman(&d.huff[acTable][scan[i].ta]) + value, err := d.decodeHuffman(huff) if err != nil { return err } @@ -238,7 +242,7 @@ func (d *decoder) processSOS(n int) error { if val0 != 0x0f { d.eobRun = uint16(1 << val0) if val0 != 0 { - bits, err := d.decodeBits(int(val0)) + bits, err := d.decodeBits(int32(val0)) if err != nil { return err } @@ -256,7 +260,7 @@ func (d *decoder) processSOS(n int) error { if d.progressive { if zigEnd != blockSize-1 || al != 0 { // We haven't completely decoded this 8x8 block. Save the coefficients. - d.progCoeffs[compIndex][my0*mxx*d.comp[compIndex].h+mx0] = b + d.progCoeffs[compIndex][by*mxx*d.comp[compIndex].h+bx] = b // At this point, we could execute the rest of the loop body to dequantize and // perform the inverse DCT, to save early stages of a progressive image to the // *image.YCbCr buffers (the whole point of progressive encoding), but in Go, @@ -273,15 +277,15 @@ func (d *decoder) processSOS(n int) error { idct(&b) dst, stride := []byte(nil), 0 if d.nComp == nGrayComponent { - dst, stride = d.img1.Pix[8*(my0*d.img1.Stride+mx0):], d.img1.Stride + dst, stride = d.img1.Pix[8*(by*d.img1.Stride+bx):], d.img1.Stride } else { switch compIndex { case 0: - dst, stride = d.img3.Y[8*(my0*d.img3.YStride+mx0):], d.img3.YStride + dst, stride = d.img3.Y[8*(by*d.img3.YStride+bx):], d.img3.YStride case 1: - dst, stride = d.img3.Cb[8*(my0*d.img3.CStride+mx0):], d.img3.CStride + dst, stride = d.img3.Cb[8*(by*d.img3.CStride+bx):], d.img3.CStride case 2: - dst, stride = d.img3.Cr[8*(my0*d.img3.CStride+mx0):], d.img3.CStride + dst, stride = d.img3.Cr[8*(by*d.img3.CStride+bx):], d.img3.CStride default: return UnsupportedError("too many components") } @@ -308,8 +312,7 @@ func (d *decoder) processSOS(n int) error { if d.ri > 0 && mcu%d.ri == 0 && mcu < mxx*myy { // A more sophisticated decoder could use RST[0-7] markers to resynchronize from corrupt input, // but this one assumes well-formed input, and hence the restart marker follows immediately. - _, err := io.ReadFull(d.r, d.tmp[0:2]) - if err != nil { + if err := d.readFull(d.tmp[:2]); err != nil { return err } if d.tmp[0] != 0xff || d.tmp[1] != expectedRST { @@ -320,7 +323,7 @@ func (d *decoder) processSOS(n int) error { expectedRST = rst0Marker } // Reset the Huffman decoder. - d.b = bits{} + d.bits = bits{} // Reset the DC components, as per section F.2.1.3.1. 
dc = [nColorComponent]int32{} // Reset the progressive decoder state, as per section G.1.2.2. @@ -368,7 +371,7 @@ func (d *decoder) refine(b *block, h *huffman, zigStart, zigEnd, delta int32) er if val0 != 0x0f { d.eobRun = uint16(1 << val0) if val0 != 0 { - bits, err := d.decodeBits(int(val0)) + bits, err := d.decodeBits(int32(val0)) if err != nil { return err } diff --git a/libgo/go/image/jpeg/writer.go b/libgo/go/image/jpeg/writer.go index c58fbf30555..91bbde3bf80 100644 --- a/libgo/go/image/jpeg/writer.go +++ b/libgo/go/image/jpeg/writer.go @@ -249,7 +249,7 @@ func (e *encoder) writeByte(b byte) { e.err = e.w.WriteByte(b) } -// emit emits the least significant nBits bits of bits to the bitstream. +// emit emits the least significant nBits bits of bits to the bit-stream. // The precondition is bits < 1<<nBits && nBits <= 16. func (e *encoder) emit(bits, nBits uint32) { nBits += e.nBits @@ -312,32 +312,44 @@ func (e *encoder) writeDQT() { } // writeSOF0 writes the Start Of Frame (Baseline) marker. -func (e *encoder) writeSOF0(size image.Point) { - const markerlen = 8 + 3*nColorComponent +func (e *encoder) writeSOF0(size image.Point, nComponent int) { + markerlen := 8 + 3*nComponent e.writeMarkerHeader(sof0Marker, markerlen) e.buf[0] = 8 // 8-bit color. e.buf[1] = uint8(size.Y >> 8) e.buf[2] = uint8(size.Y & 0xff) e.buf[3] = uint8(size.X >> 8) e.buf[4] = uint8(size.X & 0xff) - e.buf[5] = nColorComponent - for i := 0; i < nColorComponent; i++ { - e.buf[3*i+6] = uint8(i + 1) - // We use 4:2:0 chroma subsampling. - e.buf[3*i+7] = "\x22\x11\x11"[i] - e.buf[3*i+8] = "\x00\x01\x01"[i] + e.buf[5] = uint8(nComponent) + if nComponent == 1 { + e.buf[6] = 1 + // No subsampling for grayscale image. + e.buf[7] = 0x11 + e.buf[8] = 0x00 + } else { + for i := 0; i < nComponent; i++ { + e.buf[3*i+6] = uint8(i + 1) + // We use 4:2:0 chroma subsampling. + e.buf[3*i+7] = "\x22\x11\x11"[i] + e.buf[3*i+8] = "\x00\x01\x01"[i] + } } - e.write(e.buf[:3*(nColorComponent-1)+9]) + e.write(e.buf[:3*(nComponent-1)+9]) } // writeDHT writes the Define Huffman Table marker. -func (e *encoder) writeDHT() { +func (e *encoder) writeDHT(nComponent int) { markerlen := 2 - for _, s := range theHuffmanSpec { + specs := theHuffmanSpec[:] + if nComponent == 1 { + // Drop the Chrominance tables. + specs = specs[:2] + } + for _, s := range specs { markerlen += 1 + 16 + len(s.value) } e.writeMarkerHeader(dhtMarker, markerlen) - for i, s := range theHuffmanSpec { + for i, s := range specs { e.writeByte("\x00\x10\x01\x11"[i]) e.write(s.count[:]) e.write(s.value) @@ -345,8 +357,8 @@ func (e *encoder) writeDHT() { } // writeBlock writes a block of pixel data using the given quantization table, -// returning the post-quantized DC value of the DCT-transformed block. -// b is in natural (not zig-zag) order. +// returning the post-quantized DC value of the DCT-transformed block. b is in +// natural (not zig-zag) order. func (e *encoder) writeBlock(b *block, q quantIndex, prevDC int32) int32 { fdct(b) // Emit the DC delta. @@ -390,6 +402,20 @@ func toYCbCr(m image.Image, p image.Point, yBlock, cbBlock, crBlock *block) { } } +// grayToY stores the 8x8 region of m whose top-left corner is p in yBlock. 
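Note: with the writeSOF0/writeDHT/grayToY changes above, jpeg.Encode emits a single-component, non-subsampled JPEG when given an *image.Gray instead of expanding it to three YCbCr components. The caller-side usage is unchanged; a quick sketch:

    package main

    import (
        "bytes"
        "fmt"
        "image"
        "image/jpeg"
    )

    func main() {
        m := image.NewGray(image.Rect(0, 0, 32, 32))
        for i := range m.Pix {
            m.Pix[i] = uint8(i)
        }
        var buf bytes.Buffer
        if err := jpeg.Encode(&buf, m, nil); err != nil {
            panic(err)
        }
        round, err := jpeg.Decode(&buf)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%T\n", round) // *image.Gray for a single-component JPEG
    }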
+func grayToY(m *image.Gray, p image.Point, yBlock *block) { + b := m.Bounds() + xmax := b.Max.X - 1 + ymax := b.Max.Y - 1 + pix := m.Pix + for j := 0; j < 8; j++ { + for i := 0; i < 8; i++ { + idx := m.PixOffset(min(p.X+i, xmax), min(p.Y+j, ymax)) + yBlock[8*j+i] = int32(pix[idx]) + } + } +} + // rgbaToYCbCr is a specialized version of toYCbCr for image.RGBA images. func rgbaToYCbCr(m *image.RGBA, p image.Point, yBlock, cbBlock, crBlock *block) { b := m.Bounds() @@ -430,7 +456,18 @@ func scale(dst *block, src *[4]block) { } } -// sosHeader is the SOS marker "\xff\xda" followed by 12 bytes: +// sosHeaderY is the SOS marker "\xff\xda" followed by 8 bytes: +// - the marker length "\x00\x08", +// - the number of components "\x01", +// - component 1 uses DC table 0 and AC table 0 "\x01\x00", +// - the bytes "\x00\x3f\x00". Section B.2.3 of the spec says that for +// sequential DCTs, those bytes (8-bit Ss, 8-bit Se, 4-bit Ah, 4-bit Al) +// should be 0x00, 0x3f, 0x00<<4 | 0x00. +var sosHeaderY = []byte{ + 0xff, 0xda, 0x00, 0x08, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x00, +} + +// sosHeaderYCbCr is the SOS marker "\xff\xda" followed by 12 bytes: // - the marker length "\x00\x0c", // - the number of components "\x03", // - component 1 uses DC table 0 and AC table 0 "\x01\x00", @@ -439,14 +476,19 @@ func scale(dst *block, src *[4]block) { // - the bytes "\x00\x3f\x00". Section B.2.3 of the spec says that for // sequential DCTs, those bytes (8-bit Ss, 8-bit Se, 4-bit Ah, 4-bit Al) // should be 0x00, 0x3f, 0x00<<4 | 0x00. -var sosHeader = []byte{ +var sosHeaderYCbCr = []byte{ 0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00, } // writeSOS writes the StartOfScan marker. func (e *encoder) writeSOS(m image.Image) { - e.write(sosHeader) + switch m.(type) { + case *image.Gray: + e.write(sosHeaderY) + default: + e.write(sosHeaderYCbCr) + } var ( // Scratch buffers to hold the YCbCr values. // The blocks are in natural (not zig-zag) order. @@ -456,24 +498,36 @@ func (e *encoder) writeSOS(m image.Image) { prevDCY, prevDCCb, prevDCCr int32 ) bounds := m.Bounds() - rgba, _ := m.(*image.RGBA) - for y := bounds.Min.Y; y < bounds.Max.Y; y += 16 { - for x := bounds.Min.X; x < bounds.Max.X; x += 16 { - for i := 0; i < 4; i++ { - xOff := (i & 1) * 8 - yOff := (i & 2) * 4 - p := image.Pt(x+xOff, y+yOff) - if rgba != nil { - rgbaToYCbCr(rgba, p, &b, &cb[i], &cr[i]) - } else { - toYCbCr(m, p, &b, &cb[i], &cr[i]) - } + switch m := m.(type) { + // TODO(wathiede): switch on m.ColorModel() instead of type. + case *image.Gray: + for y := bounds.Min.Y; y < bounds.Max.Y; y += 8 { + for x := bounds.Min.X; x < bounds.Max.X; x += 8 { + p := image.Pt(x, y) + grayToY(m, p, &b) prevDCY = e.writeBlock(&b, 0, prevDCY) } - scale(&b, &cb) - prevDCCb = e.writeBlock(&b, 1, prevDCCb) - scale(&b, &cr) - prevDCCr = e.writeBlock(&b, 1, prevDCCr) + } + default: + rgba, _ := m.(*image.RGBA) + for y := bounds.Min.Y; y < bounds.Max.Y; y += 16 { + for x := bounds.Min.X; x < bounds.Max.X; x += 16 { + for i := 0; i < 4; i++ { + xOff := (i & 1) * 8 + yOff := (i & 2) * 4 + p := image.Pt(x+xOff, y+yOff) + if rgba != nil { + rgbaToYCbCr(rgba, p, &b, &cb[i], &cr[i]) + } else { + toYCbCr(m, p, &b, &cb[i], &cr[i]) + } + prevDCY = e.writeBlock(&b, 0, prevDCY) + } + scale(&b, &cb) + prevDCCb = e.writeBlock(&b, 1, prevDCCb) + scale(&b, &cr) + prevDCCr = e.writeBlock(&b, 1, prevDCCr) + } } } // Pad the last byte with 1's. 
@@ -532,6 +586,13 @@ func Encode(w io.Writer, m image.Image, o *Options) error { e.quant[i][j] = uint8(x) } } + // Compute number of components based on input image type. + nComponent := 3 + switch m.(type) { + // TODO(wathiede): switch on m.ColorModel() instead of type. + case *image.Gray: + nComponent = 1 + } // Write the Start Of Image marker. e.buf[0] = 0xff e.buf[1] = 0xd8 @@ -539,9 +600,9 @@ func Encode(w io.Writer, m image.Image, o *Options) error { // Write the quantization tables. e.writeDQT() // Write the image dimensions. - e.writeSOF0(b.Size()) + e.writeSOF0(b.Size(), nComponent) // Write the Huffman tables. - e.writeDHT() + e.writeDHT(nComponent) // Write the image data. e.writeSOS(m) // Write the End Of Image marker. diff --git a/libgo/go/image/jpeg/writer_test.go b/libgo/go/image/jpeg/writer_test.go index 514b455dce5..3df3cfcc5bb 100644 --- a/libgo/go/image/jpeg/writer_test.go +++ b/libgo/go/image/jpeg/writer_test.go @@ -160,6 +160,34 @@ func TestWriter(t *testing.T) { } } +// TestWriteGrayscale tests that a grayscale images survives a round-trip +// through encode/decode cycle. +func TestWriteGrayscale(t *testing.T) { + m0 := image.NewGray(image.Rect(0, 0, 32, 32)) + for i := range m0.Pix { + m0.Pix[i] = uint8(i) + } + var buf bytes.Buffer + if err := Encode(&buf, m0, nil); err != nil { + t.Fatal(err) + } + m1, err := Decode(&buf) + if err != nil { + t.Fatal(err) + } + if m0.Bounds() != m1.Bounds() { + t.Fatalf("bounds differ: %v and %v", m0.Bounds(), m1.Bounds()) + } + if _, ok := m1.(*image.Gray); !ok { + t.Errorf("got %T, want *image.Gray", m1) + } + // Compare the average delta to the tolerance level. + want := int64(2 << 8) + if got := averageDelta(m0, m1); got > want { + t.Errorf("average delta too high; got %d, want <= %d", got, want) + } +} + // averageDelta returns the average delta in RGB space. The two images must // have the same bounds. func averageDelta(m0, m1 image.Image) int64 { diff --git a/libgo/go/image/png/paeth.go b/libgo/go/image/png/paeth.go index 37978aa662d..9ed6300c865 100644 --- a/libgo/go/image/png/paeth.go +++ b/libgo/go/image/png/paeth.go @@ -4,6 +4,21 @@ package png +// intSize is either 32 or 64. +const intSize = 32 << (^uint(0) >> 63) + +func abs(x int) int { + // m := -1 if x < 0. m := 0 otherwise. + m := x >> (intSize - 1) + + // In two's complement representation, the negative number + // of any number (except the smallest one) can be computed + // by flipping all the bits and add 1. This is faster than + // code with a branch. + // See Hacker's Delight, section 2-4. + return (x ^ m) - m +} + // paeth implements the Paeth filter function, as per the PNG specification. func paeth(a, b, c uint8) uint8 { // This is an optimized version of the sample code in the PNG spec. @@ -16,16 +31,9 @@ func paeth(a, b, c uint8) uint8 { pc := int(c) pa := int(b) - pc pb := int(a) - pc - pc = pa + pb - if pa < 0 { - pa = -pa - } - if pb < 0 { - pb = -pb - } - if pc < 0 { - pc = -pc - } + pc = abs(pa + pb) + pa = abs(pa) + pb = abs(pb) if pa <= pb && pa <= pc { return a } else if pb <= pc { @@ -44,16 +52,9 @@ func filterPaeth(cdat, pdat []byte, bytesPerPixel int) { b = int(pdat[j]) pa = b - c pb = a - c - pc = pa + pb - if pa < 0 { - pa = -pa - } - if pb < 0 { - pb = -pb - } - if pc < 0 { - pc = -pc - } + pc = abs(pa + pb) + pa = abs(pa) + pb = abs(pb) if pa <= pb && pa <= pc { // No-op. 
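Note: the new abs above uses the standard two's-complement identity: m is 0 for non-negative x and -1 (all ones) for negative x, so (x ^ m) - m either leaves x unchanged or flips the bits and adds one. Worked through for x = -5, using the same definitions as the patch:

    package main

    import "fmt"

    const intSize = 32 << (^uint(0) >> 63) // 32 or 64

    func abs(x int) int {
        m := x >> (intSize - 1) // 0 if x >= 0, -1 (all ones) if x < 0
        return (x ^ m) - m
    }

    func main() {
        // x = -5: m = -1, x^m = 4 (bit flip), 4 - (-1) = 5.
        fmt.Println(abs(-5), abs(7), abs(0)) // 5 7 0
    }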
} else if pb <= pc { diff --git a/libgo/go/image/png/paeth_test.go b/libgo/go/image/png/paeth_test.go index bb084861ae9..cfc1896cd7f 100644 --- a/libgo/go/image/png/paeth_test.go +++ b/libgo/go/image/png/paeth_test.go @@ -10,7 +10,7 @@ import ( "testing" ) -func abs(x int) int { +func slowAbs(x int) int { if x < 0 { return -x } @@ -21,9 +21,9 @@ func abs(x int) int { // It is a straight port of the sample code in the PNG spec, section 9.4. func slowPaeth(a, b, c uint8) uint8 { p := int(a) + int(b) - int(c) - pa := abs(p - int(a)) - pb := abs(p - int(b)) - pc := abs(p - int(c)) + pa := slowAbs(p - int(a)) + pb := slowAbs(p - int(b)) + pc := slowAbs(p - int(c)) if pa <= pb && pa <= pc { return a } else if pb <= pc { diff --git a/libgo/go/image/png/reader.go b/libgo/go/image/png/reader.go index dfe2991024d..0a40ca161d9 100644 --- a/libgo/go/image/png/reader.go +++ b/libgo/go/image/png/reader.go @@ -57,6 +57,29 @@ const ( nFilter = 5 ) +// Interlace type. +const ( + itNone = 0 + itAdam7 = 1 +) + +// interlaceScan defines the placement and size of a pass for Adam7 interlacing. +type interlaceScan struct { + xFactor, yFactor, xOffset, yOffset int +} + +// interlacing defines Adam7 interlacing, with 7 passes of reduced images. +// See http://www.w3.org/TR/PNG/#8Interlace +var interlacing = []interlaceScan{ + {8, 8, 0, 0}, + {8, 8, 4, 0}, + {4, 8, 0, 4}, + {4, 4, 2, 0}, + {2, 4, 0, 2}, + {2, 2, 1, 0}, + {1, 2, 0, 1}, +} + // Decoding stage. // The PNG specification says that the IHDR, PLTE (if present), IDAT and IEND // chunks must appear in that order. There may be multiple IDAT chunks, and @@ -84,6 +107,7 @@ type decoder struct { stage int idatLength uint32 tmp [3 * 256]byte + interlace int } // A FormatError reports that the input is not a valid PNG. @@ -113,9 +137,16 @@ func (d *decoder) parseIHDR(length uint32) error { return err } d.crc.Write(d.tmp[:13]) - if d.tmp[10] != 0 || d.tmp[11] != 0 || d.tmp[12] != 0 { - return UnsupportedError("compression, filter or interlace method") + if d.tmp[10] != 0 { + return UnsupportedError("compression method") } + if d.tmp[11] != 0 { + return UnsupportedError("filter method") + } + if d.tmp[12] != itNone && d.tmp[12] != itAdam7 { + return FormatError("invalid interlace method") + } + d.interlace = int(d.tmp[12]) w := int32(binary.BigEndian.Uint32(d.tmp[0:4])) h := int32(binary.BigEndian.Uint32(d.tmp[4:8])) if w < 0 || h < 0 { @@ -287,7 +318,42 @@ func (d *decoder) decode() (image.Image, error) { return nil, err } defer r.Close() - bitsPerPixel := 0 + var img image.Image + if d.interlace == itNone { + img, err = d.readImagePass(r, 0, false) + } else if d.interlace == itAdam7 { + // Allocate a blank image of the full size. + img, err = d.readImagePass(nil, 0, true) + for pass := 0; pass < 7; pass++ { + imagePass, err := d.readImagePass(r, pass, false) + if err != nil { + return nil, err + } + d.mergePassInto(img, imagePass, pass) + } + } + + // Check for EOF, to verify the zlib checksum. + n := 0 + for i := 0; n == 0 && err == nil; i++ { + if i == 100 { + return nil, io.ErrNoProgress + } + n, err = r.Read(d.tmp[:1]) + } + if err != nil && err != io.EOF { + return nil, FormatError(err.Error()) + } + if n != 0 || d.idatLength != 0 { + return nil, FormatError("too much pixel data") + } + + return img, nil +} + +// readImagePass reads a single image pass, sized according to the pass number. 
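Note: readImagePass sizes each Adam7 pass with a rounding-up division over the xFactor/yFactor/offset table above. A sketch of the same arithmetic for a hypothetical 33x31 image; the seven pass areas add up to exactly 33*31 pixels:

    package main

    import "fmt"

    // The Adam7 pass layout, as in the decoder.
    type interlaceScan struct{ xFactor, yFactor, xOffset, yOffset int }

    var interlacing = []interlaceScan{
        {8, 8, 0, 0}, {8, 8, 4, 0}, {4, 8, 0, 4}, {4, 4, 2, 0},
        {2, 4, 0, 2}, {2, 2, 1, 0}, {1, 2, 0, 1},
    }

    func main() {
        width, height := 33, 31
        for pass, p := range interlacing {
            w := (width - p.xOffset + p.xFactor - 1) / p.xFactor
            h := (height - p.yOffset + p.yFactor - 1) / p.yFactor
            fmt.Printf("pass %d: %dx%d\n", pass, w, h)
        }
    }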
+func (d *decoder) readImagePass(r io.Reader, pass int, allocateOnly bool) (image.Image, error) { + var bitsPerPixel int = 0 pixOffset := 0 var ( gray *image.Gray @@ -299,52 +365,63 @@ func (d *decoder) decode() (image.Image, error) { nrgba64 *image.NRGBA64 img image.Image ) + width, height := d.width, d.height + if d.interlace == itAdam7 && !allocateOnly { + p := interlacing[pass] + // Add the multiplication factor and subtract one, effectively rounding up. + width = (width - p.xOffset + p.xFactor - 1) / p.xFactor + height = (height - p.yOffset + p.yFactor - 1) / p.yFactor + } switch d.cb { case cbG1, cbG2, cbG4, cbG8: bitsPerPixel = d.depth - gray = image.NewGray(image.Rect(0, 0, d.width, d.height)) + gray = image.NewGray(image.Rect(0, 0, width, height)) img = gray case cbGA8: bitsPerPixel = 16 - nrgba = image.NewNRGBA(image.Rect(0, 0, d.width, d.height)) + nrgba = image.NewNRGBA(image.Rect(0, 0, width, height)) img = nrgba case cbTC8: bitsPerPixel = 24 - rgba = image.NewRGBA(image.Rect(0, 0, d.width, d.height)) + rgba = image.NewRGBA(image.Rect(0, 0, width, height)) img = rgba case cbP1, cbP2, cbP4, cbP8: bitsPerPixel = d.depth - paletted = image.NewPaletted(image.Rect(0, 0, d.width, d.height), d.palette) + paletted = image.NewPaletted(image.Rect(0, 0, width, height), d.palette) img = paletted case cbTCA8: bitsPerPixel = 32 - nrgba = image.NewNRGBA(image.Rect(0, 0, d.width, d.height)) + nrgba = image.NewNRGBA(image.Rect(0, 0, width, height)) img = nrgba case cbG16: bitsPerPixel = 16 - gray16 = image.NewGray16(image.Rect(0, 0, d.width, d.height)) + gray16 = image.NewGray16(image.Rect(0, 0, width, height)) img = gray16 case cbGA16: bitsPerPixel = 32 - nrgba64 = image.NewNRGBA64(image.Rect(0, 0, d.width, d.height)) + nrgba64 = image.NewNRGBA64(image.Rect(0, 0, width, height)) img = nrgba64 case cbTC16: bitsPerPixel = 48 - rgba64 = image.NewRGBA64(image.Rect(0, 0, d.width, d.height)) + rgba64 = image.NewRGBA64(image.Rect(0, 0, width, height)) img = rgba64 case cbTCA16: bitsPerPixel = 64 - nrgba64 = image.NewNRGBA64(image.Rect(0, 0, d.width, d.height)) + nrgba64 = image.NewNRGBA64(image.Rect(0, 0, width, height)) img = nrgba64 } + if allocateOnly { + return img, nil + } bytesPerPixel := (bitsPerPixel + 7) / 8 - // cr and pr are the bytes for the current and previous row. // The +1 is for the per-row filter type, which is at cr[0]. - cr := make([]uint8, 1+(bitsPerPixel*d.width+7)/8) - pr := make([]uint8, 1+(bitsPerPixel*d.width+7)/8) + rowSize := 1 + (bitsPerPixel*width+7)/8 + // cr and pr are the bytes for the current and previous row. + cr := make([]uint8, rowSize) + pr := make([]uint8, rowSize) - for y := 0; y < d.height; y++ { + for y := 0; y < height; y++ { // Read the decompressed bytes. _, err := io.ReadFull(r, cr) if err != nil { @@ -381,25 +458,25 @@ func (d *decoder) decode() (image.Image, error) { // Convert from bytes to colors. 
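Note: the rowSize expression above is the standard PNG scanline layout: one filter-type byte followed by ceil(bitsPerPixel*width/8) bytes of packed samples. For example, a 10-pixel-wide 1-bit grayscale pass needs 1 + (1*10+7)/8 = 3 bytes per row. The same arithmetic as a tiny program:

    package main

    import "fmt"

    func main() {
        bitsPerPixel, width := 1, 10
        rowSize := 1 + (bitsPerPixel*width+7)/8 // filter byte + packed pixel bytes
        fmt.Println(rowSize)                    // 3
    }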
switch d.cb { case cbG1: - for x := 0; x < d.width; x += 8 { + for x := 0; x < width; x += 8 { b := cdat[x/8] - for x2 := 0; x2 < 8 && x+x2 < d.width; x2++ { + for x2 := 0; x2 < 8 && x+x2 < width; x2++ { gray.SetGray(x+x2, y, color.Gray{(b >> 7) * 0xff}) b <<= 1 } } case cbG2: - for x := 0; x < d.width; x += 4 { + for x := 0; x < width; x += 4 { b := cdat[x/4] - for x2 := 0; x2 < 4 && x+x2 < d.width; x2++ { + for x2 := 0; x2 < 4 && x+x2 < width; x2++ { gray.SetGray(x+x2, y, color.Gray{(b >> 6) * 0x55}) b <<= 2 } } case cbG4: - for x := 0; x < d.width; x += 2 { + for x := 0; x < width; x += 2 { b := cdat[x/2] - for x2 := 0; x2 < 2 && x+x2 < d.width; x2++ { + for x2 := 0; x2 < 2 && x+x2 < width; x2++ { gray.SetGray(x+x2, y, color.Gray{(b >> 4) * 0x11}) b <<= 4 } @@ -408,13 +485,13 @@ func (d *decoder) decode() (image.Image, error) { copy(gray.Pix[pixOffset:], cdat) pixOffset += gray.Stride case cbGA8: - for x := 0; x < d.width; x++ { + for x := 0; x < width; x++ { ycol := cdat[2*x+0] nrgba.SetNRGBA(x, y, color.NRGBA{ycol, ycol, ycol, cdat[2*x+1]}) } case cbTC8: pix, i, j := rgba.Pix, pixOffset, 0 - for x := 0; x < d.width; x++ { + for x := 0; x < width; x++ { pix[i+0] = cdat[j+0] pix[i+1] = cdat[j+1] pix[i+2] = cdat[j+2] @@ -424,9 +501,9 @@ func (d *decoder) decode() (image.Image, error) { } pixOffset += rgba.Stride case cbP1: - for x := 0; x < d.width; x += 8 { + for x := 0; x < width; x += 8 { b := cdat[x/8] - for x2 := 0; x2 < 8 && x+x2 < d.width; x2++ { + for x2 := 0; x2 < 8 && x+x2 < width; x2++ { idx := b >> 7 if len(paletted.Palette) <= int(idx) { paletted.Palette = paletted.Palette[:int(idx)+1] @@ -436,9 +513,9 @@ func (d *decoder) decode() (image.Image, error) { } } case cbP2: - for x := 0; x < d.width; x += 4 { + for x := 0; x < width; x += 4 { b := cdat[x/4] - for x2 := 0; x2 < 4 && x+x2 < d.width; x2++ { + for x2 := 0; x2 < 4 && x+x2 < width; x2++ { idx := b >> 6 if len(paletted.Palette) <= int(idx) { paletted.Palette = paletted.Palette[:int(idx)+1] @@ -448,9 +525,9 @@ func (d *decoder) decode() (image.Image, error) { } } case cbP4: - for x := 0; x < d.width; x += 2 { + for x := 0; x < width; x += 2 { b := cdat[x/2] - for x2 := 0; x2 < 2 && x+x2 < d.width; x2++ { + for x2 := 0; x2 < 2 && x+x2 < width; x2++ { idx := b >> 4 if len(paletted.Palette) <= int(idx) { paletted.Palette = paletted.Palette[:int(idx)+1] @@ -461,7 +538,7 @@ func (d *decoder) decode() (image.Image, error) { } case cbP8: if len(paletted.Palette) != 255 { - for x := 0; x < d.width; x++ { + for x := 0; x < width; x++ { if len(paletted.Palette) <= int(cdat[x]) { paletted.Palette = paletted.Palette[:int(cdat[x])+1] } @@ -473,25 +550,25 @@ func (d *decoder) decode() (image.Image, error) { copy(nrgba.Pix[pixOffset:], cdat) pixOffset += nrgba.Stride case cbG16: - for x := 0; x < d.width; x++ { + for x := 0; x < width; x++ { ycol := uint16(cdat[2*x+0])<<8 | uint16(cdat[2*x+1]) gray16.SetGray16(x, y, color.Gray16{ycol}) } case cbGA16: - for x := 0; x < d.width; x++ { + for x := 0; x < width; x++ { ycol := uint16(cdat[4*x+0])<<8 | uint16(cdat[4*x+1]) acol := uint16(cdat[4*x+2])<<8 | uint16(cdat[4*x+3]) nrgba64.SetNRGBA64(x, y, color.NRGBA64{ycol, ycol, ycol, acol}) } case cbTC16: - for x := 0; x < d.width; x++ { + for x := 0; x < width; x++ { rcol := uint16(cdat[6*x+0])<<8 | uint16(cdat[6*x+1]) gcol := uint16(cdat[6*x+2])<<8 | uint16(cdat[6*x+3]) bcol := uint16(cdat[6*x+4])<<8 | uint16(cdat[6*x+5]) rgba64.SetRGBA64(x, y, color.RGBA64{rcol, gcol, bcol, 0xffff}) } case cbTCA16: - for x := 0; x < d.width; x++ { + for x := 
0; x < width; x++ { rcol := uint16(cdat[8*x+0])<<8 | uint16(cdat[8*x+1]) gcol := uint16(cdat[8*x+2])<<8 | uint16(cdat[8*x+3]) bcol := uint16(cdat[8*x+4])<<8 | uint16(cdat[8*x+5]) @@ -504,22 +581,66 @@ func (d *decoder) decode() (image.Image, error) { pr, cr = cr, pr } - // Check for EOF, to verify the zlib checksum. - n := 0 - for i := 0; n == 0 && err == nil; i++ { - if i == 100 { - return nil, io.ErrNoProgress + return img, nil +} + +// mergePassInto merges a single pass into a full sized image. +func (d *decoder) mergePassInto(dst image.Image, src image.Image, pass int) { + p := interlacing[pass] + var ( + srcPix []uint8 + dstPix []uint8 + stride int + rect image.Rectangle + bytesPerPixel int + ) + switch target := dst.(type) { + case *image.Alpha: + srcPix = src.(*image.Alpha).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 1 + case *image.Alpha16: + srcPix = src.(*image.Alpha16).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 2 + case *image.Gray: + srcPix = src.(*image.Gray).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 1 + case *image.Gray16: + srcPix = src.(*image.Gray16).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 2 + case *image.NRGBA: + srcPix = src.(*image.NRGBA).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 4 + case *image.NRGBA64: + srcPix = src.(*image.NRGBA64).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 8 + case *image.Paletted: + srcPix = src.(*image.Paletted).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 1 + case *image.RGBA: + srcPix = src.(*image.RGBA).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 4 + case *image.RGBA64: + srcPix = src.(*image.RGBA64).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 8 + } + s, bounds := 0, src.Bounds() + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + dBase := (y*p.yFactor+p.yOffset-rect.Min.Y)*stride + (p.xOffset-rect.Min.X)*bytesPerPixel + for x := bounds.Min.X; x < bounds.Max.X; x++ { + d := dBase + x*p.xFactor*bytesPerPixel + copy(dstPix[d:], srcPix[s:s+bytesPerPixel]) + s += bytesPerPixel } - n, err = r.Read(pr[:1]) } - if err != nil && err != io.EOF { - return nil, FormatError(err.Error()) - } - if n != 0 || d.idatLength != 0 { - return nil, FormatError("too much pixel data") - } - - return img, nil } func (d *decoder) parseIDAT(length uint32) (err error) { diff --git a/libgo/go/image/png/reader_test.go b/libgo/go/image/png/reader_test.go index ac0d949a9d3..ce772eb6f09 100644 --- a/libgo/go/image/png/reader_test.go +++ b/libgo/go/image/png/reader_test.go @@ -30,6 +30,7 @@ var filenames = []string{ "basn3p01", "basn3p02", "basn3p04", + "basn3p04-31i", "basn3p08", "basn3p08-trns", "basn4a08", @@ -186,6 +187,13 @@ func sng(w io.WriteCloser, filename string, png image.Image) { c = 0 } } + if c != 0 { + for c != 8/bitdepth { + b = b << uint(bitdepth) + c++ + } + fmt.Fprintf(w, "%02x", b) + } } io.WriteString(w, "\n") } @@ -235,8 +243,8 @@ func TestReader(t *testing.T) { // Compare the two, in SNG format, line by line. 
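mergePassInto above scatters each pass row back into the full image using the destination stride, the pass factors and the pass offsets. A tiny sketch of that index arithmetic for a 1-byte-per-pixel image whose bounds start at the origin (the stride value is an arbitrary example):

package main

import "fmt"

func main() {
	// Adam7 pass 1: xFactor=8, yFactor=8, xOffset=4, yOffset=0.
	const xFactor, yFactor, xOffset, yOffset = 8, 8, 4, 0
	const stride, bytesPerPixel = 33, 1 // e.g. a 33-pixel-wide Gray image
	for y := 0; y < 2; y++ {
		for x := 0; x < 3; x++ {
			// Destination byte offset of pass pixel (x, y), matching
			// dBase + x*xFactor*bytesPerPixel when rect.Min is (0, 0).
			d := (y*yFactor+yOffset)*stride + (x*xFactor+xOffset)*bytesPerPixel
			fmt.Printf("pass pixel (%d,%d) -> dst byte %d\n", x, y, d)
		}
	}
}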
for { - pdone := pb.Scan() - sdone := sb.Scan() + pdone := !pb.Scan() + sdone := !sb.Scan() if pdone && sdone { break } @@ -348,3 +356,7 @@ func BenchmarkDecodePaletted(b *testing.B) { func BenchmarkDecodeRGB(b *testing.B) { benchmarkDecode(b, "testdata/benchRGB.png", 4) } + +func BenchmarkDecodeInterlacing(b *testing.B) { + benchmarkDecode(b, "testdata/benchRGB-interlace.png", 4) +} diff --git a/libgo/go/image/png/writer.go b/libgo/go/image/png/writer.go index 629452cbfa1..df23270ee97 100644 --- a/libgo/go/image/png/writer.go +++ b/libgo/go/image/png/writer.go @@ -14,7 +14,13 @@ import ( "strconv" ) +// Encoder configures encoding PNG images. +type Encoder struct { + CompressionLevel CompressionLevel +} + type encoder struct { + enc *Encoder w io.Writer m image.Image cb int @@ -24,6 +30,18 @@ type encoder struct { tmp [4 * 256]byte } +type CompressionLevel int + +const ( + DefaultCompression CompressionLevel = 0 + NoCompression CompressionLevel = -1 + BestSpeed CompressionLevel = -2 + BestCompression CompressionLevel = -3 + + // Positive CompressionLevel values are reserved to mean a numeric zlib + // compression level, although that is not implemented yet. +) + // Big-endian. func writeUint32(b []uint8, u uint32) { b[0] = uint8(u >> 24) @@ -188,7 +206,7 @@ func filter(cr *[nFilter][]byte, pr []byte, bpp int) int { // The Paeth filter. sum = 0 for i := 0; i < bpp; i++ { - cdat4[i] = cdat0[i] - paeth(0, pdat[i], 0) + cdat4[i] = cdat0[i] - pdat[i] sum += abs8(cdat4[i]) } for i := bpp; i < n; i++ { @@ -255,8 +273,11 @@ func filter(cr *[nFilter][]byte, pr []byte, bpp int) int { return filter } -func writeImage(w io.Writer, m image.Image, cb int) error { - zw := zlib.NewWriter(w) +func writeImage(w io.Writer, m image.Image, cb int, level int) error { + zw, err := zlib.NewWriterLevel(w, level) + if err != nil { + return err + } defer zw.Close() bpp := 0 // Bytes per pixel. @@ -399,7 +420,10 @@ func writeImage(w io.Writer, m image.Image, cb int) error { } // Apply the filter. - f := filter(&cr, pr, bpp) + f := ftNone + if level != zlib.NoCompression { + f = filter(&cr, pr, bpp) + } // Write the compressed bytes. if _, err := zw.Write(cr[f]); err != nil { @@ -419,18 +443,41 @@ func (e *encoder) writeIDATs() { } var bw *bufio.Writer bw = bufio.NewWriterSize(e, 1<<15) - e.err = writeImage(bw, e.m, e.cb) + e.err = writeImage(bw, e.m, e.cb, levelToZlib(e.enc.CompressionLevel)) if e.err != nil { return } e.err = bw.Flush() } +// This function is required because we want the zero value of +// Encoder.CompressionLevel to map to zlib.DefaultCompression. +func levelToZlib(l CompressionLevel) int { + switch l { + case DefaultCompression: + return zlib.DefaultCompression + case NoCompression: + return zlib.NoCompression + case BestSpeed: + return zlib.BestSpeed + case BestCompression: + return zlib.BestCompression + default: + return zlib.DefaultCompression + } +} + func (e *encoder) writeIEND() { e.writeChunk(nil, "IEND") } -// Encode writes the Image m to w in PNG format. Any Image may be encoded, but -// images that are not image.NRGBA might be encoded lossily. +// Encode writes the Image m to w in PNG format. Any Image may be +// encoded, but images that are not image.NRGBA might be encoded lossily. func Encode(w io.Writer, m image.Image) error { + var e Encoder + return e.Encode(w, m) +} + +// Encode writes the Image m to w in PNG format. +func (enc *Encoder) Encode(w io.Writer, m image.Image) error { // Obviously, negative widths and heights are invalid. 
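The Encoder type above makes the zlib level a per-call choice while the plain Encode function keeps the old default. A minimal usage sketch (the image contents and buffer names are arbitrary):

package main

import (
	"bytes"
	"fmt"
	"image"
	"image/png"
)

func main() {
	img := image.NewNRGBA(image.Rect(0, 0, 100, 100))

	var def, fast bytes.Buffer
	// The zero-value Encoder maps to zlib.DefaultCompression via levelToZlib.
	if err := (&png.Encoder{}).Encode(&def, img); err != nil {
		panic(err)
	}
	// BestSpeed trades compression ratio for encoding time.
	enc := &png.Encoder{CompressionLevel: png.BestSpeed}
	if err := enc.Encode(&fast, img); err != nil {
		panic(err)
	}
	fmt.Println("default:", def.Len(), "bytes; best speed:", fast.Len(), "bytes")
}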
Furthermore, the PNG // spec section 11.2.2 says that zero is invalid. Excessively large images are // also rejected. @@ -440,6 +487,7 @@ func Encode(w io.Writer, m image.Image) error { } var e encoder + e.enc = enc e.w = w e.m = m diff --git a/libgo/go/image/png/writer_test.go b/libgo/go/image/png/writer_test.go index 3116fc9ff94..d67a815698f 100644 --- a/libgo/go/image/png/writer_test.go +++ b/libgo/go/image/png/writer_test.go @@ -40,11 +40,7 @@ func encodeDecode(m image.Image) (image.Image, error) { if err != nil { return nil, err } - m, err = Decode(&b) - if err != nil { - return nil, err - } - return m, nil + return Decode(&b) } func TestWriter(t *testing.T) { @@ -81,6 +77,29 @@ func TestWriter(t *testing.T) { } } +func TestWriterLevels(t *testing.T) { + m := image.NewNRGBA(image.Rect(0, 0, 100, 100)) + + var b1, b2 bytes.Buffer + if err := (&Encoder{}).Encode(&b1, m); err != nil { + t.Fatal(err) + } + noenc := &Encoder{CompressionLevel: NoCompression} + if err := noenc.Encode(&b2, m); err != nil { + t.Fatal(err) + } + + if b2.Len() <= b1.Len() { + t.Error("DefaultCompression encoding was larger than NoCompression encoding") + } + if _, err := Decode(&b1); err != nil { + t.Error("cannot decode DefaultCompression") + } + if _, err := Decode(&b2); err != nil { + t.Error("cannot decode NoCompression") + } +} + func TestSubImage(t *testing.T) { m0 := image.NewRGBA(image.Rect(0, 0, 256, 256)) for y := 0; y < 256; y++ { diff --git a/libgo/go/image/ycbcr.go b/libgo/go/image/ycbcr.go index 5b73bef7895..7c773f2f0a4 100644 --- a/libgo/go/image/ycbcr.go +++ b/libgo/go/image/ycbcr.go @@ -60,6 +60,10 @@ func (p *YCbCr) Bounds() Rectangle { } func (p *YCbCr) At(x, y int) color.Color { + return p.YCbCrAt(x, y) +} + +func (p *YCbCr) YCbCrAt(x, y int) color.YCbCr { if !(Point{x, y}.In(p.Rect)) { return color.YCbCr{} } diff --git a/libgo/go/index/suffixarray/suffixarray_test.go b/libgo/go/index/suffixarray/suffixarray_test.go index df3e449d322..644f00c7577 100644 --- a/libgo/go/index/suffixarray/suffixarray_test.go +++ b/libgo/go/index/suffixarray/suffixarray_test.go @@ -287,7 +287,7 @@ func BenchmarkNewIndexRepeat(b *testing.B) { func BenchmarkSaveRestore(b *testing.B) { b.StopTimer() r := rand.New(rand.NewSource(0x5a77a1)) // guarantee always same sequence - data := make([]byte, 10<<20) // 10MB of data to index + data := make([]byte, 1<<20) // 1MB of data to index for i := range data { data[i] = byte(r.Intn(256)) } diff --git a/libgo/go/internal/syscall/dummy.go b/libgo/go/internal/syscall/dummy.go new file mode 100644 index 00000000000..b00eb273f92 --- /dev/null +++ b/libgo/go/internal/syscall/dummy.go @@ -0,0 +1,5 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syscall diff --git a/libgo/go/internal/syscall/getrandom_linux.go b/libgo/go/internal/syscall/getrandom_linux.go new file mode 100644 index 00000000000..944bab3f5d4 --- /dev/null +++ b/libgo/go/internal/syscall/getrandom_linux.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syscall + +import ( + "runtime" + "sync/atomic" + stdsyscall "syscall" + "unsafe" +) + +var randomTrap = map[string]uintptr{ + "386": 355, + "amd64": 318, + "arm": 384, +}[runtime.GOARCH] + +var randomUnsupported int32 // atomic + +// GetRandomFlag is a flag supported by the getrandom system call. 
+type GetRandomFlag uintptr + +const ( + // GRND_NONBLOCK means return EAGAIN rather than blocking. + GRND_NONBLOCK GetRandomFlag = 0x0001 + + // GRND_RANDOM means use the /dev/random pool instead of /dev/urandom. + GRND_RANDOM GetRandomFlag = 0x0002 +) + +// GetRandom calls the Linux getrandom system call. +// See https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=c6e9d6f38894798696f23c8084ca7edbf16ee895 +func GetRandom(p []byte, flags GetRandomFlag) (n int, err error) { + if randomTrap == 0 { + return 0, stdsyscall.ENOSYS + } + if len(p) == 0 { + return 0, nil + } + if atomic.LoadInt32(&randomUnsupported) != 0 { + return 0, stdsyscall.ENOSYS + } + r1, _, errno := stdsyscall.Syscall(randomTrap, + uintptr(unsafe.Pointer(&p[0])), + uintptr(len(p)), + uintptr(flags)) + if errno != 0 { + if errno == stdsyscall.ENOSYS { + atomic.StoreInt32(&randomUnsupported, 1) + } + return 0, errno + } + return int(r1), nil +} diff --git a/libgo/go/io/io.go b/libgo/go/io/io.go index 022fdb67645..7507a84929f 100644 --- a/libgo/go/io/io.go +++ b/libgo/go/io/io.go @@ -62,8 +62,11 @@ var ErrNoProgress = errors.New("multiple Read calls return no data or error") // allowed EOF behaviors. // // Implementations of Read are discouraged from returning a -// zero byte count with a nil error, and callers should treat -// that situation as a no-op. +// zero byte count with a nil error, except when len(p) == 0. +// Callers should treat a return of 0 and nil as indicating that +// nothing happened; in particular it does not indicate EOF. +// +// Implementations must not retain p. type Reader interface { Read(p []byte) (n int, err error) } @@ -75,6 +78,8 @@ type Reader interface { // and any error encountered that caused the write to stop early. // Write must return a non-nil error if it returns n < len(p). // Write must not modify the slice data, even temporarily. +// +// Implementations must not retain p. type Writer interface { Write(p []byte) (n int, err error) } @@ -192,6 +197,8 @@ type WriterTo interface { // // Clients of ReadAt can execute parallel ReadAt calls on the // same input source. +// +// Implementations must not retain p. type ReaderAt interface { ReadAt(p []byte, off int64) (n int, err error) } @@ -209,6 +216,8 @@ type ReaderAt interface { // // Clients of WriteAt can execute parallel WriteAt calls on the same // destination if the ranges do not overlap. +// +// Implementations must not retain p. 
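GetRandom above is a thin wrapper over the raw getrandom(2) system call, keyed by the per-architecture trap numbers in its table. As a rough illustration only (linux/amd64, trap 318, flag value copied from the constants above), an equivalent direct invocation:

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

// getrandomAMD64 mirrors the wrapper for linux/amd64 only:
// trap 318 with buffer pointer, buffer length and flags.
func getrandomAMD64(p []byte, flags uintptr) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	n, _, errno := syscall.Syscall(318,
		uintptr(unsafe.Pointer(&p[0])), uintptr(len(p)), flags)
	if errno != 0 {
		return 0, errno // e.g. ENOSYS on kernels without getrandom
	}
	return int(n), nil
}

func main() {
	buf := make([]byte, 16)
	n, err := getrandomAMD64(buf, 0x0001) // GRND_NONBLOCK
	fmt.Println(n, err, buf[:n])
}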
type WriterAt interface { WriteAt(p []byte, off int64) (n int, err error) } diff --git a/libgo/go/log/syslog/syslog_test.go b/libgo/go/log/syslog/syslog_test.go index 24a460f6d9e..6a863fed312 100644 --- a/libgo/go/log/syslog/syslog_test.go +++ b/libgo/go/log/syslog/syslog_test.go @@ -314,7 +314,7 @@ func TestConcurrentReconnect(t *testing.T) { count := make(chan int) go func() { ct := 0 - for _ = range done { + for range done { ct++ // we are looking for 500 out of 1000 events // here because lots of log messages are lost diff --git a/libgo/go/log/syslog/syslog_unix.go b/libgo/go/log/syslog/syslog_unix.go index f6d2f1b7a39..1cdabec6922 100644 --- a/libgo/go/log/syslog/syslog_unix.go +++ b/libgo/go/log/syslog/syslog_unix.go @@ -16,7 +16,7 @@ import ( func unixSyslog() (conn serverConn, err error) { logTypes := []string{"unixgram", "unix"} - logPaths := []string{"/dev/log", "/var/run/syslog"} + logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"} for _, network := range logTypes { for _, path := range logPaths { conn, err := net.Dial(network, path) diff --git a/libgo/go/math/all_test.go b/libgo/go/math/all_test.go index 0d8b10f67fa..763efb2e647 100644 --- a/libgo/go/math/all_test.go +++ b/libgo/go/math/all_test.go @@ -456,7 +456,19 @@ var modf = [][2]float64{ {1.0000000000000000e+00, 8.2530809168085506044576505e-01}, {-8.0000000000000000e+00, -6.8592476857560136238589621e-01}, } -var nextafter = []float64{ +var nextafter32 = []float32{ + 4.979012489318848e+00, + 7.738873004913330e+00, + -2.768800258636475e-01, + -5.010602951049805e+00, + 9.636294364929199e+00, + 2.926377534866333e+00, + 5.229084014892578e+00, + 2.727940082550049e+00, + 1.825308203697205e+00, + -8.685923576354980e+00, +} +var nextafter64 = []float64{ 4.97901192488367438926388786e+00, 7.73887247457810545370193722e+00, -2.7688005719200153853520874e-01, @@ -1331,7 +1343,32 @@ var modfSC = [][2]float64{ {NaN(), NaN()}, } -var vfnextafterSC = [][2]float64{ +var vfnextafter32SC = [][2]float32{ + {0, 0}, + {0, float32(Copysign(0, -1))}, + {0, -1}, + {0, float32(NaN())}, + {float32(Copysign(0, -1)), 1}, + {float32(Copysign(0, -1)), 0}, + {float32(Copysign(0, -1)), float32(Copysign(0, -1))}, + {float32(Copysign(0, -1)), -1}, + {float32(NaN()), 0}, + {float32(NaN()), float32(NaN())}, +} +var nextafter32SC = []float32{ + 0, + 0, + -1.401298464e-45, // Float32frombits(0x80000001) + float32(NaN()), + 1.401298464e-45, // Float32frombits(0x00000001) + float32(Copysign(0, -1)), + float32(Copysign(0, -1)), + -1.401298464e-45, // Float32frombits(0x80000001) + float32(NaN()), + float32(NaN()), +} + +var vfnextafter64SC = [][2]float64{ {0, 0}, {0, Copysign(0, -1)}, {0, -1}, @@ -1343,7 +1380,7 @@ var vfnextafterSC = [][2]float64{ {NaN(), 0}, {NaN(), NaN()}, } -var nextafterSC = []float64{ +var nextafter64SC = []float64{ 0, 0, -4.9406564584124654418e-324, // Float64frombits(0x8000000000000001) @@ -2303,15 +2340,29 @@ func TestModf(t *testing.T) { } } -func TestNextafter(t *testing.T) { +func TestNextafter32(t *testing.T) { + for i := 0; i < len(vf); i++ { + vfi := float32(vf[i]) + if f := Nextafter32(vfi, 10); nextafter32[i] != f { + t.Errorf("Nextafter32(%g, %g) = %g want %g", vfi, 10.0, f, nextafter32[i]) + } + } + for i := 0; i < len(vfnextafter32SC); i++ { + if f := Nextafter32(vfnextafter32SC[i][0], vfnextafter32SC[i][1]); !alike(float64(nextafter32SC[i]), float64(f)) { + t.Errorf("Nextafter32(%g, %g) = %g want %g", vfnextafter32SC[i][0], vfnextafter32SC[i][1], f, nextafter32SC[i]) + } + } +} + +func TestNextafter64(t 
*testing.T) { for i := 0; i < len(vf); i++ { - if f := Nextafter(vf[i], 10); nextafter[i] != f { - t.Errorf("Nextafter(%g, %g) = %g want %g", vf[i], 10.0, f, nextafter[i]) + if f := Nextafter(vf[i], 10); nextafter64[i] != f { + t.Errorf("Nextafter64(%g, %g) = %g want %g", vf[i], 10.0, f, nextafter64[i]) } } - for i := 0; i < len(vfnextafterSC); i++ { - if f := Nextafter(vfnextafterSC[i][0], vfnextafterSC[i][1]); !alike(nextafterSC[i], f) { - t.Errorf("Nextafter(%g, %g) = %g want %g", vfnextafterSC[i][0], vfnextafterSC[i][1], f, nextafterSC[i]) + for i := 0; i < len(vfnextafter64SC); i++ { + if f := Nextafter(vfnextafter64SC[i][0], vfnextafter64SC[i][1]); !alike(nextafter64SC[i], f) { + t.Errorf("Nextafter64(%g, %g) = %g want %g", vfnextafter64SC[i][0], vfnextafter64SC[i][1], f, nextafter64SC[i]) } } } @@ -2827,7 +2878,13 @@ func BenchmarkModf(b *testing.B) { } } -func BenchmarkNextafter(b *testing.B) { +func BenchmarkNextafter32(b *testing.B) { + for i := 0; i < b.N; i++ { + Nextafter32(.5, 1) + } +} + +func BenchmarkNextafter64(b *testing.B) { for i := 0; i < b.N; i++ { Nextafter(.5, 1) } diff --git a/libgo/go/math/big/int.go b/libgo/go/math/big/int.go index 269949d6160..d22e39e7c94 100644 --- a/libgo/go/math/big/int.go +++ b/libgo/go/math/big/int.go @@ -510,10 +510,30 @@ func (z *Int) Scan(s fmt.ScanState, ch rune) error { return err } +// low32 returns the least significant 32 bits of z. +func low32(z nat) uint32 { + if len(z) == 0 { + return 0 + } + return uint32(z[0]) +} + +// low64 returns the least significant 64 bits of z. +func low64(z nat) uint64 { + if len(z) == 0 { + return 0 + } + v := uint64(z[0]) + if _W == 32 && len(z) > 1 { + v |= uint64(z[1]) << 32 + } + return v +} + // Int64 returns the int64 representation of x. // If x cannot be represented in an int64, the result is undefined. func (x *Int) Int64() int64 { - v := int64(x.Uint64()) + v := int64(low64(x.abs)) if x.neg { v = -v } @@ -523,14 +543,7 @@ func (x *Int) Int64() int64 { // Uint64 returns the uint64 representation of x. // If x cannot be represented in a uint64, the result is undefined. func (x *Int) Uint64() uint64 { - if len(x.abs) == 0 { - return 0 - } - v := uint64(x.abs[0]) - if _W == 32 && len(x.abs) > 1 { - v |= uint64(x.abs[1]) << 32 - } - return v + return low64(x.abs) } // SetString sets z to the value of s, interpreted in the given base, @@ -592,6 +605,12 @@ func (z *Int) Exp(x, y, m *Int) *Int { z.abs = z.abs.expNN(x.abs, yWords, mWords) z.neg = len(z.abs) > 0 && x.neg && len(yWords) > 0 && yWords[0]&1 == 1 // 0 has no sign + if z.neg && len(mWords) > 0 { + // make modulus result positive + z.abs = z.abs.sub(mWords, z.abs) // z == x**y mod |m| && 0 <= z < |m| + z.neg = false + } + return z } @@ -733,15 +752,16 @@ func (z *Int) Rand(rnd *rand.Rand, n *Int) *Int { return z } -// ModInverse sets z to the multiplicative inverse of g in the group ℤ/pℤ (where -// p is a prime) and returns z. -func (z *Int) ModInverse(g, p *Int) *Int { +// ModInverse sets z to the multiplicative inverse of g in the ring ℤ/nℤ +// and returns z. If g and n are not relatively prime, the result is undefined. +func (z *Int) ModInverse(g, n *Int) *Int { var d Int - d.GCD(z, nil, g, p) - // x and y are such that g*x + p*y = d. Since p is prime, d = 1. Taking - // that modulo p results in g*x = 1, therefore x is the inverse element. + d.GCD(z, nil, g, n) + // x and y are such that g*x + n*y = d. Since g and n are + // relatively prime, d = 1. Taking that modulo n results in + // g*x = 1, therefore x is the inverse element. 
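The relaxed ModInverse contract (any modulus n coprime to g, not just a prime) and the Exp change that reduces a negative result into [0, |m|) are easy to exercise. A short sketch using only the math/big API shown above:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 3 and 10 are coprime and 3*7 = 21 ≡ 1 (mod 10), so the inverse is 7.
	inv := new(big.Int).ModInverse(big.NewInt(3), big.NewInt(10))
	fmt.Println(inv) // 7

	// With a negative base, Exp now returns a value in [0, |m|): (-5)^1 mod 7 = 2.
	z := new(big.Int).Exp(big.NewInt(-5), big.NewInt(1), big.NewInt(7))
	fmt.Println(z) // 2
}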
if z.neg { - z.Add(z, p) + z.Add(z, n) } return z } @@ -997,12 +1017,12 @@ func (z *Int) UnmarshalJSON(text []byte) error { return nil } -// MarshalText implements the encoding.TextMarshaler interface +// MarshalText implements the encoding.TextMarshaler interface. func (z *Int) MarshalText() (text []byte, err error) { return []byte(z.String()), nil } -// UnmarshalText implements the encoding.TextUnmarshaler interface +// UnmarshalText implements the encoding.TextUnmarshaler interface. func (z *Int) UnmarshalText(text []byte) error { if _, ok := z.SetString(string(text), 0); !ok { return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Int", text) diff --git a/libgo/go/math/big/int_test.go b/libgo/go/math/big/int_test.go index 299dc72fb1a..6070cf325d2 100644 --- a/libgo/go/math/big/int_test.go +++ b/libgo/go/math/big/int_test.go @@ -787,6 +787,7 @@ var expTests = []struct { {"-5", "0", "", "1"}, {"5", "1", "", "5"}, {"-5", "1", "", "-5"}, + {"-5", "1", "7", "2"}, {"-2", "3", "2", "0"}, {"5", "2", "", "25"}, {"1", "65537", "2", "1"}, @@ -802,6 +803,13 @@ var expTests = []struct { "29834729834729834729347290846729561262544958723956495615629569234729836259263598127342374289365912465901365498236492183464", "23537740700184054162508175125554701713153216681790245129157191391322321508055833908509185839069455749219131480588829346291", }, + // test case for issue 8822 + { + "-0x1BCE04427D8032319A89E5C4136456671AC620883F2C4139E57F91307C485AD2D6204F4F87A58262652DB5DBBAC72B0613E51B835E7153BEC6068F5C8D696B74DBD18FEC316AEF73985CF0475663208EB46B4F17DD9DA55367B03323E5491A70997B90C059FB34809E6EE55BCFBD5F2F52233BFE62E6AA9E4E26A1D4C2439883D14F2633D55D8AA66A1ACD5595E778AC3A280517F1157989E70C1A437B849F1877B779CC3CDDEDE2DAA6594A6C66D181A00A5F777EE60596D8773998F6E988DEAE4CCA60E4DDCF9590543C89F74F603259FCAD71660D30294FBBE6490300F78A9D63FA660DC9417B8B9DDA28BEB3977B621B988E23D4D954F322C3540541BC649ABD504C50FADFD9F0987D58A2BF689313A285E773FF02899A6EF887D1D4A0D2", + "0xB08FFB20760FFED58FADA86DFEF71AD72AA0FA763219618FE022C197E54708BB1191C66470250FCE8879487507CEE41381CA4D932F81C2B3F1AB20B539D50DCD", + "0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73", + "21484252197776302499639938883777710321993113097987201050501182909581359357618579566746556372589385361683610524730509041328855066514963385522570894839035884713051640171474186548713546686476761306436434146475140156284389181808675016576845833340494848283681088886584219750554408060556769486628029028720727393293111678826356480455433909233520504112074401376133077150471237549474149190242010469539006449596611576612573955754349042329130631128234637924786466585703488460540228477440853493392086251021228087076124706778899179648655221663765993962724699135217212118535057766739392069738618682722216712319320435674779146070442", + }, } func TestExp(t *testing.T) { @@ -833,12 +841,12 @@ func TestExp(t *testing.T) { } if m == nil { - // the result should be the same as for m == 0; - // specifically, there should be no div-zero panic + // The result should be the same as for m == 0; + // specifically, there should be no 
div-zero panic. m = &Int{abs: nat{}} // m != nil && len(m.abs) == 0 z2 := new(Int).Exp(x, y, m) if z2.Cmp(z1) != 0 { - t.Errorf("#%d: got %s want %s", i, z1, z2) + t.Errorf("#%d: got %s want %s", i, z2, z1) } } } @@ -1440,24 +1448,40 @@ func TestNot(t *testing.T) { var modInverseTests = []struct { element string - prime string + modulus string }{ - {"1", "7"}, - {"1", "13"}, + {"1234567", "458948883992"}, {"239487239847", "2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919"}, } func TestModInverse(t *testing.T) { - var element, prime Int + var element, modulus, gcd, inverse Int one := NewInt(1) for i, test := range modInverseTests { (&element).SetString(test.element, 10) - (&prime).SetString(test.prime, 10) - inverse := new(Int).ModInverse(&element, &prime) - inverse.Mul(inverse, &element) - inverse.Mod(inverse, &prime) - if inverse.Cmp(one) != 0 { - t.Errorf("#%d: failed (e·e^(-1)=%s)", i, inverse) + (&modulus).SetString(test.modulus, 10) + (&inverse).ModInverse(&element, &modulus) + (&inverse).Mul(&inverse, &element) + (&inverse).Mod(&inverse, &modulus) + if (&inverse).Cmp(one) != 0 { + t.Errorf("#%d: failed (e·e^(-1)=%s)", i, &inverse) + } + } + // exhaustive test for small values + for n := 2; n < 100; n++ { + (&modulus).SetInt64(int64(n)) + for x := 1; x < n; x++ { + (&element).SetInt64(int64(x)) + (&gcd).GCD(nil, nil, &element, &modulus) + if (&gcd).Cmp(one) != 0 { + continue + } + (&inverse).ModInverse(&element, &modulus) + (&inverse).Mul(&inverse, &element) + (&inverse).Mod(&inverse, &modulus) + if (&inverse).Cmp(one) != 0 { + t.Errorf("ModInverse(%d,%d)*%d%%%d=%d, not 1", &element, &modulus, &element, &modulus, &inverse) + } } } } diff --git a/libgo/go/math/big/rat.go b/libgo/go/math/big/rat.go index f0973b3902f..c5339fe4431 100644 --- a/libgo/go/math/big/rat.go +++ b/libgo/go/math/big/rat.go @@ -64,28 +64,125 @@ func (z *Rat) SetFloat64(f float64) *Rat { return z.norm() } -// isFinite reports whether f represents a finite rational value. -// It is equivalent to !math.IsNan(f) && !math.IsInf(f, 0). -func isFinite(f float64) bool { - return math.Abs(f) <= math.MaxFloat64 -} +// quotToFloat32 returns the non-negative float32 value +// nearest to the quotient a/b, using round-to-even in +// halfway cases. It does not mutate its arguments. +// Preconditions: b is non-zero; a and b have no common factors. +func quotToFloat32(a, b nat) (f float32, exact bool) { + const ( + // float size in bits + Fsize = 32 + + // mantissa + Msize = 23 + Msize1 = Msize + 1 // incl. implicit 1 + Msize2 = Msize1 + 1 + + // exponent + Esize = Fsize - Msize1 + Ebias = 1<<(Esize-1) - 1 + Emin = 1 - Ebias + Emax = Ebias + ) -// low64 returns the least significant 64 bits of natural number z. -func low64(z nat) uint64 { - if len(z) == 0 { - return 0 + // TODO(adonovan): specialize common degenerate cases: 1.0, integers. + alen := a.bitLen() + if alen == 0 { + return 0, true } - if _W == 32 && len(z) > 1 { - return uint64(z[1])<<32 | uint64(z[0]) + blen := b.bitLen() + if blen == 0 { + panic("division by zero") } - return uint64(z[0]) + + // 1. 
Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1) + // (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B). + // This is 2 or 3 more than the float32 mantissa field width of Msize: + // - the optional extra bit is shifted away in step 3 below. + // - the high-order 1 is omitted in "normal" representation; + // - the low-order 1 will be used during rounding then discarded. + exp := alen - blen + var a2, b2 nat + a2 = a2.set(a) + b2 = b2.set(b) + if shift := Msize2 - exp; shift > 0 { + a2 = a2.shl(a2, uint(shift)) + } else if shift < 0 { + b2 = b2.shl(b2, uint(-shift)) + } + + // 2. Compute quotient and remainder (q, r). NB: due to the + // extra shift, the low-order bit of q is logically the + // high-order bit of r. + var q nat + q, r := q.div(a2, a2, b2) // (recycle a2) + mantissa := low32(q) + haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half + + // 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1 + // (in effect---we accomplish this incrementally). + if mantissa>>Msize2 == 1 { + if mantissa&1 == 1 { + haveRem = true + } + mantissa >>= 1 + exp++ + } + if mantissa>>Msize1 != 1 { + panic(fmt.Sprintf("expected exactly %d bits of result", Msize2)) + } + + // 4. Rounding. + if Emin-Msize <= exp && exp <= Emin { + // Denormal case; lose 'shift' bits of precision. + shift := uint(Emin - (exp - 1)) // [1..Esize1) + lostbits := mantissa & (1<<shift - 1) + haveRem = haveRem || lostbits != 0 + mantissa >>= shift + exp = 2 - Ebias // == exp + shift + } + // Round q using round-half-to-even. + exact = !haveRem + if mantissa&1 != 0 { + exact = false + if haveRem || mantissa&2 != 0 { + if mantissa++; mantissa >= 1<<Msize2 { + // Complete rollover 11...1 => 100...0, so shift is safe + mantissa >>= 1 + exp++ + } + } + } + mantissa >>= 1 // discard rounding bit. Mantissa now scaled by 1<<Msize1. + + f = float32(math.Ldexp(float64(mantissa), exp-Msize1)) + if math.IsInf(float64(f), 0) { + exact = false + } + return } -// quotToFloat returns the non-negative IEEE 754 double-precision -// value nearest to the quotient a/b, using round-to-even in halfway -// cases. It does not mutate its arguments. +// quotToFloat64 returns the non-negative float64 value +// nearest to the quotient a/b, using round-to-even in +// halfway cases. It does not mutate its arguments. // Preconditions: b is non-zero; a and b have no common factors. -func quotToFloat(a, b nat) (f float64, exact bool) { +func quotToFloat64(a, b nat) (f float64, exact bool) { + const ( + // float size in bits + Fsize = 64 + + // mantissa + Msize = 52 + Msize1 = Msize + 1 // incl. implicit 1 + Msize2 = Msize1 + 1 + + // exponent + Esize = Fsize - Msize1 + Ebias = 1<<(Esize-1) - 1 + Emin = 1 - Ebias + Emax = Ebias + ) + // TODO(adonovan): specialize common degenerate cases: 1.0, integers. alen := a.bitLen() if alen == 0 { @@ -96,17 +193,17 @@ func quotToFloat(a, b nat) (f float64, exact bool) { panic("division by zero") } - // 1. Left-shift A or B such that quotient A/B is in [1<<53, 1<<55). - // (54 bits if A<B when they are left-aligned, 55 bits if A>=B.) - // This is 2 or 3 more than the float64 mantissa field width of 52: + // 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1) + // (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B). + // This is 2 or 3 more than the float64 mantissa field width of Msize: // - the optional extra bit is shifted away in step 3 below. 
- // - the high-order 1 is omitted in float64 "normal" representation; + // - the high-order 1 is omitted in "normal" representation; // - the low-order 1 will be used during rounding then discarded. exp := alen - blen var a2, b2 nat a2 = a2.set(a) b2 = b2.set(b) - if shift := 54 - exp; shift > 0 { + if shift := Msize2 - exp; shift > 0 { a2 = a2.shl(a2, uint(shift)) } else if shift < 0 { b2 = b2.shl(b2, uint(-shift)) @@ -120,49 +217,65 @@ func quotToFloat(a, b nat) (f float64, exact bool) { mantissa := low64(q) haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half - // 3. If quotient didn't fit in 54 bits, re-do division by b2<<1 + // 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1 // (in effect---we accomplish this incrementally). - if mantissa>>54 == 1 { + if mantissa>>Msize2 == 1 { if mantissa&1 == 1 { haveRem = true } mantissa >>= 1 exp++ } - if mantissa>>53 != 1 { - panic("expected exactly 54 bits of result") + if mantissa>>Msize1 != 1 { + panic(fmt.Sprintf("expected exactly %d bits of result", Msize2)) } // 4. Rounding. - if -1022-52 <= exp && exp <= -1022 { + if Emin-Msize <= exp && exp <= Emin { // Denormal case; lose 'shift' bits of precision. - shift := uint64(-1022 - (exp - 1)) // [1..53) + shift := uint(Emin - (exp - 1)) // [1..Esize1) lostbits := mantissa & (1<<shift - 1) haveRem = haveRem || lostbits != 0 mantissa >>= shift - exp = -1023 + 2 + exp = 2 - Ebias // == exp + shift } // Round q using round-half-to-even. exact = !haveRem if mantissa&1 != 0 { exact = false if haveRem || mantissa&2 != 0 { - if mantissa++; mantissa >= 1<<54 { + if mantissa++; mantissa >= 1<<Msize2 { // Complete rollover 11...1 => 100...0, so shift is safe mantissa >>= 1 exp++ } } } - mantissa >>= 1 // discard rounding bit. Mantissa now scaled by 2^53. + mantissa >>= 1 // discard rounding bit. Mantissa now scaled by 1<<Msize1. - f = math.Ldexp(float64(mantissa), exp-53) + f = math.Ldexp(float64(mantissa), exp-Msize1) if math.IsInf(f, 0) { exact = false } return } +// Float32 returns the nearest float32 value for x and a bool indicating +// whether f represents x exactly. If the magnitude of x is too large to +// be represented by a float32, f is an infinity and exact is false. +// The sign of f always matches the sign of x, even if f == 0. +func (x *Rat) Float32() (f float32, exact bool) { + b := x.b.abs + if len(b) == 0 { + b = b.set(natOne) // materialize denominator + } + f, exact = quotToFloat32(x.a.abs, b) + if x.a.neg { + f = -f + } + return +} + // Float64 returns the nearest float64 value for x and a bool indicating // whether f represents x exactly. If the magnitude of x is too large to // be represented by a float64, f is an infinity and exact is false. @@ -172,7 +285,7 @@ func (x *Rat) Float64() (f float64, exact bool) { if len(b) == 0 { b = b.set(natOne) // materialize denominator } - f, exact = quotToFloat(x.a.abs, b) + f, exact = quotToFloat64(x.a.abs, b) if x.a.neg { f = -f } @@ -439,6 +552,9 @@ func (z *Rat) SetString(s string) (*Rat, bool) { if z.b.abs, _, err = z.b.abs.scan(strings.NewReader(s), 10); err != nil { return nil, false } + if len(z.b.abs) == 0 { + return nil, false + } return z.norm(), true } @@ -586,12 +702,12 @@ func (z *Rat) GobDecode(buf []byte) error { return nil } -// MarshalText implements the encoding.TextMarshaler interface +// MarshalText implements the encoding.TextMarshaler interface. 
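Float32 above reports both the nearest float32 value and whether the conversion was exact. A tiny usage sketch:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 1/3 has no finite binary expansion, so the nearest float32 is inexact.
	f, exact := big.NewRat(1, 3).Float32()
	fmt.Println(f, exact) // 0.33333334 false

	// 3/4 is exactly representable.
	f, exact = big.NewRat(3, 4).Float32()
	fmt.Println(f, exact) // 0.75 true
}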
func (r *Rat) MarshalText() (text []byte, err error) { return []byte(r.RatString()), nil } -// UnmarshalText implements the encoding.TextUnmarshaler interface +// UnmarshalText implements the encoding.TextUnmarshaler interface. func (r *Rat) UnmarshalText(text []byte) error { if _, ok := r.SetString(string(text)); !ok { return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Rat", text) diff --git a/libgo/go/math/big/rat_test.go b/libgo/go/math/big/rat_test.go index 414a67d419d..5dbbb3510f0 100644 --- a/libgo/go/math/big/rat_test.go +++ b/libgo/go/math/big/rat_test.go @@ -89,6 +89,7 @@ var setStringTests = []struct { {"53/70893980658822810696", "53/70893980658822810696", true}, {"106/141787961317645621392", "53/70893980658822810696", true}, {"204211327800791583.81095", "4084226556015831676219/20000", true}, + {in: "1/0", ok: false}, } func TestRatSetString(t *testing.T) { @@ -751,7 +752,6 @@ var float64inputs = []string{ // http://www.exploringbinary.com/java-hangs-when-converting-2-2250738585072012e-308/ "2.2250738585072012e-308", // http://www.exploringbinary.com/php-hangs-on-numeric-value-2-2250738585072011e-308/ - "2.2250738585072011e-308", // A very large number (initially wrongly parsed by the fast algorithm). @@ -790,6 +790,68 @@ var float64inputs = []string{ "1/3", } +// isFinite reports whether f represents a finite rational value. +// It is equivalent to !math.IsNan(f) && !math.IsInf(f, 0). +func isFinite(f float64) bool { + return math.Abs(f) <= math.MaxFloat64 +} + +func TestFloat32SpecialCases(t *testing.T) { + for _, input := range float64inputs { + if strings.HasPrefix(input, "long:") { + if testing.Short() { + continue + } + input = input[len("long:"):] + } + + r, ok := new(Rat).SetString(input) + if !ok { + t.Errorf("Rat.SetString(%q) failed", input) + continue + } + f, exact := r.Float32() + + // 1. Check string -> Rat -> float32 conversions are + // consistent with strconv.ParseFloat. + // Skip this check if the input uses "a/b" rational syntax. + if !strings.Contains(input, "/") { + e64, _ := strconv.ParseFloat(input, 32) + e := float32(e64) + + // Careful: negative Rats too small for + // float64 become -0, but Rat obviously cannot + // preserve the sign from SetString("-0"). + switch { + case math.Float32bits(e) == math.Float32bits(f): + // Ok: bitwise equal. + case f == 0 && r.Num().BitLen() == 0: + // Ok: Rat(0) is equivalent to both +/- float64(0). + default: + t.Errorf("strconv.ParseFloat(%q) = %g (%b), want %g (%b); delta = %g", input, e, e, f, f, f-e) + } + } + + if !isFinite(float64(f)) { + continue + } + + // 2. Check f is best approximation to r. + if !checkIsBestApprox32(t, f, r) { + // Append context information. + t.Errorf("(input was %q)", input) + } + + // 3. Check f->R->f roundtrip is non-lossy. + checkNonLossyRoundtrip32(t, f) + + // 4. Check exactness using slow algorithm. + if wasExact := new(Rat).SetFloat64(float64(f)).Cmp(r) == 0; wasExact != exact { + t.Errorf("Rat.SetString(%q).Float32().exact = %t, want %t", input, exact, wasExact) + } + } +} + func TestFloat64SpecialCases(t *testing.T) { for _, input := range float64inputs { if strings.HasPrefix(input, "long:") { @@ -830,13 +892,13 @@ func TestFloat64SpecialCases(t *testing.T) { } // 2. Check f is best approximation to r. - if !checkIsBestApprox(t, f, r) { + if !checkIsBestApprox64(t, f, r) { // Append context information. t.Errorf("(input was %q)", input) } // 3. Check f->R->f roundtrip is non-lossy. - checkNonLossyRoundtrip(t, f) + checkNonLossyRoundtrip64(t, f) // 4. 
Check exactness using slow algorithm. if wasExact := new(Rat).SetFloat64(f).Cmp(r) == 0; wasExact != exact { @@ -845,6 +907,54 @@ func TestFloat64SpecialCases(t *testing.T) { } } +func TestFloat32Distribution(t *testing.T) { + // Generate a distribution of (sign, mantissa, exp) values + // broader than the float32 range, and check Rat.Float32() + // always picks the closest float32 approximation. + var add = []int64{ + 0, + 1, + 3, + 5, + 7, + 9, + 11, + } + var winc, einc = uint64(1), 1 // soak test (~1.5s on x86-64) + if testing.Short() { + winc, einc = 5, 15 // quick test (~60ms on x86-64) + } + + for _, sign := range "+-" { + for _, a := range add { + for wid := uint64(0); wid < 30; wid += winc { + b := 1<<wid + a + if sign == '-' { + b = -b + } + for exp := -150; exp < 150; exp += einc { + num, den := NewInt(b), NewInt(1) + if exp > 0 { + num.Lsh(num, uint(exp)) + } else { + den.Lsh(den, uint(-exp)) + } + r := new(Rat).SetFrac(num, den) + f, _ := r.Float32() + + if !checkIsBestApprox32(t, f, r) { + // Append context information. + t.Errorf("(input was mantissa %#x, exp %d; f = %g (%b); f ~ %g; r = %v)", + b, exp, f, f, math.Ldexp(float64(b), exp), r) + } + + checkNonLossyRoundtrip32(t, f) + } + } + } + } +} + func TestFloat64Distribution(t *testing.T) { // Generate a distribution of (sign, mantissa, exp) values // broader than the float64 range, and check Rat.Float64() @@ -858,7 +968,7 @@ func TestFloat64Distribution(t *testing.T) { 9, 11, } - var winc, einc = uint64(1), int(1) // soak test (~75s on x86-64) + var winc, einc = uint64(1), 1 // soak test (~75s on x86-64) if testing.Short() { winc, einc = 10, 500 // quick test (~12ms on x86-64) } @@ -866,7 +976,7 @@ func TestFloat64Distribution(t *testing.T) { for _, sign := range "+-" { for _, a := range add { for wid := uint64(0); wid < 60; wid += winc { - b := int64(1<<wid + a) + b := 1<<wid + a if sign == '-' { b = -b } @@ -880,20 +990,20 @@ func TestFloat64Distribution(t *testing.T) { r := new(Rat).SetFrac(num, den) f, _ := r.Float64() - if !checkIsBestApprox(t, f, r) { + if !checkIsBestApprox64(t, f, r) { // Append context information. t.Errorf("(input was mantissa %#x, exp %d; f = %g (%b); f ~ %g; r = %v)", b, exp, f, f, math.Ldexp(float64(b), exp), r) } - checkNonLossyRoundtrip(t, f) + checkNonLossyRoundtrip64(t, f) } } } } } -// TestFloat64NonFinite checks that SetFloat64 of a non-finite value +// TestSetFloat64NonFinite checks that SetFloat64 of a non-finite value // returns nil. func TestSetFloat64NonFinite(t *testing.T) { for _, f := range []float64{math.NaN(), math.Inf(+1), math.Inf(-1)} { @@ -904,9 +1014,27 @@ func TestSetFloat64NonFinite(t *testing.T) { } } -// checkNonLossyRoundtrip checks that a float->Rat->float roundtrip is +// checkNonLossyRoundtrip32 checks that a float->Rat->float roundtrip is // non-lossy for finite f. -func checkNonLossyRoundtrip(t *testing.T, f float64) { +func checkNonLossyRoundtrip32(t *testing.T, f float32) { + if !isFinite(float64(f)) { + return + } + r := new(Rat).SetFloat64(float64(f)) + if r == nil { + t.Errorf("Rat.SetFloat64(float64(%g) (%b)) == nil", f, f) + return + } + f2, exact := r.Float32() + if f != f2 || !exact { + t.Errorf("Rat.SetFloat64(float64(%g)).Float32() = %g (%b), %v, want %g (%b), %v; delta = %b", + f, f2, f2, exact, f, f, true, f2-f) + } +} + +// checkNonLossyRoundtrip64 checks that a float->Rat->float roundtrip is +// non-lossy for finite f. 
+func checkNonLossyRoundtrip64(t *testing.T, f float64) { if !isFinite(f) { return } @@ -928,10 +1056,47 @@ func delta(r *Rat, f float64) *Rat { return d.Abs(d) } -// checkIsBestApprox checks that f is the best possible float64 +// checkIsBestApprox32 checks that f is the best possible float32 +// approximation of r. +// Returns true on success. +func checkIsBestApprox32(t *testing.T, f float32, r *Rat) bool { + if math.Abs(float64(f)) >= math.MaxFloat32 { + // Cannot check +Inf, -Inf, nor the float next to them (MaxFloat32). + // But we have tests for these special cases. + return true + } + + // r must be strictly between f0 and f1, the floats bracketing f. + f0 := math.Nextafter32(f, float32(math.Inf(-1))) + f1 := math.Nextafter32(f, float32(math.Inf(+1))) + + // For f to be correct, r must be closer to f than to f0 or f1. + df := delta(r, float64(f)) + df0 := delta(r, float64(f0)) + df1 := delta(r, float64(f1)) + if df.Cmp(df0) > 0 { + t.Errorf("Rat(%v).Float32() = %g (%b), but previous float32 %g (%b) is closer", r, f, f, f0, f0) + return false + } + if df.Cmp(df1) > 0 { + t.Errorf("Rat(%v).Float32() = %g (%b), but next float32 %g (%b) is closer", r, f, f, f1, f1) + return false + } + if df.Cmp(df0) == 0 && !isEven32(f) { + t.Errorf("Rat(%v).Float32() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f0, f0) + return false + } + if df.Cmp(df1) == 0 && !isEven32(f) { + t.Errorf("Rat(%v).Float32() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f1, f1) + return false + } + return true +} + +// checkIsBestApprox64 checks that f is the best possible float64 // approximation of r. // Returns true on success. -func checkIsBestApprox(t *testing.T, f float64, r *Rat) bool { +func checkIsBestApprox64(t *testing.T, f float64, r *Rat) bool { if math.Abs(f) >= math.MaxFloat64 { // Cannot check +Inf, -Inf, nor the float next to them (MaxFloat64). // But we have tests for these special cases. @@ -954,18 +1119,19 @@ func checkIsBestApprox(t *testing.T, f float64, r *Rat) bool { t.Errorf("Rat(%v).Float64() = %g (%b), but next float64 %g (%b) is closer", r, f, f, f1, f1) return false } - if df.Cmp(df0) == 0 && !isEven(f) { + if df.Cmp(df0) == 0 && !isEven64(f) { t.Errorf("Rat(%v).Float64() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f0, f0) return false } - if df.Cmp(df1) == 0 && !isEven(f) { + if df.Cmp(df1) == 0 && !isEven64(f) { t.Errorf("Rat(%v).Float64() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f1, f1) return false } return true } -func isEven(f float64) bool { return math.Float64bits(f)&1 == 0 } +func isEven32(f float32) bool { return math.Float32bits(f)&1 == 0 } +func isEven64(f float64) bool { return math.Float64bits(f)&1 == 0 } func TestIsFinite(t *testing.T) { finites := []float64{ diff --git a/libgo/go/math/nextafter.go b/libgo/go/math/nextafter.go index 7c4b5bcdfef..bbb139986aa 100644 --- a/libgo/go/math/nextafter.go +++ b/libgo/go/math/nextafter.go @@ -4,12 +4,32 @@ package math -// Nextafter returns the next representable value after x towards y. -// If x == y, then x is returned. -// -// Special cases are: -// Nextafter(NaN, y) = NaN -// Nextafter(x, NaN) = NaN +// Nextafter32 returns the next representable float32 value after x towards y. 
+// Special cases: +// Nextafter32(x, x) = x +// Nextafter32(NaN, y) = NaN +// Nextafter32(x, NaN) = NaN +func Nextafter32(x, y float32) (r float32) { + switch { + case IsNaN(float64(x)) || IsNaN(float64(y)): // special case + r = float32(NaN()) + case x == y: + r = x + case x == 0: + r = float32(Copysign(float64(Float32frombits(1)), float64(y))) + case (y > x) == (x > 0): + r = Float32frombits(Float32bits(x) + 1) + default: + r = Float32frombits(Float32bits(x) - 1) + } + return +} + +// Nextafter returns the next representable float64 value after x towards y. +// Special cases: +// Nextafter64(x, x) = x +// Nextafter64(NaN, y) = NaN +// Nextafter64(x, NaN) = NaN func Nextafter(x, y float64) (r float64) { switch { case IsNaN(x) || IsNaN(y): // special case diff --git a/libgo/go/math/sqrt.go b/libgo/go/math/sqrt.go index 78475973eb0..56122b59814 100644 --- a/libgo/go/math/sqrt.go +++ b/libgo/go/math/sqrt.go @@ -87,7 +87,7 @@ func Sqrt(x float64) float64 { // // // Notes: Rounding mode detection omitted. The constants "mask", "shift", -// and "bias" are found in src/pkg/math/bits.go +// and "bias" are found in src/math/bits.go // Sqrt returns the square root of x. // diff --git a/libgo/go/mime/multipart/multipart.go b/libgo/go/mime/multipart/multipart.go index 7382efab967..01a667d930d 100644 --- a/libgo/go/mime/multipart/multipart.go +++ b/libgo/go/mime/multipart/multipart.go @@ -90,8 +90,7 @@ func (p *Part) parseContentDisposition() { func NewReader(r io.Reader, boundary string) *Reader { b := []byte("\r\n--" + boundary + "--") return &Reader{ - bufReader: bufio.NewReader(r), - + bufReader: bufio.NewReader(r), nl: b[:2], nlDashBoundary: b[:len(b)-2], dashBoundaryDash: b[2:], diff --git a/libgo/go/mime/multipart/writer_test.go b/libgo/go/mime/multipart/writer_test.go index 52d68bcb68c..ba00c97ecee 100644 --- a/libgo/go/mime/multipart/writer_test.go +++ b/libgo/go/mime/multipart/writer_test.go @@ -111,3 +111,18 @@ func TestWriterSetBoundary(t *testing.T) { t.Errorf("expected my-separator in output. got: %q", got) } } + +func TestWriterBoundaryGoroutines(t *testing.T) { + // Verify there's no data race accessing any lazy boundary if it's used by + // different goroutines. 
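Nextafter32 above steps exactly one representable float32 at a time by nudging the bit pattern. A quick look at the call site:

package main

import (
	"fmt"
	"math"
)

func main() {
	// One ulp above 1 (bit pattern 0x3f800001).
	up := math.Nextafter32(1, 2)
	fmt.Printf("%.10g (bits %#08x)\n", up, math.Float32bits(up))

	// Stepping toward the argument itself is the identity.
	fmt.Println(math.Nextafter32(1, 1) == 1) // true

	// From +0 toward -1: the negative float32 closest to zero (a denormal).
	fmt.Printf("%g\n", math.Nextafter32(0, -1))
}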
This was previously broken by + // https://codereview.appspot.com/95760043/ and reverted in + // https://codereview.appspot.com/117600043/ + w := NewWriter(ioutil.Discard) + done := make(chan int) + go func() { + w.CreateFormField("foo") + done <- 1 + }() + w.Boundary() + <-done +} diff --git a/libgo/go/mime/type.go b/libgo/go/mime/type.go index 00cff263bad..ffda1f0ce5f 100644 --- a/libgo/go/mime/type.go +++ b/libgo/go/mime/type.go @@ -11,26 +11,41 @@ import ( "sync" ) -var mimeTypes = map[string]string{ - ".css": "text/css; charset=utf-8", - ".gif": "image/gif", - ".htm": "text/html; charset=utf-8", - ".html": "text/html; charset=utf-8", - ".jpg": "image/jpeg", - ".js": "application/x-javascript", - ".pdf": "application/pdf", - ".png": "image/png", - ".xml": "text/xml; charset=utf-8", -} +var ( + mimeLock sync.RWMutex + mimeTypesLower = map[string]string{ + ".css": "text/css; charset=utf-8", + ".gif": "image/gif", + ".htm": "text/html; charset=utf-8", + ".html": "text/html; charset=utf-8", + ".jpg": "image/jpeg", + ".js": "application/x-javascript", + ".pdf": "application/pdf", + ".png": "image/png", + ".xml": "text/xml; charset=utf-8", + } + mimeTypes = clone(mimeTypesLower) +) -var mimeLock sync.RWMutex +func clone(m map[string]string) map[string]string { + m2 := make(map[string]string, len(m)) + for k, v := range m { + m2[k] = v + if strings.ToLower(k) != k { + panic("keys in mimeTypesLower must be lowercase") + } + } + return m2 +} -var once sync.Once +var once sync.Once // guards initMime // TypeByExtension returns the MIME type associated with the file extension ext. // The extension ext should begin with a leading dot, as in ".html". // When ext has no associated type, TypeByExtension returns "". // +// Extensions are looked up first case-sensitively, then case-insensitively. +// // The built-in table is small but on unix it is augmented by the local // system's mime.types file(s) if available under one or more of these // names: @@ -39,23 +54,49 @@ var once sync.Once // /etc/apache2/mime.types // /etc/apache/mime.types // -// Windows system mime types are extracted from registry. +// On Windows, MIME types are extracted from the registry. // // Text types have the charset parameter set to "utf-8" by default. func TypeByExtension(ext string) string { once.Do(initMime) mimeLock.RLock() - typename := mimeTypes[ext] - mimeLock.RUnlock() - return typename + defer mimeLock.RUnlock() + + // Case-sensitive lookup. + v := mimeTypes[ext] + if v != "" { + return v + } + + // Case-insensitive lookup. + // Optimistically assume a short ASCII extension and be + // allocation-free in that case. + var buf [10]byte + lower := buf[:0] + const utf8RuneSelf = 0x80 // from utf8 package, but not importing it. + for i := 0; i < len(ext); i++ { + c := ext[i] + if c >= utf8RuneSelf { + // Slow path. + return mimeTypesLower[strings.ToLower(ext)] + } + if 'A' <= c && c <= 'Z' { + lower = append(lower, c+('a'-'A')) + } else { + lower = append(lower, c) + } + } + // The conversion from []byte to string doesn't allocate in + // a map lookup. + return mimeTypesLower[string(lower)] } // AddExtensionType sets the MIME type associated with -// the extension ext to typ. The extension should begin with +// the extension ext to typ. The extension should begin with // a leading dot, as in ".html". func AddExtensionType(ext, typ string) error { - if ext == "" || ext[0] != '.' 
{ - return fmt.Errorf(`mime: extension "%s" misses dot`, ext) + if !strings.HasPrefix(ext, ".") { + return fmt.Errorf(`mime: extension %q misses dot`, ext) } once.Do(initMime) return setExtensionType(ext, typ) @@ -70,8 +111,11 @@ func setExtensionType(extension, mimeType string) error { param["charset"] = "utf-8" mimeType = FormatMediaType(mimeType, param) } + extLower := strings.ToLower(extension) + mimeLock.Lock() mimeTypes[extension] = mimeType + mimeTypesLower[extLower] = mimeType mimeLock.Unlock() return nil } diff --git a/libgo/go/mime/type_plan9.go b/libgo/go/mime/type_plan9.go index b8f0511ee7d..8cbf6777f19 100644 --- a/libgo/go/mime/type_plan9.go +++ b/libgo/go/mime/type_plan9.go @@ -48,6 +48,6 @@ func initMimeForTests() map[string]string { return map[string]string{ ".t1": "application/test", ".t2": "text/test; charset=utf-8", - ".png": "image/png", + ".pNg": "image/png", } } diff --git a/libgo/go/mime/type_test.go b/libgo/go/mime/type_test.go index 07e1cd5daec..e4ec25450ce 100644 --- a/libgo/go/mime/type_test.go +++ b/libgo/go/mime/type_test.go @@ -4,7 +4,9 @@ package mime -import "testing" +import ( + "testing" +) var typeTests = initMimeForTests() @@ -14,16 +16,40 @@ func TestTypeByExtension(t *testing.T) { if val != want { t.Errorf("TypeByExtension(%q) = %q, want %q", ext, val, want) } - } } -func TestCustomExtension(t *testing.T) { - custom := "text/xml; charset=iso-8859-1" - if error := AddExtensionType(".xml", custom); error != nil { - t.Fatalf("error %s for AddExtension(%s)", error, custom) +func TestTypeByExtensionCase(t *testing.T) { + const custom = "test/test; charset=iso-8859-1" + const caps = "test/test; WAS=ALLCAPS" + if err := AddExtensionType(".TEST", caps); err != nil { + t.Fatalf("error %s for AddExtension(%s)", err, custom) + } + if err := AddExtensionType(".tesT", custom); err != nil { + t.Fatalf("error %s for AddExtension(%s)", err, custom) + } + + // case-sensitive lookup + if got := TypeByExtension(".tesT"); got != custom { + t.Fatalf("for .tesT, got %q; want %q", got, custom) + } + if got := TypeByExtension(".TEST"); got != caps { + t.Fatalf("for .TEST, got %q; want %s", got, caps) } - if registered := TypeByExtension(".xml"); registered != custom { - t.Fatalf("registered %s instead of %s", registered, custom) + + // case-insensitive + if got := TypeByExtension(".TesT"); got != custom { + t.Fatalf("for .TesT, got %q; want %q", got, custom) + } +} + +func TestLookupMallocs(t *testing.T) { + n := testing.AllocsPerRun(10000, func() { + TypeByExtension(".html") + TypeByExtension(".HtML") + }) + // Changed from 0 to 1 for gccgo, pending escape analysis. 
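The case handling above keeps the caller's exact extension alongside a lowercased copy, so exact-case matches win and everything else falls back to the lowercase table. A small sketch of the resulting behaviour (the test/test type is an arbitrary example):

package main

import (
	"fmt"
	"mime"
)

func main() {
	// Built-in entries are lowercase; lookups also match case-insensitively.
	fmt.Println(mime.TypeByExtension(".HtMl")) // text/html; charset=utf-8

	// A mixed-case registration wins for that exact spelling and also
	// updates the lowercased table.
	if err := mime.AddExtensionType(".TesT", "test/test"); err != nil {
		panic(err)
	}
	fmt.Println(mime.TypeByExtension(".TesT")) // test/test
	fmt.Println(mime.TypeByExtension(".test")) // test/test
}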
+ if n > 1 { + t.Errorf("allocs = %v; want 0", n) } } diff --git a/libgo/go/mime/type_unix.go b/libgo/go/mime/type_unix.go index 1d394315a49..3e404cf7429 100644 --- a/libgo/go/mime/type_unix.go +++ b/libgo/go/mime/type_unix.go @@ -53,7 +53,7 @@ func initMime() { func initMimeForTests() map[string]string { typeFiles = []string{"testdata/test.types"} return map[string]string{ - ".t1": "application/test", + ".T1": "application/test", ".t2": "text/test; charset=utf-8", ".png": "image/png", } diff --git a/libgo/go/mime/type_windows.go b/libgo/go/mime/type_windows.go index 180f948d16f..ae758d78b31 100644 --- a/libgo/go/mime/type_windows.go +++ b/libgo/go/mime/type_windows.go @@ -58,6 +58,6 @@ func initMime() { func initMimeForTests() map[string]string { return map[string]string{ - ".png": "image/png", + ".PnG": "image/png", } } diff --git a/libgo/go/net/cgo_android.go b/libgo/go/net/cgo_android.go new file mode 100644 index 00000000000..3819ce56a4f --- /dev/null +++ b/libgo/go/net/cgo_android.go @@ -0,0 +1,14 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build cgo,!netgo + +package net + +//#include <netdb.h> +import "C" + +func cgoAddrInfoFlags() C.int { + return C.AI_CANONNAME +} diff --git a/libgo/go/net/cgo_linux.go b/libgo/go/net/cgo_linux.go index 77522f9141b..0e332261acc 100644 --- a/libgo/go/net/cgo_linux.go +++ b/libgo/go/net/cgo_linux.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build cgo,!netgo +// +build !android,cgo,!netgo package net diff --git a/libgo/go/net/conn_test.go b/libgo/go/net/conn_test.go index 37bb4e2c071..9c9d1a8057d 100644 --- a/libgo/go/net/conn_test.go +++ b/libgo/go/net/conn_test.go @@ -38,7 +38,7 @@ func TestConnAndListener(t *testing.T) { } case "unixpacket": switch runtime.GOOS { - case "darwin", "nacl", "openbsd", "plan9", "windows": + case "android", "darwin", "nacl", "openbsd", "plan9", "windows": continue case "freebsd": // FreeBSD 8 doesn't support unixpacket continue diff --git a/libgo/go/net/dial.go b/libgo/go/net/dial.go index 93569c253cd..e6f0436cdd3 100644 --- a/libgo/go/net/dial.go +++ b/libgo/go/net/dial.go @@ -118,9 +118,8 @@ func resolveAddr(op, net, addr string, deadline time.Time) (netaddr, error) { // "unixpacket". // // For TCP and UDP networks, addresses have the form host:port. -// If host is a literal IPv6 address or host name, it must be enclosed -// in square brackets as in "[::1]:80", "[ipv6-host]:http" or -// "[ipv6-host%zone]:80". +// If host is a literal IPv6 address it must be enclosed +// in square brackets as in "[::1]:80" or "[ipv6-host%zone]:80". // The functions JoinHostPort and SplitHostPort manipulate addresses // in this form. // @@ -214,14 +213,12 @@ func dialMulti(net, addr string, la Addr, ras addrList, deadline time.Time) (Con nracers := len(ras) for nracers > 0 { sig <- true - select { - case racer := <-lane: - if racer.error == nil { - return racer.Conn, nil - } - lastErr = racer.error - nracers-- + racer := <-lane + if racer.error == nil { + return racer.Conn, nil } + lastErr = racer.error + nracers-- } return nil, lastErr } diff --git a/libgo/go/net/dial_test.go b/libgo/go/net/dial_test.go index f9260fd281b..42898d669f7 100644 --- a/libgo/go/net/dial_test.go +++ b/libgo/go/net/dial_test.go @@ -119,6 +119,7 @@ func TestSelfConnect(t *testing.T) { // TODO(brainman): do not know why it hangs. 
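dialMulti above races one dial attempt per address and keeps the first connection that succeeds. A stripped-down sketch of the same pattern (the type and function names here are invented, and unlike the real code it does not close connections that complete after a winner has already been returned):

package main

import (
	"errors"
	"fmt"
	"net"
	"time"
)

type racer struct {
	c   net.Conn
	err error
}

// dialFirst tries every address concurrently and returns the first
// connection that succeeds, or the last error if all attempts fail.
func dialFirst(network string, addrs []string, timeout time.Duration) (net.Conn, error) {
	lane := make(chan racer, len(addrs)) // buffered so losers never block
	for _, a := range addrs {
		go func(addr string) {
			c, err := net.DialTimeout(network, addr, timeout)
			lane <- racer{c, err}
		}(a)
	}
	lastErr := errors.New("no addresses")
	for range addrs {
		r := <-lane
		if r.err == nil {
			return r.c, nil
		}
		lastErr = r.err
	}
	return nil, lastErr
}

func main() {
	// Arbitrary example addresses; this performs real network I/O.
	c, err := dialFirst("tcp", []string{"127.0.0.1:1", "example.com:80"}, 2*time.Second)
	fmt.Println(c, err)
	if c != nil {
		c.Close()
	}
}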
t.Skip("skipping known-broken test on windows") } + // Test that Dial does not honor self-connects. // See the comment in DialTCP. @@ -149,8 +150,12 @@ func TestSelfConnect(t *testing.T) { for i := 0; i < n; i++ { c, err := DialTimeout("tcp", addr, time.Millisecond) if err == nil { + if c.LocalAddr().String() == addr { + t.Errorf("#%d: Dial %q self-connect", i, addr) + } else { + t.Logf("#%d: Dial %q succeeded - possibly racing with other listener", i, addr) + } c.Close() - t.Errorf("#%d: Dial %q succeeded", i, addr) } } } @@ -334,6 +339,8 @@ func numTCP() (ntcp, nopen, nclose int, err error) { } func TestDialMultiFDLeak(t *testing.T) { + t.Skip("flaky test - golang.org/issue/8764") + if !supportsIPv4 || !supportsIPv6 { t.Skip("neither ipv4 nor ipv6 is supported") } @@ -460,6 +467,11 @@ func TestDialer(t *testing.T) { } func TestDialDualStackLocalhost(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skipf("skipping test on %q", runtime.GOOS) + } + if ips, err := LookupIP("localhost"); err != nil { t.Fatalf("LookupIP failed: %v", err) } else if len(ips) < 2 || !supportsIPv4 || !supportsIPv6 { @@ -488,7 +500,7 @@ func TestDialDualStackLocalhost(t *testing.T) { } d := &Dialer{DualStack: true} - for _ = range dss.lns { + for range dss.lns { if c, err := d.Dial("tcp", "localhost:"+dss.port); err != nil { t.Errorf("Dial failed: %v", err) } else { diff --git a/libgo/go/net/dnsclient.go b/libgo/go/net/dnsclient.go index 9bffa11f916..e8014e4ffc9 100644 --- a/libgo/go/net/dnsclient.go +++ b/libgo/go/net/dnsclient.go @@ -196,9 +196,7 @@ func (addrs byPriorityWeight) shuffleByWeight() { s += int(addrs[i].Weight) if s > n { if i > 0 { - t := addrs[i] - copy(addrs[1:i+1], addrs[0:i]) - addrs[0] = t + addrs[0], addrs[i] = addrs[i], addrs[0] } break } diff --git a/libgo/go/net/dnsclient_unix.go b/libgo/go/net/dnsclient_unix.go index 3713efd0e3c..7511083f795 100644 --- a/libgo/go/net/dnsclient_unix.go +++ b/libgo/go/net/dnsclient_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris // DNS client: see RFC 1035. // Has to be linked into package net for Dial. @@ -16,6 +16,7 @@ package net import ( + "errors" "io" "math/rand" "os" @@ -23,118 +24,174 @@ import ( "time" ) -// Send a request on the connection and hope for a reply. -// Up to cfg.attempts attempts. -func exchange(cfg *dnsConfig, c Conn, name string, qtype uint16) (*dnsMsg, error) { - _, useTCP := c.(*TCPConn) - if len(name) >= 256 { - return nil, &DNSError{Err: "name too long", Name: name} +// A dnsConn represents a DNS transport endpoint. +type dnsConn interface { + Conn + + // readDNSResponse reads a DNS response message from the DNS + // transport endpoint and returns the received DNS response + // message. + readDNSResponse() (*dnsMsg, error) + + // writeDNSQuery writes a DNS query message to the DNS + // connection endpoint. 
+ writeDNSQuery(*dnsMsg) error +} + +func (c *UDPConn) readDNSResponse() (*dnsMsg, error) { + b := make([]byte, 512) // see RFC 1035 + n, err := c.Read(b) + if err != nil { + return nil, err } - out := new(dnsMsg) - out.id = uint16(rand.Int()) ^ uint16(time.Now().UnixNano()) - out.question = []dnsQuestion{ - {name, qtype, dnsClassINET}, + msg := &dnsMsg{} + if !msg.Unpack(b[:n]) { + return nil, errors.New("cannot unmarshal DNS message") } - out.recursion_desired = true - msg, ok := out.Pack() + return msg, nil +} + +func (c *UDPConn) writeDNSQuery(msg *dnsMsg) error { + b, ok := msg.Pack() if !ok { - return nil, &DNSError{Err: "internal error - cannot pack message", Name: name} + return errors.New("cannot marshal DNS message") } - if useTCP { - mlen := uint16(len(msg)) - msg = append([]byte{byte(mlen >> 8), byte(mlen)}, msg...) + if _, err := c.Write(b); err != nil { + return err } - for attempt := 0; attempt < cfg.attempts; attempt++ { - n, err := c.Write(msg) + return nil +} + +func (c *TCPConn) readDNSResponse() (*dnsMsg, error) { + b := make([]byte, 1280) // 1280 is a reasonable initial size for IP over Ethernet, see RFC 4035 + if _, err := io.ReadFull(c, b[:2]); err != nil { + return nil, err + } + l := int(b[0])<<8 | int(b[1]) + if l > len(b) { + b = make([]byte, l) + } + n, err := io.ReadFull(c, b[:l]) + if err != nil { + return nil, err + } + msg := &dnsMsg{} + if !msg.Unpack(b[:n]) { + return nil, errors.New("cannot unmarshal DNS message") + } + return msg, nil +} + +func (c *TCPConn) writeDNSQuery(msg *dnsMsg) error { + b, ok := msg.Pack() + if !ok { + return errors.New("cannot marshal DNS message") + } + l := uint16(len(b)) + b = append([]byte{byte(l >> 8), byte(l)}, b...) + if _, err := c.Write(b); err != nil { + return err + } + return nil +} + +func (d *Dialer) dialDNS(network, server string) (dnsConn, error) { + switch network { + case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6": + default: + return nil, UnknownNetworkError(network) + } + // Calling Dial here is scary -- we have to be sure not to + // dial a name that will require a DNS lookup, or Dial will + // call back here to translate it. The DNS config parser has + // already checked that all the cfg.servers[i] are IP + // addresses, which Dial will use without a DNS lookup. + c, err := d.Dial(network, server) + if err != nil { + return nil, err + } + switch network { + case "tcp", "tcp4", "tcp6": + return c.(*TCPConn), nil + case "udp", "udp4", "udp6": + return c.(*UDPConn), nil + } + panic("unreachable") +} + +// exchange sends a query on the connection and hopes for a response. 
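
The TCP transport above frames every DNS message with a two-byte, big-endian length prefix (RFC 1035, section 4.2.2). A generic sketch of that framing; the payload here is a placeholder, not a real DNS packet:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "io"
    )

    // writeFramed prepends the 16-bit message length before the payload.
    func writeFramed(w io.Writer, msg []byte) error {
        if err := binary.Write(w, binary.BigEndian, uint16(len(msg))); err != nil {
            return err
        }
        _, err := w.Write(msg)
        return err
    }

    // readFramed reads the 16-bit length, then exactly that many bytes.
    func readFramed(r io.Reader) ([]byte, error) {
        var n uint16
        if err := binary.Read(r, binary.BigEndian, &n); err != nil {
            return nil, err
        }
        msg := make([]byte, n)
        _, err := io.ReadFull(r, msg)
        return msg, err
    }

    func main() {
        var wire bytes.Buffer
        writeFramed(&wire, []byte("placeholder message"))
        msg, _ := readFramed(&wire)
        fmt.Printf("%q\n", msg)
    }
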
+func exchange(server, name string, qtype uint16, timeout time.Duration) (*dnsMsg, error) { + d := Dialer{Timeout: timeout} + out := dnsMsg{ + dnsMsgHdr: dnsMsgHdr{ + recursion_desired: true, + }, + question: []dnsQuestion{ + {name, qtype, dnsClassINET}, + }, + } + for _, network := range []string{"udp", "tcp"} { + c, err := d.dialDNS(network, server) if err != nil { return nil, err } - - if cfg.timeout == 0 { - c.SetReadDeadline(noDeadline) - } else { - c.SetReadDeadline(time.Now().Add(time.Duration(cfg.timeout) * time.Second)) + defer c.Close() + if timeout > 0 { + c.SetDeadline(time.Now().Add(timeout)) } - buf := make([]byte, 2000) - if useTCP { - n, err = io.ReadFull(c, buf[:2]) - if err != nil { - if e, ok := err.(Error); ok && e.Timeout() { - continue - } - } - mlen := int(buf[0])<<8 | int(buf[1]) - if mlen > len(buf) { - buf = make([]byte, mlen) - } - n, err = io.ReadFull(c, buf[:mlen]) - } else { - n, err = c.Read(buf) + out.id = uint16(rand.Int()) ^ uint16(time.Now().UnixNano()) + if err := c.writeDNSQuery(&out); err != nil { + return nil, err } + in, err := c.readDNSResponse() if err != nil { - if e, ok := err.(Error); ok && e.Timeout() { - continue - } return nil, err } - buf = buf[:n] - in := new(dnsMsg) - if !in.Unpack(buf) || in.id != out.id { + if in.id != out.id { + return nil, errors.New("DNS message ID mismatch") + } + if in.truncated { // see RFC 5966 continue } return in, nil } - var server string - if a := c.RemoteAddr(); a != nil { - server = a.String() - } - return nil, &DNSError{Err: "no answer from server", Name: name, Server: server, IsTimeout: true} + return nil, errors.New("no answer from DNS server") } // Do a lookup for a single name, which must be rooted // (otherwise answer will not find the answers). -func tryOneName(cfg *dnsConfig, name string, qtype uint16) (cname string, addrs []dnsRR, err error) { +func tryOneName(cfg *dnsConfig, name string, qtype uint16) (string, []dnsRR, error) { if len(cfg.servers) == 0 { return "", nil, &DNSError{Err: "no DNS servers", Name: name} } - for i := 0; i < len(cfg.servers); i++ { - // Calling Dial here is scary -- we have to be sure - // not to dial a name that will require a DNS lookup, - // or Dial will call back here to translate it. - // The DNS config parser has already checked that - // all the cfg.servers[i] are IP addresses, which - // Dial will use without a DNS lookup. 
- server := cfg.servers[i] + ":53" - c, cerr := Dial("udp", server) - if cerr != nil { - err = cerr - continue - } - msg, merr := exchange(cfg, c, name, qtype) - c.Close() - if merr != nil { - err = merr - continue - } - if msg.truncated { // see RFC 5966 - c, cerr = Dial("tcp", server) - if cerr != nil { - err = cerr + if len(name) >= 256 { + return "", nil, &DNSError{Err: "DNS name too long", Name: name} + } + timeout := time.Duration(cfg.timeout) * time.Second + var lastErr error + for i := 0; i < cfg.attempts; i++ { + for _, server := range cfg.servers { + server = JoinHostPort(server, "53") + msg, err := exchange(server, name, qtype, timeout) + if err != nil { + lastErr = &DNSError{ + Err: err.Error(), + Name: name, + Server: server, + } + if nerr, ok := err.(Error); ok && nerr.Timeout() { + lastErr.(*DNSError).IsTimeout = true + } continue } - msg, merr = exchange(cfg, c, name, qtype) - c.Close() - if merr != nil { - err = merr - continue + cname, addrs, err := answer(name, server, msg, qtype) + if err == nil || err.(*DNSError).Err == noSuchHost { + return cname, addrs, err } - } - cname, addrs, err = answer(name, server, msg, qtype) - if err == nil || err.(*DNSError).Err == noSuchHost { - break + lastErr = err } } - return + return "", nil, lastErr } func convertRR_A(records []dnsRR) []IP { @@ -240,13 +297,10 @@ func lookup(name string, qtype uint16) (cname string, addrs []dnsRR, err error) } // Can try as ordinary name. cname, addrs, err = tryOneName(cfg.dnsConfig, rname, qtype) - if err == nil { + if rooted || err == nil { return } } - if rooted { - return - } // Otherwise, try suffixes. for i := 0; i < len(cfg.dnsConfig.search); i++ { @@ -260,15 +314,15 @@ func lookup(name string, qtype uint16) (cname string, addrs []dnsRR, err error) } } - // Last ditch effort: try unsuffixed. - rname := name - if !rooted { - rname += "." - } - cname, addrs, err = tryOneName(cfg.dnsConfig, rname, qtype) - if err == nil { - return + // Last ditch effort: try unsuffixed only if we haven't already, + // that is, name is not rooted and has less than ndots dots. + if count(name, '.') < cfg.dnsConfig.ndots { + cname, addrs, err = tryOneName(cfg.dnsConfig, name+".", qtype) + if err == nil { + return + } } + if e, ok := err.(*DNSError); ok { // Show original name passed to lookup, not suffixed one. // In general we might have tried many suffixes; showing @@ -320,31 +374,36 @@ func goLookupIP(name string) (addrs []IP, err error) { return } } - var records []dnsRR - var cname string - var err4, err6 error - cname, records, err4 = lookup(name, dnsTypeA) - addrs = convertRR_A(records) - if cname != "" { - name = cname - } - _, records, err6 = lookup(name, dnsTypeAAAA) - if err4 != nil && err6 == nil { - // Ignore A error because AAAA lookup succeeded. - err4 = nil + type racer struct { + qtype uint16 + rrs []dnsRR + error } - if err6 != nil && len(addrs) > 0 { - // Ignore AAAA error because A lookup succeeded. - err6 = nil + lane := make(chan racer, 1) + qtypes := [...]uint16{dnsTypeA, dnsTypeAAAA} + for _, qtype := range qtypes { + go func(qtype uint16) { + _, rrs, err := lookup(name, qtype) + lane <- racer{qtype, rrs, err} + }(qtype) } - if err4 != nil { - return nil, err4 + var lastErr error + for range qtypes { + racer := <-lane + if racer.error != nil { + lastErr = racer.error + continue + } + switch racer.qtype { + case dnsTypeA: + addrs = append(addrs, convertRR_A(racer.rrs)...) + case dnsTypeAAAA: + addrs = append(addrs, convertRR_AAAA(racer.rrs)...) 
+ } } - if err6 != nil { - return nil, err6 + if len(addrs) == 0 && lastErr != nil { + return nil, lastErr } - - addrs = append(addrs, convertRR_AAAA(records)...) return addrs, nil } diff --git a/libgo/go/net/dnsclient_unix_test.go b/libgo/go/net/dnsclient_unix_test.go index 2350142d610..1167c26b39d 100644 --- a/libgo/go/net/dnsclient_unix_test.go +++ b/libgo/go/net/dnsclient_unix_test.go @@ -16,19 +16,79 @@ import ( "time" ) -func TestTCPLookup(t *testing.T) { +var dnsTransportFallbackTests = []struct { + server string + name string + qtype uint16 + timeout int + rcode int +}{ + // Querying "com." with qtype=255 usually makes an answer + // which requires more than 512 bytes. + {"8.8.8.8:53", "com.", dnsTypeALL, 2, dnsRcodeSuccess}, + {"8.8.4.4:53", "com.", dnsTypeALL, 4, dnsRcodeSuccess}, +} + +func TestDNSTransportFallback(t *testing.T) { if testing.Short() || !*testExternal { t.Skip("skipping test to avoid external network") } - c, err := Dial("tcp", "8.8.8.8:53") - if err != nil { - t.Fatalf("Dial failed: %v", err) + + for _, tt := range dnsTransportFallbackTests { + timeout := time.Duration(tt.timeout) * time.Second + msg, err := exchange(tt.server, tt.name, tt.qtype, timeout) + if err != nil { + t.Error(err) + continue + } + switch msg.rcode { + case tt.rcode, dnsRcodeServerFailure: + default: + t.Errorf("got %v from %v; want %v", msg.rcode, tt.server, tt.rcode) + continue + } } - defer c.Close() - cfg := &dnsConfig{timeout: 10, attempts: 3} - _, err = exchange(cfg, c, "com.", dnsTypeALL) - if err != nil { - t.Fatalf("exchange failed: %v", err) +} + +// See RFC 6761 for further information about the reserved, pseudo +// domain names. +var specialDomainNameTests = []struct { + name string + qtype uint16 + rcode int +}{ + // Name resolution APIs and libraries should not recognize the + // following names as special. + {"1.0.168.192.in-addr.arpa.", dnsTypePTR, dnsRcodeNameError}, + {"test.", dnsTypeALL, dnsRcodeNameError}, + {"example.com.", dnsTypeALL, dnsRcodeSuccess}, + + // Name resolution APIs and libraries should recognize the + // following names as special and should not send any queries. + // Even so, we test those names here to verify negative + // answers at the DNS query-response interaction level.
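
The goLookupIP hunk above now issues the A and AAAA queries concurrently and merges whatever comes back, reporting the last error only if nothing was found. A stand-alone sketch of that pattern; lookupOne is a placeholder for the real per-type lookup, not part of package net:

    package main

    import "fmt"

    type result struct {
        qtype string
        addrs []string
        err   error
    }

    // lookupOne stands in for a per-record-type DNS query.
    func lookupOne(name, qtype string) ([]string, error) {
        return []string{qtype + "-address-for-" + name}, nil
    }

    func lookupBoth(name string) ([]string, error) {
        lane := make(chan result, 2)
        for _, qtype := range []string{"A", "AAAA"} {
            go func(qtype string) {
                addrs, err := lookupOne(name, qtype)
                lane <- result{qtype, addrs, err}
            }(qtype)
        }
        var addrs []string
        var lastErr error
        for i := 0; i < 2; i++ {
            r := <-lane
            if r.err != nil {
                lastErr = r.err
                continue
            }
            addrs = append(addrs, r.addrs...)
        }
        if len(addrs) == 0 && lastErr != nil {
            return nil, lastErr
        }
        return addrs, nil
    }

    func main() {
        fmt.Println(lookupBoth("example.com"))
    }
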
+ {"localhost.", dnsTypeALL, dnsRcodeNameError}, + {"invalid.", dnsTypeALL, dnsRcodeNameError}, +} + +func TestSpecialDomainName(t *testing.T) { + if testing.Short() || !*testExternal { + t.Skip("skipping test to avoid external network") + } + + server := "8.8.8.8:53" + for _, tt := range specialDomainNameTests { + msg, err := exchange(server, tt.name, tt.qtype, 0) + if err != nil { + t.Error(err) + continue + } + switch msg.rcode { + case tt.rcode, dnsRcodeServerFailure: + default: + t.Errorf("got %v from %v; want %v", msg.rcode, server, tt.rcode) + continue + } } } @@ -144,7 +204,7 @@ func TestReloadResolvConfChange(t *testing.T) { if _, err := goLookupIP("golang.org"); err != nil { t.Fatalf("goLookupIP(good) failed: %v", err) } - r.WantServers([]string{"[8.8.8.8]"}) + r.WantServers([]string{"8.8.8.8"}) // Using a bad resolv.conf when we had a good one // before should not update the config @@ -155,5 +215,32 @@ func TestReloadResolvConfChange(t *testing.T) { // A new good config should get picked up r.SetConf("nameserver 8.8.4.4") - r.WantServers([]string{"[8.8.4.4]"}) + r.WantServers([]string{"8.8.4.4"}) +} + +func BenchmarkGoLookupIP(b *testing.B) { + for i := 0; i < b.N; i++ { + goLookupIP("www.example.com") + } +} + +func BenchmarkGoLookupIPNoSuchHost(b *testing.B) { + for i := 0; i < b.N; i++ { + goLookupIP("some.nonexistent") + } +} + +func BenchmarkGoLookupIPWithBrokenNameServer(b *testing.B) { + onceLoadConfig.Do(loadDefaultConfig) + if cfg.dnserr != nil || cfg.dnsConfig == nil { + b.Fatalf("loadConfig failed: %v", cfg.dnserr) + } + // This looks ugly but it's safe as long as benchmarks are run + // sequentially in package testing. + orig := cfg.dnsConfig + cfg.dnsConfig.servers = append([]string{"203.0.113.254"}, cfg.dnsConfig.servers...) // use TEST-NET-3 block, see RFC 5737 + for i := 0; i < b.N; i++ { + goLookupIP("www.example.com") + } + cfg.dnsConfig = orig } diff --git a/libgo/go/net/dnsconfig_unix.go b/libgo/go/net/dnsconfig_unix.go index db45716f124..66ab7c4dd30 100644 --- a/libgo/go/net/dnsconfig_unix.go +++ b/libgo/go/net/dnsconfig_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris // Read system DNS config from /etc/resolv.conf @@ -25,13 +25,12 @@ func dnsReadConfig(filename string) (*dnsConfig, error) { if err != nil { return nil, &DNSConfigError{err} } - conf := new(dnsConfig) - conf.servers = make([]string, 0, 3) // small, but the standard limit - conf.search = make([]string, 0) - conf.ndots = 1 - conf.timeout = 5 - conf.attempts = 2 - conf.rotate = false + defer file.close() + conf := &dnsConfig{ + ndots: 1, + timeout: 5, + attempts: 2, + } for line, ok := file.readLine(); ok; line, ok = file.readLine() { f := getFields(line) if len(f) < 1 { @@ -39,30 +38,20 @@ func dnsReadConfig(filename string) (*dnsConfig, error) { } switch f[0] { case "nameserver": // add one name server - a := conf.servers - n := len(a) - if len(f) > 1 && n < cap(a) { + if len(f) > 1 && len(conf.servers) < 3 { // small, but the standard limit // One more check: make sure server name is // just an IP address. Otherwise we need DNS // to look it up. 
- name := f[1] - switch len(ParseIP(name)) { - case 16: - name = "[" + name + "]" - fallthrough - case 4: - a = a[0 : n+1] - a[n] = name - conf.servers = a + if parseIPv4(f[1]) != nil { + conf.servers = append(conf.servers, f[1]) + } else if ip, _ := parseIPv6(f[1], true); ip != nil { + conf.servers = append(conf.servers, f[1]) } } case "domain": // set search path to just this domain if len(f) > 1 { - conf.search = make([]string, 1) - conf.search[0] = f[1] - } else { - conf.search = make([]string, 0) + conf.search = []string{f[1]} } case "search": // set search path to given servers @@ -99,8 +88,6 @@ func dnsReadConfig(filename string) (*dnsConfig, error) { } } } - file.close() - return conf, nil } diff --git a/libgo/go/net/dnsconfig_unix_test.go b/libgo/go/net/dnsconfig_unix_test.go index 37ed4931dbe..94fb0c32e24 100644 --- a/libgo/go/net/dnsconfig_unix_test.go +++ b/libgo/go/net/dnsconfig_unix_test.go @@ -6,41 +6,64 @@ package net -import "testing" +import ( + "reflect" + "testing" +) -func TestDNSReadConfig(t *testing.T) { - dnsConfig, err := dnsReadConfig("testdata/resolv.conf") - if err != nil { - t.Fatal(err) - } - - if len(dnsConfig.servers) != 1 { - t.Errorf("len(dnsConfig.servers) = %d; want %d", len(dnsConfig.servers), 1) - } - if dnsConfig.servers[0] != "[192.168.1.1]" { - t.Errorf("dnsConfig.servers[0] = %s; want %s", dnsConfig.servers[0], "[192.168.1.1]") - } - - if len(dnsConfig.search) != 1 { - t.Errorf("len(dnsConfig.search) = %d; want %d", len(dnsConfig.search), 1) - } - if dnsConfig.search[0] != "Home" { - t.Errorf("dnsConfig.search[0] = %s; want %s", dnsConfig.search[0], "Home") - } - - if dnsConfig.ndots != 5 { - t.Errorf("dnsConfig.ndots = %d; want %d", dnsConfig.ndots, 5) - } - - if dnsConfig.timeout != 10 { - t.Errorf("dnsConfig.timeout = %d; want %d", dnsConfig.timeout, 10) - } - - if dnsConfig.attempts != 3 { - t.Errorf("dnsConfig.attempts = %d; want %d", dnsConfig.attempts, 3) - } +var dnsReadConfigTests = []struct { + name string + conf dnsConfig +}{ + { + name: "testdata/resolv.conf", + conf: dnsConfig{ + servers: []string{"8.8.8.8", "2001:4860:4860::8888", "fe80::1%lo0"}, + search: []string{"localdomain"}, + ndots: 5, + timeout: 10, + attempts: 3, + rotate: true, + }, + }, + { + name: "testdata/domain-resolv.conf", + conf: dnsConfig{ + servers: []string{"8.8.8.8"}, + search: []string{"localdomain"}, + ndots: 1, + timeout: 5, + attempts: 2, + }, + }, + { + name: "testdata/search-resolv.conf", + conf: dnsConfig{ + servers: []string{"8.8.8.8"}, + search: []string{"test", "invalid"}, + ndots: 1, + timeout: 5, + attempts: 2, + }, + }, + { + name: "testdata/empty-resolv.conf", + conf: dnsConfig{ + ndots: 1, + timeout: 5, + attempts: 2, + }, + }, +} - if dnsConfig.rotate != true { - t.Errorf("dnsConfig.rotate = %t; want %t", dnsConfig.rotate, true) +func TestDNSReadConfig(t *testing.T) { + for _, tt := range dnsReadConfigTests { + conf, err := dnsReadConfig(tt.name) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(conf, &tt.conf) { + t.Errorf("got %v; want %v", conf, &tt.conf) + } } } diff --git a/libgo/go/net/fd_unix.go b/libgo/go/net/fd_unix.go index 7c73ddca777..7a97faeba34 100644 --- a/libgo/go/net/fd_unix.go +++ b/libgo/go/net/fd_unix.go @@ -401,7 +401,7 @@ func (fd *netFD) writeMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oob return } -func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (netfd *netFD, err error) { +func (fd *netFD) accept() (netfd *netFD, err error) { if err := fd.readLock(); err != nil { return nil, err } @@ 
-438,7 +438,7 @@ func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (netfd *netFD, err e return nil, err } lsa, _ := syscall.Getsockname(netfd.sysfd) - netfd.setAddr(toAddr(lsa), toAddr(rsa)) + netfd.setAddr(netfd.addrFunc()(lsa), netfd.addrFunc()(rsa)) return netfd, nil } diff --git a/libgo/go/net/fd_windows.go b/libgo/go/net/fd_windows.go index d1129dccc47..f3a534a1de0 100644 --- a/libgo/go/net/fd_windows.go +++ b/libgo/go/net/fd_windows.go @@ -294,6 +294,18 @@ func (fd *netFD) init() error { fd.skipSyncNotif = true } } + // Disable SIO_UDP_CONNRESET behavior. + // http://support.microsoft.com/kb/263823 + switch fd.net { + case "udp", "udp4", "udp6": + ret := uint32(0) + flag := uint32(0) + size := uint32(unsafe.Sizeof(flag)) + err := syscall.WSAIoctl(fd.sysfd, syscall.SIO_UDP_CONNRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0) + if err != nil { + return os.NewSyscallError("WSAIoctl", err) + } + } fd.rop.mode = 'r' fd.wop.mode = 'w' fd.rop.fd = fd @@ -520,7 +532,7 @@ func (fd *netFD) writeTo(buf []byte, sa syscall.Sockaddr) (int, error) { }) } -func (fd *netFD) acceptOne(toAddr func(syscall.Sockaddr) Addr, rawsa []syscall.RawSockaddrAny, o *operation) (*netFD, error) { +func (fd *netFD) acceptOne(rawsa []syscall.RawSockaddrAny, o *operation) (*netFD, error) { // Get new socket. s, err := sysSocket(fd.family, fd.sotype, 0) if err != nil { @@ -559,7 +571,7 @@ func (fd *netFD) acceptOne(toAddr func(syscall.Sockaddr) Addr, rawsa []syscall.R return netfd, nil } -func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (*netFD, error) { +func (fd *netFD) accept() (*netFD, error) { if err := fd.readLock(); err != nil { return nil, err } @@ -570,7 +582,7 @@ func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (*netFD, error) { var err error var rawsa [2]syscall.RawSockaddrAny for { - netfd, err = fd.acceptOne(toAddr, rawsa[:], o) + netfd, err = fd.acceptOne(rawsa[:], o) if err == nil { break } @@ -603,7 +615,7 @@ func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (*netFD, error) { lsa, _ := lrsa.Sockaddr() rsa, _ := rrsa.Sockaddr() - netfd.setAddr(toAddr(lsa), toAddr(rsa)) + netfd.setAddr(netfd.addrFunc()(lsa), netfd.addrFunc()(rsa)) return netfd, nil } diff --git a/libgo/go/net/file_stub.go b/libgo/go/net/file_stub.go new file mode 100644 index 00000000000..4281072ef93 --- /dev/null +++ b/libgo/go/net/file_stub.go @@ -0,0 +1,38 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl + +package net + +import ( + "os" + "syscall" +) + +// FileConn returns a copy of the network connection corresponding to +// the open file f. It is the caller's responsibility to close f when +// finished. Closing c does not affect f, and closing f does not +// affect c. +func FileConn(f *os.File) (c Conn, err error) { + return nil, syscall.ENOPROTOOPT + +} + +// FileListener returns a copy of the network listener corresponding +// to the open file f. It is the caller's responsibility to close l +// when finished. Closing l does not affect f, and closing f does not +// affect l. +func FileListener(f *os.File) (l Listener, err error) { + return nil, syscall.ENOPROTOOPT + +} + +// FilePacketConn returns a copy of the packet network connection +// corresponding to the open file f. It is the caller's +// responsibility to close f when finished. Closing c does not affect +// f, and closing f does not affect c. 
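
On platforms where these functions are implemented (unlike the nacl stubs in this file), the usual pattern is to wrap a descriptor inherited from a parent process back into a listener. A sketch of that use; the descriptor number 3 is an assumption about the supervising process, not anything this patch specifies:

    package main

    import (
        "fmt"
        "net"
        "os"
    )

    func main() {
        f := os.NewFile(3, "inherited-listener") // fd handed down by a parent process
        ln, err := net.FileListener(f)
        if err != nil {
            fmt.Println("FileListener:", err) // e.g. ENOPROTOOPT on nacl, per the stub above
            return
        }
        defer ln.Close()
        f.Close() // the listener holds its own copy of the descriptor
        fmt.Println("listening on", ln.Addr())
    }
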
+func FilePacketConn(f *os.File) (c PacketConn, err error) { + return nil, syscall.ENOPROTOOPT +} diff --git a/libgo/go/net/file_test.go b/libgo/go/net/file_test.go index d81bca78249..6fab06a9c6e 100644 --- a/libgo/go/net/file_test.go +++ b/libgo/go/net/file_test.go @@ -89,7 +89,7 @@ var fileListenerTests = []struct { func TestFileListener(t *testing.T) { switch runtime.GOOS { - case "windows": + case "nacl", "windows": t.Skipf("skipping test on %q", runtime.GOOS) } diff --git a/libgo/go/net/file_unix.go b/libgo/go/net/file_unix.go index 07b3ecf6263..214a4196c8e 100644 --- a/libgo/go/net/file_unix.go +++ b/libgo/go/net/file_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris package net diff --git a/libgo/go/net/hosts.go b/libgo/go/net/hosts.go index e6674ba3415..9400503e41e 100644 --- a/libgo/go/net/hosts.go +++ b/libgo/go/net/hosts.go @@ -51,7 +51,7 @@ func readHosts() { } } // Update the data cache. - hosts.expire = time.Now().Add(cacheMaxAge) + hosts.expire = now.Add(cacheMaxAge) hosts.path = hp hosts.byName = hs hosts.byAddr = is diff --git a/libgo/go/net/http/client.go b/libgo/go/net/http/client.go index a5a3abe6138..ce884d1f07b 100644 --- a/libgo/go/net/http/client.go +++ b/libgo/go/net/http/client.go @@ -101,6 +101,30 @@ type RoundTripper interface { // return true if the string includes a port. func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } +// refererForURL returns a referer without any authentication info or +// an empty string if lastReq scheme is https and newReq scheme is http. +func refererForURL(lastReq, newReq *url.URL) string { + // https://tools.ietf.org/html/rfc7231#section-5.5.2 + // "Clients SHOULD NOT include a Referer header field in a + // (non-secure) HTTP request if the referring page was + // transferred with a secure protocol." + if lastReq.Scheme == "https" && newReq.Scheme == "http" { + return "" + } + referer := lastReq.String() + if lastReq.User != nil { + // This is not very efficient, but is the best we can + // do without: + // - introducing a new method on URL + // - creating a race condition + // - copying the URL struct manually, which would cause + // maintenance problems down the line + auth := lastReq.User.String() + "@" + referer = strings.Replace(referer, auth, "", 1) + } + return referer +} + // Used in Send to implement io.ReadCloser by bundling together the // bufio.Reader through which we read the response, and the underlying // network connection. @@ -324,8 +348,8 @@ func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bo if len(via) > 0 { // Add the Referer header. 
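
refererForURL above strips any userinfo from the previous URL and suppresses the header entirely on an https-to-http hop. A stand-alone restatement of that policy; refererFor here is a local sketch, not the unexported net/http helper:

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    func refererFor(lastReq, newReq *url.URL) string {
        if lastReq.Scheme == "https" && newReq.Scheme == "http" {
            return "" // never leak an https referer to a plain-http target
        }
        ref := lastReq.String()
        if lastReq.User != nil {
            ref = strings.Replace(ref, lastReq.User.String()+"@", "", 1)
        }
        return ref
    }

    func main() {
        last, _ := url.Parse("https://gopher:secret@example.com/page")
        next, _ := url.Parse("https://other.example/")
        fmt.Println(refererFor(last, next)) // https://example.com/page
    }
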
lastReq := via[len(via)-1] - if lastReq.URL.Scheme != "https" { - nreq.Header.Set("Referer", lastReq.URL.String()) + if ref := refererForURL(lastReq.URL, nreq.URL); ref != "" { + nreq.Header.Set("Referer", ref) } err = redirectChecker(nreq, via) diff --git a/libgo/go/net/http/client_test.go b/libgo/go/net/http/client_test.go index 6392c1baf39..56b6563c486 100644 --- a/libgo/go/net/http/client_test.go +++ b/libgo/go/net/http/client_test.go @@ -1036,3 +1036,40 @@ func TestClientTrailers(t *testing.T) { t.Errorf("Response trailers = %#v; want %#v", res.Trailer, want) } } + +func TestReferer(t *testing.T) { + tests := []struct { + lastReq, newReq string // from -> to URLs + want string + }{ + // don't send user: + {"http://gopher@test.com", "http://link.com", "http://test.com"}, + {"https://gopher@test.com", "https://link.com", "https://test.com"}, + + // don't send a user and password: + {"http://gopher:go@test.com", "http://link.com", "http://test.com"}, + {"https://gopher:go@test.com", "https://link.com", "https://test.com"}, + + // nothing to do: + {"http://test.com", "http://link.com", "http://test.com"}, + {"https://test.com", "https://link.com", "https://test.com"}, + + // https to http doesn't send a referer: + {"https://test.com", "http://link.com", ""}, + {"https://gopher:go@test.com", "http://link.com", ""}, + } + for _, tt := range tests { + l, err := url.Parse(tt.lastReq) + if err != nil { + t.Fatal(err) + } + n, err := url.Parse(tt.newReq) + if err != nil { + t.Fatal(err) + } + r := ExportRefererForURL(l, n) + if r != tt.want { + t.Errorf("refererForURL(%q, %q) = %q; want %q", tt.lastReq, tt.newReq, r, tt.want) + } + } +} diff --git a/libgo/go/net/http/cookie.go b/libgo/go/net/http/cookie.go index dc60ba87f5f..a0d0fdbbd07 100644 --- a/libgo/go/net/http/cookie.go +++ b/libgo/go/net/http/cookie.go @@ -56,7 +56,7 @@ func readSetCookies(h Header) []*Cookie { if !isCookieNameValid(name) { continue } - value, success := parseCookieValue(value) + value, success := parseCookieValue(value, true) if !success { continue } @@ -76,7 +76,7 @@ func readSetCookies(h Header) []*Cookie { attr, val = attr[:j], attr[j+1:] } lowerAttr := strings.ToLower(attr) - val, success = parseCookieValue(val) + val, success = parseCookieValue(val, false) if !success { c.Unparsed = append(c.Unparsed, parts[i]) continue @@ -205,7 +205,7 @@ func readCookies(h Header, filter string) []*Cookie { if filter != "" && filter != name { continue } - val, success := parseCookieValue(val) + val, success := parseCookieValue(val, true) if !success { continue } @@ -345,9 +345,9 @@ func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string { return string(buf) } -func parseCookieValue(raw string) (string, bool) { +func parseCookieValue(raw string, allowDoubleQuote bool) (string, bool) { // Strip the quotes, if present. 
- if len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' { + if allowDoubleQuote && len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' { raw = raw[1 : len(raw)-1] } for i := 0; i < len(raw); i++ { diff --git a/libgo/go/net/http/cookie_test.go b/libgo/go/net/http/cookie_test.go index f78f37299f4..98dc2fade0d 100644 --- a/libgo/go/net/http/cookie_test.go +++ b/libgo/go/net/http/cookie_test.go @@ -313,6 +313,14 @@ var readCookiesTests = []struct { {Name: "c2", Value: "v2"}, }, }, + { + Header{"Cookie": {`Cookie-1="v$1"; c2="v2"`}}, + "", + []*Cookie{ + {Name: "Cookie-1", Value: "v$1"}, + {Name: "c2", Value: "v2"}, + }, + }, } func TestReadCookies(t *testing.T) { @@ -327,6 +335,30 @@ func TestReadCookies(t *testing.T) { } } +func TestSetCookieDoubleQuotes(t *testing.T) { + res := &Response{Header: Header{}} + res.Header.Add("Set-Cookie", `quoted0=none; max-age=30`) + res.Header.Add("Set-Cookie", `quoted1="cookieValue"; max-age=31`) + res.Header.Add("Set-Cookie", `quoted2=cookieAV; max-age="32"`) + res.Header.Add("Set-Cookie", `quoted3="both"; max-age="33"`) + got := res.Cookies() + want := []*Cookie{ + {Name: "quoted0", Value: "none", MaxAge: 30}, + {Name: "quoted1", Value: "cookieValue", MaxAge: 31}, + {Name: "quoted2", Value: "cookieAV"}, + {Name: "quoted3", Value: "both"}, + } + if len(got) != len(want) { + t.Fatalf("got %d cookies, want %d", len(got), len(want)) + } + for i, w := range want { + g := got[i] + if g.Name != w.Name || g.Value != w.Value || g.MaxAge != w.MaxAge { + t.Errorf("cookie #%d:\ngot %v\nwant %v", i, g, w) + } + } +} + func TestCookieSanitizeValue(t *testing.T) { defer log.SetOutput(os.Stderr) var logbuf bytes.Buffer diff --git a/libgo/go/net/http/cookiejar/jar.go b/libgo/go/net/http/cookiejar/jar.go index 389ab58e418..0e0fac9286e 100644 --- a/libgo/go/net/http/cookiejar/jar.go +++ b/libgo/go/net/http/cookiejar/jar.go @@ -30,7 +30,7 @@ import ( // set a cookie for bar.com. // // A public suffix list implementation is in the package -// code.google.com/p/go.net/publicsuffix. +// golang.org/x/net/publicsuffix. type PublicSuffixList interface { // PublicSuffix returns the public suffix of domain.
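
A sketch of how a caller wires a public suffix list into a cookie jar, using the golang.org/x/net/publicsuffix package the comment above now points to; that import is external to the standard library:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/cookiejar"

        "golang.org/x/net/publicsuffix"
    )

    func main() {
        jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
        if err != nil {
            fmt.Println("cookiejar.New:", err)
            return
        }
        client := &http.Client{Jar: jar}
        _ = client // cookies set by responses are now scoped using the public suffix list
    }
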
// diff --git a/libgo/go/net/http/export_test.go b/libgo/go/net/http/export_test.go index 960563b2409..87b6c0773aa 100644 --- a/libgo/go/net/http/export_test.go +++ b/libgo/go/net/http/export_test.go @@ -9,6 +9,7 @@ package http import ( "net" + "net/url" "time" ) @@ -57,6 +58,26 @@ func (t *Transport) IdleConnChMapSizeForTesting() int { return len(t.idleConnCh) } +func (t *Transport) IsIdleForTesting() bool { + t.idleMu.Lock() + defer t.idleMu.Unlock() + return t.wantIdle +} + +func (t *Transport) RequestIdleConnChForTesting() { + t.getIdleConnCh(connectMethod{nil, "http", "example.com"}) +} + +func (t *Transport) PutIdleTestConn() bool { + c, _ := net.Pipe() + return t.putIdleConn(&persistConn{ + t: t, + conn: c, // dummy + closech: make(chan struct{}), // so it can be closed + cacheKey: connectMethodKey{"", "http", "example.com"}, + }) +} + func NewTestTimeoutHandler(handler Handler, ch <-chan time.Time) Handler { f := func() <-chan time.Time { return ch @@ -66,7 +87,22 @@ func NewTestTimeoutHandler(handler Handler, ch <-chan time.Time) Handler { func ResetCachedEnvironment() { httpProxyEnv.reset() + httpsProxyEnv.reset() noProxyEnv.reset() } var DefaultUserAgent = defaultUserAgent + +func ExportRefererForURL(lastReq, newReq *url.URL) string { + return refererForURL(lastReq, newReq) +} + +// SetPendingDialHooks sets the hooks that run before and after handling +// pending dials. +func SetPendingDialHooks(before, after func()) { + prePendingDial, postPendingDial = before, after +} + +var ExportServerNewConn = (*Server).newConn + +var ExportCloseWriteAndWait = (*conn).closeWriteAndWait diff --git a/libgo/go/net/http/fs.go b/libgo/go/net/http/fs.go index 8576cf844a3..e322f710a5d 100644 --- a/libgo/go/net/http/fs.go +++ b/libgo/go/net/http/fs.go @@ -22,8 +22,12 @@ import ( "time" ) -// A Dir implements http.FileSystem using the native file -// system restricted to a specific directory tree. +// A Dir implements FileSystem using the native file system restricted to a +// specific directory tree. +// +// While the FileSystem.Open method takes '/'-separated paths, a Dir's string +// value is a filename on the native file system, not a URL, so it is separated +// by filepath.Separator, which isn't necessarily '/'. // // An empty Dir is treated as ".". type Dir string @@ -139,7 +143,7 @@ func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, if checkLastModified(w, r, modtime) { return } - rangeReq, done := checkETag(w, r) + rangeReq, done := checkETag(w, r, modtime) if done { return } @@ -212,12 +216,6 @@ func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, code = StatusPartialContent w.Header().Set("Content-Range", ra.contentRange(size)) case len(ranges) > 1: - for _, ra := range ranges { - if ra.start > size { - Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) - return - } - } sendSize = rangesMIMESize(ranges, ctype, size) code = StatusPartialContent @@ -281,11 +279,14 @@ func checkLastModified(w ResponseWriter, r *Request, modtime time.Time) bool { } // checkETag implements If-None-Match and If-Range checks. -// The ETag must have been previously set in the ResponseWriter's headers. +// +// The ETag or modtime must have been previously set in the +// ResponseWriter's headers. The modtime is only compared at second +// granularity and may be the zero value to mean unknown. // // The return value is the effective request "Range" header to use and // whether this request is now considered done. 
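
From the caller's side, the checkETag rework in this hunk matters when a handler gives ServeContent a real modification time: an If-Range header carrying that date, not just an ETag, now keeps the Range request alive. A minimal sketch; the timestamp, path, and payload are invented:

    package main

    import (
        "net/http"
        "strings"
        "time"
    )

    func main() {
        modtime := time.Date(2014, 6, 25, 17, 12, 18, 0, time.UTC)
        http.HandleFunc("/file", func(w http.ResponseWriter, r *http.Request) {
            // ServeContent sets Last-Modified from modtime and honours
            // Range and If-Range requests against it.
            http.ServeContent(w, r, "file.txt", modtime, strings.NewReader("example payload"))
        })
        http.ListenAndServe("127.0.0.1:8080", nil)
    }
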
-func checkETag(w ResponseWriter, r *Request) (rangeReq string, done bool) { +func checkETag(w ResponseWriter, r *Request, modtime time.Time) (rangeReq string, done bool) { etag := w.Header().get("Etag") rangeReq = r.Header.get("Range") @@ -296,11 +297,17 @@ func checkETag(w ResponseWriter, r *Request) (rangeReq string, done bool) { // We only support ETag versions. // The caller must have set the ETag on the response already. if ir := r.Header.get("If-Range"); ir != "" && ir != etag { - // TODO(bradfitz): handle If-Range requests with Last-Modified - // times instead of ETags? I'd rather not, at least for - // now. That seems like a bug/compromise in the RFC 2616, and - // I've never heard of anybody caring about that (yet). - rangeReq = "" + // The If-Range value is typically the ETag value, but it may also be + // the modtime date. See golang.org/issue/8367. + timeMatches := false + if !modtime.IsZero() { + if t, err := ParseTime(ir); err == nil && t.Unix() == modtime.Unix() { + timeMatches = true + } + } + if !timeMatches { + rangeReq = "" + } } if inm := r.Header.get("If-None-Match"); inm != "" { @@ -378,7 +385,7 @@ func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirec // use contents of index.html for directory, if present if d.IsDir() { - index := name + indexPage + index := strings.TrimSuffix(name, "/") + indexPage ff, err := fs.Open(index) if err == nil { defer ff.Close() @@ -400,7 +407,7 @@ func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirec return } - // serverContent will check modification time + // serveContent will check modification time sizeFunc := func() (int64, error) { return d.Size(), nil } serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f) } diff --git a/libgo/go/net/http/fs_test.go b/libgo/go/net/http/fs_test.go index c9a77c9f6aa..2ddd4ca5fe9 100644 --- a/libgo/go/net/http/fs_test.go +++ b/libgo/go/net/http/fs_test.go @@ -721,6 +721,28 @@ func TestServeContent(t *testing.T) { wantStatus: 200, wantContentType: "text/css; charset=utf-8", }, + "range_with_modtime": { + file: "testdata/style.css", + modtime: time.Date(2014, 6, 25, 17, 12, 18, 0 /* nanos */, time.UTC), + reqHeader: map[string]string{ + "Range": "bytes=0-4", + "If-Range": "Wed, 25 Jun 2014 17:12:18 GMT", + }, + wantStatus: StatusPartialContent, + wantContentType: "text/css; charset=utf-8", + wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT", + }, + "range_with_modtime_nanos": { + file: "testdata/style.css", + modtime: time.Date(2014, 6, 25, 17, 12, 18, 123 /* nanos */, time.UTC), + reqHeader: map[string]string{ + "Range": "bytes=0-4", + "If-Range": "Wed, 25 Jun 2014 17:12:18 GMT", + }, + wantStatus: StatusPartialContent, + wantContentType: "text/css; charset=utf-8", + wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT", + }, } for testName, tt := range tests { var content io.ReadSeeker @@ -860,4 +882,41 @@ func TestLinuxSendfileChild(*testing.T) { } } +func TestFileServerCleanPath(t *testing.T) { + tests := []struct { + path string + wantCode int + wantOpen []string + }{ + {"/", 200, []string{"/", "/index.html"}}, + {"/dir", 301, []string{"/dir"}}, + {"/dir/", 200, []string{"/dir", "/dir/index.html"}}, + } + for _, tt := range tests { + var log []string + rr := httptest.NewRecorder() + req, _ := NewRequest("GET", "http://foo.localhost"+tt.path, nil) + FileServer(fileServerCleanPathDir{&log}).ServeHTTP(rr, req) + if !reflect.DeepEqual(log, tt.wantOpen) { + t.Logf("For %s: Opens = %q; want %q", tt.path, log, tt.wantOpen) + } + if rr.Code != tt.wantCode { + 
t.Logf("For %s: Response code = %d; want %d", tt.path, rr.Code, tt.wantCode) + } + } +} + +type fileServerCleanPathDir struct { + log *[]string +} + +func (d fileServerCleanPathDir) Open(path string) (File, error) { + *(d.log) = append(*(d.log), path) + if path == "/" || path == "/dir" || path == "/dir/" { + // Just return back something that's a directory. + return Dir(".").Open(".") + } + return nil, os.ErrNotExist +} + type panicOnSeek struct{ io.ReadSeeker } diff --git a/libgo/go/net/http/httptest/server.go b/libgo/go/net/http/httptest/server.go index 7f265552f52..789e7bf41e6 100644 --- a/libgo/go/net/http/httptest/server.go +++ b/libgo/go/net/http/httptest/server.go @@ -203,7 +203,7 @@ func (h *waitGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // localhostCert is a PEM-encoded TLS cert with SAN IPs // "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end // of ASN.1 time). -// generated from src/pkg/crypto/tls: +// generated from src/crypto/tls: // go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var localhostCert = []byte(`-----BEGIN CERTIFICATE----- MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD diff --git a/libgo/go/net/http/httptest/server_test.go b/libgo/go/net/http/httptest/server_test.go index 4fc4c702082..500a9f0b800 100644 --- a/libgo/go/net/http/httptest/server_test.go +++ b/libgo/go/net/http/httptest/server_test.go @@ -8,7 +8,6 @@ import ( "io/ioutil" "net/http" "testing" - "time" ) func TestServer(t *testing.T) { @@ -28,26 +27,3 @@ func TestServer(t *testing.T) { t.Errorf("got %q, want hello", string(got)) } } - -func TestIssue7264(t *testing.T) { - t.Skip("broken test - removed at tip") - for i := 0; i < 1000; i++ { - func() { - inHandler := make(chan bool, 1) - ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - inHandler <- true - })) - defer ts.Close() - tr := &http.Transport{ - ResponseHeaderTimeout: time.Nanosecond, - } - defer tr.CloseIdleConnections() - c := &http.Client{Transport: tr} - res, err := c.Get(ts.URL) - <-inHandler - if err == nil { - res.Body.Close() - } - }() - } -} diff --git a/libgo/go/net/http/httputil/chunked.go b/libgo/go/net/http/httputil/chunked.go deleted file mode 100644 index 9632bfd19d5..00000000000 --- a/libgo/go/net/http/httputil/chunked.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The wire protocol for HTTP's "chunked" Transfer-Encoding. - -// This code is duplicated in net/http and net/http/httputil. -// Please make any changes in both files. - -package httputil - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" -) - -const maxLineLength = 4096 // assumed <= bufio.defaultBufSize - -var ErrLineTooLong = errors.New("header line too long") - -// newChunkedReader returns a new chunkedReader that translates the data read from r -// out of HTTP "chunked" format before returning it. -// The chunkedReader returns io.EOF when the final 0-length chunk is read. -// -// newChunkedReader is not needed by normal applications. The http package -// automatically decodes chunking when reading response bodies. 
-func newChunkedReader(r io.Reader) io.Reader { - br, ok := r.(*bufio.Reader) - if !ok { - br = bufio.NewReader(r) - } - return &chunkedReader{r: br} -} - -type chunkedReader struct { - r *bufio.Reader - n uint64 // unread bytes in chunk - err error - buf [2]byte -} - -func (cr *chunkedReader) beginChunk() { - // chunk-size CRLF - var line []byte - line, cr.err = readLine(cr.r) - if cr.err != nil { - return - } - cr.n, cr.err = parseHexUint(line) - if cr.err != nil { - return - } - if cr.n == 0 { - cr.err = io.EOF - } -} - -func (cr *chunkedReader) chunkHeaderAvailable() bool { - n := cr.r.Buffered() - if n > 0 { - peek, _ := cr.r.Peek(n) - return bytes.IndexByte(peek, '\n') >= 0 - } - return false -} - -func (cr *chunkedReader) Read(b []uint8) (n int, err error) { - for cr.err == nil { - if cr.n == 0 { - if n > 0 && !cr.chunkHeaderAvailable() { - // We've read enough. Don't potentially block - // reading a new chunk header. - break - } - cr.beginChunk() - continue - } - if len(b) == 0 { - break - } - rbuf := b - if uint64(len(rbuf)) > cr.n { - rbuf = rbuf[:cr.n] - } - var n0 int - n0, cr.err = cr.r.Read(rbuf) - n += n0 - b = b[n0:] - cr.n -= uint64(n0) - // If we're at the end of a chunk, read the next two - // bytes to verify they are "\r\n". - if cr.n == 0 && cr.err == nil { - if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil { - if cr.buf[0] != '\r' || cr.buf[1] != '\n' { - cr.err = errors.New("malformed chunked encoding") - } - } - } - } - return n, cr.err -} - -// Read a line of bytes (up to \n) from b. -// Give up if the line exceeds maxLineLength. -// The returned bytes are a pointer into storage in -// the bufio, so they are only valid until the next bufio read. -func readLine(b *bufio.Reader) (p []byte, err error) { - if p, err = b.ReadSlice('\n'); err != nil { - // We always know when EOF is coming. - // If the caller asked for a line, there should be a line. - if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if err == bufio.ErrBufferFull { - err = ErrLineTooLong - } - return nil, err - } - if len(p) >= maxLineLength { - return nil, ErrLineTooLong - } - return trimTrailingWhitespace(p), nil -} - -func trimTrailingWhitespace(b []byte) []byte { - for len(b) > 0 && isASCIISpace(b[len(b)-1]) { - b = b[:len(b)-1] - } - return b -} - -func isASCIISpace(b byte) bool { - return b == ' ' || b == '\t' || b == '\n' || b == '\r' -} - -// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP -// "chunked" format before writing them to w. Closing the returned chunkedWriter -// sends the final 0-length chunk that marks the end of the stream. -// -// newChunkedWriter is not needed by normal applications. The http -// package adds chunking automatically if handlers don't set a -// Content-Length header. Using newChunkedWriter inside a handler -// would result in double chunking or chunking with a Content-Length -// length, both of which are wrong. -func newChunkedWriter(w io.Writer) io.WriteCloser { - return &chunkedWriter{w} -} - -// Writing to chunkedWriter translates to writing in HTTP chunked Transfer -// Encoding wire format to the underlying Wire chunkedWriter. -type chunkedWriter struct { - Wire io.Writer -} - -// Write the contents of data as one chunk to Wire. -// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has -// a bug since it does not check for success of io.WriteString -func (cw *chunkedWriter) Write(data []byte) (n int, err error) { - - // Don't send 0-length data. It looks like EOF for chunked encoding. 
- if len(data) == 0 { - return 0, nil - } - - if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil { - return 0, err - } - if n, err = cw.Wire.Write(data); err != nil { - return - } - if n != len(data) { - err = io.ErrShortWrite - return - } - _, err = io.WriteString(cw.Wire, "\r\n") - - return -} - -func (cw *chunkedWriter) Close() error { - _, err := io.WriteString(cw.Wire, "0\r\n") - return err -} - -func parseHexUint(v []byte) (n uint64, err error) { - for _, b := range v { - n <<= 4 - switch { - case '0' <= b && b <= '9': - b = b - '0' - case 'a' <= b && b <= 'f': - b = b - 'a' + 10 - case 'A' <= b && b <= 'F': - b = b - 'A' + 10 - default: - return 0, errors.New("invalid byte in chunk length") - } - n |= uint64(b) - } - return -} diff --git a/libgo/go/net/http/httputil/chunked_test.go b/libgo/go/net/http/httputil/chunked_test.go deleted file mode 100644 index a7a57746885..00000000000 --- a/libgo/go/net/http/httputil/chunked_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code is duplicated in net/http and net/http/httputil. -// Please make any changes in both files. - -package httputil - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" -) - -func TestChunk(t *testing.T) { - var b bytes.Buffer - - w := newChunkedWriter(&b) - const chunk1 = "hello, " - const chunk2 = "world! 0123456789abcdef" - w.Write([]byte(chunk1)) - w.Write([]byte(chunk2)) - w.Close() - - if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e { - t.Fatalf("chunk writer wrote %q; want %q", g, e) - } - - r := newChunkedReader(&b) - data, err := ioutil.ReadAll(r) - if err != nil { - t.Logf(`data: "%s"`, data) - t.Fatalf("ReadAll from reader: %v", err) - } - if g, e := string(data), chunk1+chunk2; g != e { - t.Errorf("chunk reader read %q; want %q", g, e) - } -} - -func TestChunkReadMultiple(t *testing.T) { - // Bunch of small chunks, all read together. - { - var b bytes.Buffer - w := newChunkedWriter(&b) - w.Write([]byte("foo")) - w.Write([]byte("bar")) - w.Close() - - r := newChunkedReader(&b) - buf := make([]byte, 10) - n, err := r.Read(buf) - if n != 6 || err != io.EOF { - t.Errorf("Read = %d, %v; want 6, EOF", n, err) - } - buf = buf[:n] - if string(buf) != "foobar" { - t.Errorf("Read = %q; want %q", buf, "foobar") - } - } - - // One big chunk followed by a little chunk, but the small bufio.Reader size - // should prevent the second chunk header from being read. - { - var b bytes.Buffer - w := newChunkedWriter(&b) - // fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes, - // the same as the bufio ReaderSize below (the minimum), so even - // though we're going to try to Read with a buffer larger enough to also - // receive "foo", the second chunk header won't be read yet. 
- const fillBufChunk = "0123456789a" - const shortChunk = "foo" - w.Write([]byte(fillBufChunk)) - w.Write([]byte(shortChunk)) - w.Close() - - r := newChunkedReader(bufio.NewReaderSize(&b, 16)) - buf := make([]byte, len(fillBufChunk)+len(shortChunk)) - n, err := r.Read(buf) - if n != len(fillBufChunk) || err != nil { - t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk)) - } - buf = buf[:n] - if string(buf) != fillBufChunk { - t.Errorf("Read = %q; want %q", buf, fillBufChunk) - } - - n, err = r.Read(buf) - if n != len(shortChunk) || err != io.EOF { - t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk)) - } - } - - // And test that we see an EOF chunk, even though our buffer is already full: - { - r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n"))) - buf := make([]byte, 3) - n, err := r.Read(buf) - if n != 3 || err != io.EOF { - t.Errorf("Read = %d, %v; want 3, EOF", n, err) - } - if string(buf) != "foo" { - t.Errorf("buf = %q; want foo", buf) - } - } -} - -func TestChunkReaderAllocs(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } - var buf bytes.Buffer - w := newChunkedWriter(&buf) - a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc") - w.Write(a) - w.Write(b) - w.Write(c) - w.Close() - - readBuf := make([]byte, len(a)+len(b)+len(c)+1) - byter := bytes.NewReader(buf.Bytes()) - bufr := bufio.NewReader(byter) - mallocs := testing.AllocsPerRun(100, func() { - byter.Seek(0, 0) - bufr.Reset(byter) - r := newChunkedReader(bufr) - n, err := io.ReadFull(r, readBuf) - if n != len(readBuf)-1 { - t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1) - } - if err != io.ErrUnexpectedEOF { - t.Fatalf("read error = %v; want ErrUnexpectedEOF", err) - } - }) - if mallocs > 1.5 { - t.Errorf("mallocs = %v; want 1", mallocs) - } -} - -func TestParseHexUint(t *testing.T) { - for i := uint64(0); i <= 1234; i++ { - line := []byte(fmt.Sprintf("%x", i)) - got, err := parseHexUint(line) - if err != nil { - t.Fatalf("on %d: %v", i, err) - } - if got != i { - t.Errorf("for input %q = %d; want %d", line, got, i) - } - } - _, err := parseHexUint([]byte("bogus")) - if err == nil { - t.Error("expected error on bogus input") - } -} diff --git a/libgo/go/net/http/httputil/dump.go b/libgo/go/net/http/httputil/dump.go index 2a7a413d01a..ac8f103f9b9 100644 --- a/libgo/go/net/http/httputil/dump.go +++ b/libgo/go/net/http/httputil/dump.go @@ -95,19 +95,27 @@ func DumpRequestOut(req *http.Request, body bool) ([]byte, error) { // with a dummy response. var buf bytes.Buffer // records the output pr, pw := io.Pipe() + defer pr.Close() + defer pw.Close() dr := &delegateReader{c: make(chan io.Reader)} // Wait for the request before replying with a dummy response: go func() { - http.ReadRequest(bufio.NewReader(pr)) + req, err := http.ReadRequest(bufio.NewReader(pr)) + if err == nil { + // Ensure all the body is read; otherwise + // we'll get a partial dump. 
+ io.Copy(ioutil.Discard, req.Body) + req.Body.Close() + } dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\n\r\n") }() t := &http.Transport{ + DisableKeepAlives: true, Dial: func(net, addr string) (net.Conn, error) { return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil }, } - defer t.CloseIdleConnections() _, err := t.RoundTrip(reqSend) diff --git a/libgo/go/net/http/httputil/dump_test.go b/libgo/go/net/http/httputil/dump_test.go index e1ffb3935ac..024ee5a86f4 100644 --- a/libgo/go/net/http/httputil/dump_test.go +++ b/libgo/go/net/http/httputil/dump_test.go @@ -111,6 +111,30 @@ var dumpTests = []dumpTest{ NoBody: true, }, + + // Request with Body > 8196 (default buffer size) + { + Req: http.Request{ + Method: "POST", + URL: &url.URL{ + Scheme: "http", + Host: "post.tld", + Path: "/", + }, + ContentLength: 8193, + ProtoMajor: 1, + ProtoMinor: 1, + }, + + Body: bytes.Repeat([]byte("a"), 8193), + + WantDumpOut: "POST / HTTP/1.1\r\n" + + "Host: post.tld\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 8193\r\n" + + "Accept-Encoding: gzip\r\n\r\n" + + strings.Repeat("a", 8193), + }, } func TestDumpRequest(t *testing.T) { @@ -125,6 +149,8 @@ func TestDumpRequest(t *testing.T) { tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b)) case func() io.ReadCloser: tt.Req.Body = b() + default: + t.Fatalf("Test %d: unsupported Body of %T", i, tt.Body) } } setBody() @@ -159,7 +185,9 @@ func TestDumpRequest(t *testing.T) { } } if dg := runtime.NumGoroutine() - numg0; dg > 4 { - t.Errorf("Unexpectedly large number of new goroutines: %d new", dg) + buf := make([]byte, 4096) + buf = buf[:runtime.Stack(buf, true)] + t.Errorf("Unexpectedly large number of new goroutines: %d new: %s", dg, buf) } } diff --git a/libgo/go/net/http/httputil/httputil.go b/libgo/go/net/http/httputil/httputil.go index 74fb6c6556f..2e523e9e269 100644 --- a/libgo/go/net/http/httputil/httputil.go +++ b/libgo/go/net/http/httputil/httputil.go @@ -6,7 +6,10 @@ // more common ones in the net/http package. package httputil -import "io" +import ( + "io" + "net/http/internal" +) // NewChunkedReader returns a new chunkedReader that translates the data read from r // out of HTTP "chunked" format before returning it. @@ -15,7 +18,7 @@ import "io" // NewChunkedReader is not needed by normal applications. The http package // automatically decodes chunking when reading response bodies. func NewChunkedReader(r io.Reader) io.Reader { - return newChunkedReader(r) + return internal.NewChunkedReader(r) } // NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP @@ -28,5 +31,9 @@ func NewChunkedReader(r io.Reader) io.Reader { // would result in double chunking or chunking with a Content-Length // length, both of which are wrong. func NewChunkedWriter(w io.Writer) io.WriteCloser { - return newChunkedWriter(w) + return internal.NewChunkedWriter(w) } + +// ErrLineTooLong is returned when reading malformed chunked data +// with lines that are too long. +var ErrLineTooLong = internal.ErrLineTooLong diff --git a/libgo/go/net/http/httputil/reverseproxy.go b/libgo/go/net/http/httputil/reverseproxy.go index 48ada5f5fdb..ab463701803 100644 --- a/libgo/go/net/http/httputil/reverseproxy.go +++ b/libgo/go/net/http/httputil/reverseproxy.go @@ -40,6 +40,12 @@ type ReverseProxy struct { // response body. // If zero, no periodic flushing is done. FlushInterval time.Duration + + // ErrorLog specifies an optional logger for errors + // that occur when attempting to proxy the request. 
+ // If nil, logging goes to os.Stderr via the log package's + // standard logger. + ErrorLog *log.Logger } func singleJoiningSlash(a, b string) string { @@ -138,7 +144,7 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { res, err := transport.RoundTrip(outreq) if err != nil { - log.Printf("http: proxy error: %v", err) + p.logf("http: proxy error: %v", err) rw.WriteHeader(http.StatusInternalServerError) return } @@ -171,6 +177,14 @@ func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) { io.Copy(dst, src) } +func (p *ReverseProxy) logf(format string, args ...interface{}) { + if p.ErrorLog != nil { + p.ErrorLog.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + type writeFlusher interface { io.Writer http.Flusher diff --git a/libgo/go/net/http/chunked.go b/libgo/go/net/http/internal/chunked.go index 749f29d3269..9294deb3e5e 100644 --- a/libgo/go/net/http/chunked.go +++ b/libgo/go/net/http/internal/chunked.go @@ -4,10 +4,9 @@ // The wire protocol for HTTP's "chunked" Transfer-Encoding. -// This code is duplicated in net/http and net/http/httputil. -// Please make any changes in both files. - -package http +// Package internal contains HTTP internals shared by net/http and +// net/http/httputil. +package internal import ( "bufio" @@ -21,13 +20,13 @@ const maxLineLength = 4096 // assumed <= bufio.defaultBufSize var ErrLineTooLong = errors.New("header line too long") -// newChunkedReader returns a new chunkedReader that translates the data read from r +// NewChunkedReader returns a new chunkedReader that translates the data read from r // out of HTTP "chunked" format before returning it. // The chunkedReader returns io.EOF when the final 0-length chunk is read. // -// newChunkedReader is not needed by normal applications. The http package +// NewChunkedReader is not needed by normal applications. The http package // automatically decodes chunking when reading response bodies. -func newChunkedReader(r io.Reader) io.Reader { +func NewChunkedReader(r io.Reader) io.Reader { br, ok := r.(*bufio.Reader) if !ok { br = bufio.NewReader(r) @@ -135,16 +134,16 @@ func isASCIISpace(b byte) bool { return b == ' ' || b == '\t' || b == '\n' || b == '\r' } -// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP +// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP // "chunked" format before writing them to w. Closing the returned chunkedWriter // sends the final 0-length chunk that marks the end of the stream. // -// newChunkedWriter is not needed by normal applications. The http +// NewChunkedWriter is not needed by normal applications. The http // package adds chunking automatically if handlers don't set a // Content-Length header. Using newChunkedWriter inside a handler // would result in double chunking or chunking with a Content-Length // length, both of which are wrong. -func newChunkedWriter(w io.Writer) io.WriteCloser { +func NewChunkedWriter(w io.Writer) io.WriteCloser { return &chunkedWriter{w} } diff --git a/libgo/go/net/http/chunked_test.go b/libgo/go/net/http/internal/chunked_test.go index 34544790aff..ebc626ea9d0 100644 --- a/libgo/go/net/http/chunked_test.go +++ b/libgo/go/net/http/internal/chunked_test.go @@ -2,10 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This code is duplicated in net/http and net/http/httputil. -// Please make any changes in both files. 
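
A sketch of the new ErrorLog hook on ReverseProxy shown above: proxy errors go to a dedicated logger instead of the log package's process-wide default. The backend address, listen address, and log destination are placeholders:

    package main

    import (
        "log"
        "net/http"
        "net/http/httputil"
        "net/url"
        "os"
    )

    func main() {
        backend, _ := url.Parse("http://127.0.0.1:9000")
        proxy := httputil.NewSingleHostReverseProxy(backend)
        proxy.ErrorLog = log.New(os.Stdout, "proxy: ", log.LstdFlags)
        http.ListenAndServe("127.0.0.1:8080", proxy)
    }
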
- -package http +package internal import ( "bufio" @@ -20,7 +17,7 @@ import ( func TestChunk(t *testing.T) { var b bytes.Buffer - w := newChunkedWriter(&b) + w := NewChunkedWriter(&b) const chunk1 = "hello, " const chunk2 = "world! 0123456789abcdef" w.Write([]byte(chunk1)) @@ -31,7 +28,7 @@ func TestChunk(t *testing.T) { t.Fatalf("chunk writer wrote %q; want %q", g, e) } - r := newChunkedReader(&b) + r := NewChunkedReader(&b) data, err := ioutil.ReadAll(r) if err != nil { t.Logf(`data: "%s"`, data) @@ -46,12 +43,12 @@ func TestChunkReadMultiple(t *testing.T) { // Bunch of small chunks, all read together. { var b bytes.Buffer - w := newChunkedWriter(&b) + w := NewChunkedWriter(&b) w.Write([]byte("foo")) w.Write([]byte("bar")) w.Close() - r := newChunkedReader(&b) + r := NewChunkedReader(&b) buf := make([]byte, 10) n, err := r.Read(buf) if n != 6 || err != io.EOF { @@ -67,7 +64,7 @@ func TestChunkReadMultiple(t *testing.T) { // should prevent the second chunk header from being read. { var b bytes.Buffer - w := newChunkedWriter(&b) + w := NewChunkedWriter(&b) // fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes, // the same as the bufio ReaderSize below (the minimum), so even // though we're going to try to Read with a buffer larger enough to also @@ -78,7 +75,7 @@ func TestChunkReadMultiple(t *testing.T) { w.Write([]byte(shortChunk)) w.Close() - r := newChunkedReader(bufio.NewReaderSize(&b, 16)) + r := NewChunkedReader(bufio.NewReaderSize(&b, 16)) buf := make([]byte, len(fillBufChunk)+len(shortChunk)) n, err := r.Read(buf) if n != len(fillBufChunk) || err != nil { @@ -97,7 +94,7 @@ func TestChunkReadMultiple(t *testing.T) { // And test that we see an EOF chunk, even though our buffer is already full: { - r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n"))) + r := NewChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n"))) buf := make([]byte, 3) n, err := r.Read(buf) if n != 3 || err != io.EOF { @@ -114,7 +111,7 @@ func TestChunkReaderAllocs(t *testing.T) { t.Skip("skipping in short mode") } var buf bytes.Buffer - w := newChunkedWriter(&buf) + w := NewChunkedWriter(&buf) a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc") w.Write(a) w.Write(b) @@ -127,7 +124,7 @@ func TestChunkReaderAllocs(t *testing.T) { mallocs := testing.AllocsPerRun(100, func() { byter.Seek(0, 0) bufr.Reset(byter) - r := newChunkedReader(bufr) + r := NewChunkedReader(bufr) n, err := io.ReadFull(r, readBuf) if n != len(readBuf)-1 { t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1) diff --git a/libgo/go/net/http/z_last_test.go b/libgo/go/net/http/main_test.go index 5a0cc119849..b8c71fd19fd 100644 --- a/libgo/go/net/http/z_last_test.go +++ b/libgo/go/net/http/main_test.go @@ -5,7 +5,9 @@ package http_test import ( + "fmt" "net/http" + "os" "runtime" "sort" "strings" @@ -13,6 +15,14 @@ import ( "time" ) +func TestMain(m *testing.M) { + v := m.Run() + if v == 0 && goroutineLeaked() { + os.Exit(1) + } + os.Exit(v) +} + func interestingGoroutines() (gs []string) { buf := make([]byte, 2<<20) buf = buf[:runtime.Stack(buf, true)] @@ -30,6 +40,7 @@ func interestingGoroutines() (gs []string) { // These only show up with GOTRACEBACK=2; Issue 5005 (comment 28) strings.Contains(stack, "runtime.goexit") || strings.Contains(stack, "created by runtime.gc") || + strings.Contains(stack, "net/http_test.interestingGoroutines") || strings.Contains(stack, "runtime.MHeap_Scavenger") { continue } @@ -40,10 +51,10 @@ func interestingGoroutines() (gs 
[]string) { } // Verify the other tests didn't leave any goroutines running. -// This is in a file named z_last_test.go so it sorts at the end. -func TestGoroutinesRunning(t *testing.T) { +func goroutineLeaked() bool { if testing.Short() { - t.Skip("not counting goroutines for leakage in -short mode") + // not counting goroutines for leakage in -short mode + return false } gs := interestingGoroutines() @@ -54,13 +65,14 @@ func TestGoroutinesRunning(t *testing.T) { n++ } - t.Logf("num goroutines = %d", n) - if n > 0 { - t.Error("Too many goroutines.") - for stack, count := range stackCount { - t.Logf("%d instances of:\n%s", count, stack) - } + if n == 0 { + return false + } + fmt.Fprintf(os.Stderr, "Too many goroutines running after net/http test(s).\n") + for stack, count := range stackCount { + fmt.Fprintf(os.Stderr, "%d instances of:\n%s\n", count, stack) } + return true } func afterTest(t *testing.T) { diff --git a/libgo/go/net/http/pprof/pprof.go b/libgo/go/net/http/pprof/pprof.go index 0c7548e3ef3..a23f1bc4bc6 100644 --- a/libgo/go/net/http/pprof/pprof.go +++ b/libgo/go/net/http/pprof/pprof.go @@ -162,6 +162,10 @@ func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "Unknown profile: %s\n", name) return } + gc, _ := strconv.Atoi(r.FormValue("gc")) + if name == "heap" && gc > 0 { + runtime.GC() + } p.WriteTo(w, debug) return } diff --git a/libgo/go/net/http/readrequest_test.go b/libgo/go/net/http/readrequest_test.go index ffdd6a892da..e930d99af62 100644 --- a/libgo/go/net/http/readrequest_test.go +++ b/libgo/go/net/http/readrequest_test.go @@ -11,6 +11,7 @@ import ( "io" "net/url" "reflect" + "strings" "testing" ) @@ -295,14 +296,39 @@ var reqTests = []reqTest{ noTrailer, noError, }, + + // Connection: close. golang.org/issue/8261 + { + "GET / HTTP/1.1\r\nHost: issue8261.com\r\nConnection: close\r\n\r\n", + &Request{ + Method: "GET", + URL: &url.URL{ + Path: "/", + }, + Header: Header{ + // This wasn't removed from Go 1.0 to + // Go 1.3, so locking it in that we + // keep this: + "Connection": []string{"close"}, + }, + Host: "issue8261.com", + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Close: true, + RequestURI: "/", + }, + + noBody, + noTrailer, + noError, + }, } func TestReadRequest(t *testing.T) { for i := range reqTests { tt := &reqTests[i] - var braw bytes.Buffer - braw.WriteString(tt.Raw) - req, err := ReadRequest(bufio.NewReader(&braw)) + req, err := ReadRequest(bufio.NewReader(strings.NewReader(tt.Raw))) if err != nil { if err.Error() != tt.Error { t.Errorf("#%d: error %q, want error %q", i, err.Error(), tt.Error) @@ -311,21 +337,22 @@ func TestReadRequest(t *testing.T) { } rbody := req.Body req.Body = nil - diff(t, fmt.Sprintf("#%d Request", i), req, tt.Req) + testName := fmt.Sprintf("Test %d (%q)", i, tt.Raw) + diff(t, testName, req, tt.Req) var bout bytes.Buffer if rbody != nil { _, err := io.Copy(&bout, rbody) if err != nil { - t.Fatalf("#%d. copying body: %v", i, err) + t.Fatalf("%s: copying body: %v", testName, err) } rbody.Close() } body := bout.String() if body != tt.Body { - t.Errorf("#%d: Body = %q want %q", i, body, tt.Body) + t.Errorf("%s: Body = %q want %q", testName, body, tt.Body) } if !reflect.DeepEqual(tt.Trailer, req.Trailer) { - t.Errorf("#%d. 
Trailers differ.\n got: %v\nwant: %v", i, req.Trailer, tt.Trailer) + t.Errorf("%s: Trailers differ.\n got: %v\nwant: %v", testName, req.Trailer, tt.Trailer) } } } diff --git a/libgo/go/net/http/request.go b/libgo/go/net/http/request.go index a67092066ad..487eebcb841 100644 --- a/libgo/go/net/http/request.go +++ b/libgo/go/net/http/request.go @@ -10,6 +10,7 @@ import ( "bufio" "bytes" "crypto/tls" + "encoding/base64" "errors" "fmt" "io" @@ -390,10 +391,16 @@ func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) err w = bw } - fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), ruri) + _, err := fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), ruri) + if err != nil { + return err + } // Header lines - fmt.Fprintf(w, "Host: %s\r\n", host) + _, err = fmt.Fprintf(w, "Host: %s\r\n", host) + if err != nil { + return err + } // Use the defaultUserAgent unless the Header contains one, which // may be blank to not send the header. @@ -404,7 +411,10 @@ func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) err } } if userAgent != "" { - fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent) + _, err = fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent) + if err != nil { + return err + } } // Process Body,ContentLength,Close,Trailer @@ -429,7 +439,10 @@ func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) err } } - io.WriteString(w, "\r\n") + _, err = io.WriteString(w, "\r\n") + if err != nil { + return err + } // Write body and trailer err = tw.WriteBody(w) @@ -509,6 +522,35 @@ func NewRequest(method, urlStr string, body io.Reader) (*Request, error) { return req, nil } +// BasicAuth returns the username and password provided in the request's +// Authorization header, if the request uses HTTP Basic Authentication. +// See RFC 2617, Section 2. +func (r *Request) BasicAuth() (username, password string, ok bool) { + auth := r.Header.Get("Authorization") + if auth == "" { + return + } + return parseBasicAuth(auth) +} + +// parseBasicAuth parses an HTTP Basic Authentication string. +// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). +func parseBasicAuth(auth string) (username, password string, ok bool) { + if !strings.HasPrefix(auth, "Basic ") { + return + } + c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) + if err != nil { + return + } + cs := string(c) + s := strings.IndexByte(cs, ':') + if s < 0 { + return + } + return cs[:s], cs[s+1:], true +} + // SetBasicAuth sets the request's Authorization header to use HTTP // Basic Authentication with the provided username and password. // @@ -623,6 +665,7 @@ func ReadRequest(b *bufio.Reader) (req *Request, err error) { return nil, err } + req.Close = shouldClose(req.ProtoMajor, req.ProtoMinor, req.Header, false) return req, nil } @@ -807,8 +850,10 @@ func (r *Request) ParseMultipartForm(maxMemory int64) error { // FormValue returns the first value for the named component of the query. // POST and PUT body parameters take precedence over URL query string values. -// FormValue calls ParseMultipartForm and ParseForm if necessary. -// To access multiple values of the same key use ParseForm. +// FormValue calls ParseMultipartForm and ParseForm if necessary and ignores +// any errors returned by these functions. +// To access multiple values of the same key, call ParseForm and +// then inspect Request.Form directly. 
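As the revised doc comment notes, a handler that needs every value for a key, or wants to see parse errors, should call ParseForm itself and read Request.Form; a minimal sketch, with the handler and form field names chosen for illustration:

package main

import (
	"fmt"
	"net/http"
)

// tagsHandler echoes every "tag" value sent in the query string or POST body.
func tagsHandler(w http.ResponseWriter, r *http.Request) {
	// ParseForm reports errors that FormValue would silently ignore.
	if err := r.ParseForm(); err != nil {
		http.Error(w, "bad form data: "+err.Error(), http.StatusBadRequest)
		return
	}
	// r.Form holds all values for a key; FormValue returns only the first one.
	for _, tag := range r.Form["tag"] {
		fmt.Fprintln(w, tag)
	}
}

func main() {
	http.HandleFunc("/tags", tagsHandler)
	http.ListenAndServe("127.0.0.1:8080", nil)
}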
func (r *Request) FormValue(key string) string { if r.Form == nil { r.ParseMultipartForm(defaultMaxMemory) @@ -821,7 +866,8 @@ func (r *Request) FormValue(key string) string { // PostFormValue returns the first value for the named component of the POST // or PUT request body. URL query parameters are ignored. -// PostFormValue calls ParseMultipartForm and ParseForm if necessary. +// PostFormValue calls ParseMultipartForm and ParseForm if necessary and ignores +// any errors returned by these functions. func (r *Request) PostFormValue(key string) string { if r.PostForm == nil { r.ParseMultipartForm(defaultMaxMemory) diff --git a/libgo/go/net/http/request_test.go b/libgo/go/net/http/request_test.go index b9fa3c2bfc4..759ea4e8b5d 100644 --- a/libgo/go/net/http/request_test.go +++ b/libgo/go/net/http/request_test.go @@ -7,6 +7,7 @@ package http_test import ( "bufio" "bytes" + "encoding/base64" "fmt" "io" "io/ioutil" @@ -396,6 +397,75 @@ func TestParseHTTPVersion(t *testing.T) { } } +type getBasicAuthTest struct { + username, password string + ok bool +} + +type parseBasicAuthTest getBasicAuthTest + +type basicAuthCredentialsTest struct { + username, password string +} + +var getBasicAuthTests = []struct { + username, password string + ok bool +}{ + {"Aladdin", "open sesame", true}, + {"Aladdin", "open:sesame", true}, + {"", "", true}, +} + +func TestGetBasicAuth(t *testing.T) { + for _, tt := range getBasicAuthTests { + r, _ := NewRequest("GET", "http://example.com/", nil) + r.SetBasicAuth(tt.username, tt.password) + username, password, ok := r.BasicAuth() + if ok != tt.ok || username != tt.username || password != tt.password { + t.Errorf("BasicAuth() = %#v, want %#v", getBasicAuthTest{username, password, ok}, + getBasicAuthTest{tt.username, tt.password, tt.ok}) + } + } + // Unauthenticated request. 
+ r, _ := NewRequest("GET", "http://example.com/", nil) + username, password, ok := r.BasicAuth() + if ok { + t.Errorf("expected false from BasicAuth when the request is unauthenticated") + } + want := basicAuthCredentialsTest{"", ""} + if username != want.username || password != want.password { + t.Errorf("expected credentials: %#v when the request is unauthenticated, got %#v", + want, basicAuthCredentialsTest{username, password}) + } +} + +var parseBasicAuthTests = []struct { + header, username, password string + ok bool +}{ + {"Basic " + base64.StdEncoding.EncodeToString([]byte("Aladdin:open sesame")), "Aladdin", "open sesame", true}, + {"Basic " + base64.StdEncoding.EncodeToString([]byte("Aladdin:open:sesame")), "Aladdin", "open:sesame", true}, + {"Basic " + base64.StdEncoding.EncodeToString([]byte(":")), "", "", true}, + {"Basic" + base64.StdEncoding.EncodeToString([]byte("Aladdin:open sesame")), "", "", false}, + {base64.StdEncoding.EncodeToString([]byte("Aladdin:open sesame")), "", "", false}, + {"Basic ", "", "", false}, + {"Basic Aladdin:open sesame", "", "", false}, + {`Digest username="Aladdin"`, "", "", false}, +} + +func TestParseBasicAuth(t *testing.T) { + for _, tt := range parseBasicAuthTests { + r, _ := NewRequest("GET", "http://example.com/", nil) + r.Header.Set("Authorization", tt.header) + username, password, ok := r.BasicAuth() + if ok != tt.ok || username != tt.username || password != tt.password { + t.Errorf("BasicAuth() = %#v, want %#v", getBasicAuthTest{username, password, ok}, + getBasicAuthTest{tt.username, tt.password, tt.ok}) + } + } +} + type logWrites struct { t *testing.T dst *[]string diff --git a/libgo/go/net/http/requestwrite_test.go b/libgo/go/net/http/requestwrite_test.go index dc0e204cac9..7a6bd587863 100644 --- a/libgo/go/net/http/requestwrite_test.go +++ b/libgo/go/net/http/requestwrite_test.go @@ -280,7 +280,7 @@ var reqWriteTests = []reqWriteTest{ ContentLength: 10, // but we're going to send only 5 bytes }, Body: []byte("12345"), - WantError: errors.New("http: Request.ContentLength=10 with Body length 5"), + WantError: errors.New("http: ContentLength=10 with Body length 5"), }, // Request with a ContentLength of 4 but an 8 byte body. @@ -294,7 +294,7 @@ var reqWriteTests = []reqWriteTest{ ContentLength: 4, // but we're going to try to send 8 bytes }, Body: []byte("12345678"), - WantError: errors.New("http: Request.ContentLength=4 with Body length 8"), + WantError: errors.New("http: ContentLength=4 with Body length 8"), }, // Request with a 5 ContentLength and nil body. @@ -563,3 +563,61 @@ func mustParseURL(s string) *url.URL { } return u } + +type writerFunc func([]byte) (int, error) + +func (f writerFunc) Write(p []byte) (int, error) { return f(p) } + +// TestRequestWriteError tests the Write err != nil checks in (*Request).write. +func TestRequestWriteError(t *testing.T) { + failAfter, writeCount := 0, 0 + errFail := errors.New("fake write failure") + + // w is the buffered io.Writer to write the request to. It + // fails exactly once on its Nth Write call, as controlled by + // failAfter. It also tracks the number of calls in + // writeCount. 
+ w := struct { + io.ByteWriter // to avoid being wrapped by a bufio.Writer + io.Writer + }{ + nil, + writerFunc(func(p []byte) (n int, err error) { + writeCount++ + if failAfter == 0 { + err = errFail + } + failAfter-- + return len(p), err + }), + } + + req, _ := NewRequest("GET", "http://example.com/", nil) + const writeCalls = 4 // number of Write calls in current implementation + sawGood := false + for n := 0; n <= writeCalls+2; n++ { + failAfter = n + writeCount = 0 + err := req.Write(w) + var wantErr error + if n < writeCalls { + wantErr = errFail + } + if err != wantErr { + t.Errorf("for fail-after %d Writes, err = %v; want %v", n, err, wantErr) + continue + } + if err == nil { + sawGood = true + if writeCount != writeCalls { + t.Fatalf("writeCalls constant is outdated in test") + } + } + if writeCount > writeCalls || writeCount > n+1 { + t.Errorf("for fail-after %d, saw unexpectedly high (%d) write calls", n, writeCount) + } + } + if !sawGood { + t.Fatalf("writeCalls constant is outdated in test") + } +} diff --git a/libgo/go/net/http/response_test.go b/libgo/go/net/http/response_test.go index 4b8946f7ae4..06e940d9aba 100644 --- a/libgo/go/net/http/response_test.go +++ b/libgo/go/net/http/response_test.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "io/ioutil" + "net/http/internal" "net/url" "reflect" "regexp" @@ -376,6 +377,34 @@ some body`, "Body here\n", }, + + // 206 Partial Content. golang.org/issue/8923 + { + "HTTP/1.1 206 Partial Content\r\n" + + "Content-Type: text/plain; charset=utf-8\r\n" + + "Accept-Ranges: bytes\r\n" + + "Content-Range: bytes 0-5/1862\r\n" + + "Content-Length: 6\r\n\r\n" + + "foobar", + + Response{ + Status: "206 Partial Content", + StatusCode: 206, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{ + "Accept-Ranges": []string{"bytes"}, + "Content-Length": []string{"6"}, + "Content-Type": []string{"text/plain; charset=utf-8"}, + "Content-Range": []string{"bytes 0-5/1862"}, + }, + ContentLength: 6, + }, + + "foobar", + }, } func TestReadResponse(t *testing.T) { @@ -451,7 +480,7 @@ func TestReadResponseCloseInMiddle(t *testing.T) { } var wr io.Writer = &buf if test.chunked { - wr = newChunkedWriter(wr) + wr = internal.NewChunkedWriter(wr) } if test.compressed { buf.WriteString("Content-Encoding: gzip\r\n") diff --git a/libgo/go/net/http/serve_test.go b/libgo/go/net/http/serve_test.go index 8371dd82f58..6bd168d3de3 100644 --- a/libgo/go/net/http/serve_test.go +++ b/libgo/go/net/http/serve_test.go @@ -15,6 +15,7 @@ import ( "io" "io/ioutil" "log" + "math/rand" "net" . 
"net/http" "net/http/httptest" @@ -777,6 +778,35 @@ func TestChunkedResponseHeaders(t *testing.T) { } } +func TestIdentityResponseHeaders(t *testing.T) { + defer afterTest(t) + log.SetOutput(ioutil.Discard) // is noisy otherwise + defer log.SetOutput(os.Stderr) + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Transfer-Encoding", "identity") + w.(Flusher).Flush() + fmt.Fprintf(w, "I am an identity response.") + })) + defer ts.Close() + + res, err := Get(ts.URL) + if err != nil { + t.Fatalf("Get error: %v", err) + } + defer res.Body.Close() + + if g, e := res.TransferEncoding, []string(nil); !reflect.DeepEqual(g, e) { + t.Errorf("expected TransferEncoding of %v; got %v", e, g) + } + if _, haveCL := res.Header["Content-Length"]; haveCL { + t.Errorf("Unexpected Content-Length") + } + if !res.Close { + t.Errorf("expected Connection: close; got %v", res.Close) + } +} + // Test304Responses verifies that 304s don't declare that they're // chunking in their response headers and aren't allowed to produce // output. @@ -1188,6 +1218,82 @@ func TestTimeoutHandler(t *testing.T) { } } +// See issues 8209 and 8414. +func TestTimeoutHandlerRace(t *testing.T) { + defer afterTest(t) + + delayHi := HandlerFunc(func(w ResponseWriter, r *Request) { + ms, _ := strconv.Atoi(r.URL.Path[1:]) + if ms == 0 { + ms = 1 + } + for i := 0; i < ms; i++ { + w.Write([]byte("hi")) + time.Sleep(time.Millisecond) + } + }) + + ts := httptest.NewServer(TimeoutHandler(delayHi, 20*time.Millisecond, "")) + defer ts.Close() + + var wg sync.WaitGroup + gate := make(chan bool, 10) + n := 50 + if testing.Short() { + n = 10 + gate = make(chan bool, 3) + } + for i := 0; i < n; i++ { + gate <- true + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-gate }() + res, err := Get(fmt.Sprintf("%s/%d", ts.URL, rand.Intn(50))) + if err == nil { + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + } + }() + } + wg.Wait() +} + +// See issues 8209 and 8414. +func TestTimeoutHandlerRaceHeader(t *testing.T) { + defer afterTest(t) + + delay204 := HandlerFunc(func(w ResponseWriter, r *Request) { + w.WriteHeader(204) + }) + + ts := httptest.NewServer(TimeoutHandler(delay204, time.Nanosecond, "")) + defer ts.Close() + + var wg sync.WaitGroup + gate := make(chan bool, 50) + n := 500 + if testing.Short() { + n = 10 + } + for i := 0; i < n; i++ { + gate <- true + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-gate }() + res, err := Get(ts.URL) + if err != nil { + t.Error(err) + return + } + defer res.Body.Close() + io.Copy(ioutil.Discard, res.Body) + }() + } + wg.Wait() +} + // Verifies we don't path.Clean() on the wrong parts in redirects. 
func TestRedirectMunging(t *testing.T) { req, _ := NewRequest("GET", "http://example.com/", nil) @@ -2405,13 +2511,13 @@ func TestServerConnState(t *testing.T) { } want := map[int][]ConnState{ - 1: []ConnState{StateNew, StateActive, StateIdle, StateActive, StateClosed}, - 2: []ConnState{StateNew, StateActive, StateIdle, StateActive, StateClosed}, - 3: []ConnState{StateNew, StateActive, StateHijacked}, - 4: []ConnState{StateNew, StateActive, StateHijacked}, - 5: []ConnState{StateNew, StateClosed}, - 6: []ConnState{StateNew, StateActive, StateClosed}, - 7: []ConnState{StateNew, StateActive, StateIdle, StateClosed}, + 1: {StateNew, StateActive, StateIdle, StateActive, StateClosed}, + 2: {StateNew, StateActive, StateIdle, StateActive, StateClosed}, + 3: {StateNew, StateActive, StateHijacked}, + 4: {StateNew, StateActive, StateHijacked}, + 5: {StateNew, StateClosed}, + 6: {StateNew, StateActive, StateClosed}, + 7: {StateNew, StateActive, StateIdle, StateClosed}, } logString := func(m map[int][]ConnState) string { var b bytes.Buffer @@ -2531,6 +2637,126 @@ func TestServerConnStateNew(t *testing.T) { } } +type closeWriteTestConn struct { + rwTestConn + didCloseWrite bool +} + +func (c *closeWriteTestConn) CloseWrite() error { + c.didCloseWrite = true + return nil +} + +func TestCloseWrite(t *testing.T) { + var srv Server + var testConn closeWriteTestConn + c, err := ExportServerNewConn(&srv, &testConn) + if err != nil { + t.Fatal(err) + } + ExportCloseWriteAndWait(c) + if !testConn.didCloseWrite { + t.Error("didn't see CloseWrite call") + } +} + +// This verifies that a handler can Flush and then Hijack. +// +// An similar test crashed once during development, but it was only +// testing this tangentially and temporarily until another TODO was +// fixed. +// +// So add an explicit test for this. +func TestServerFlushAndHijack(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + io.WriteString(w, "Hello, ") + w.(Flusher).Flush() + conn, buf, _ := w.(Hijacker).Hijack() + buf.WriteString("6\r\nworld!\r\n0\r\n\r\n") + if err := buf.Flush(); err != nil { + t.Error(err) + } + if err := conn.Close(); err != nil { + t.Error(err) + } + })) + defer ts.Close() + res, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + all, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if want := "Hello, world!"; string(all) != want { + t.Errorf("Got %q; want %q", all, want) + } +} + +// golang.org/issue/8534 -- the Server shouldn't reuse a connection +// for keep-alive after it's seen any Write error (e.g. a timeout) on +// that net.Conn. +// +// To test, verify we don't timeout or see fewer unique client +// addresses (== unique connections) than requests. 
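The keep-alive test described above provokes write errors through a server WriteTimeout; a minimal sketch of a server configured that way (the address and durations are placeholders):

package main

import (
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr: "127.0.0.1:8080",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok\n"))
		}),
		ReadTimeout: 5 * time.Second,
		// If writing a response takes longer than this, the write fails and,
		// with the change above, the connection is not reused for keep-alive.
		WriteTimeout: 250 * time.Millisecond,
	}
	srv.ListenAndServe()
}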
+func TestServerKeepAliveAfterWriteError(t *testing.T) { + if testing.Short() { + t.Skip("skipping in -short mode") + } + defer afterTest(t) + const numReq = 3 + addrc := make(chan string, numReq) + ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) { + addrc <- r.RemoteAddr + time.Sleep(500 * time.Millisecond) + w.(Flusher).Flush() + })) + ts.Config.WriteTimeout = 250 * time.Millisecond + ts.Start() + defer ts.Close() + + errc := make(chan error, numReq) + go func() { + defer close(errc) + for i := 0; i < numReq; i++ { + res, err := Get(ts.URL) + if res != nil { + res.Body.Close() + } + errc <- err + } + }() + + timeout := time.NewTimer(numReq * 2 * time.Second) // 4x overkill + defer timeout.Stop() + addrSeen := map[string]bool{} + numOkay := 0 + for { + select { + case v := <-addrc: + addrSeen[v] = true + case err, ok := <-errc: + if !ok { + if len(addrSeen) != numReq { + t.Errorf("saw %d unique client addresses; want %d", len(addrSeen), numReq) + } + if numOkay != 0 { + t.Errorf("got %d successful client requests; want 0", numOkay) + } + return + } + if err == nil { + numOkay++ + } + case <-timeout.C: + t.Fatal("timeout waiting for requests to complete") + } + } +} + func BenchmarkClientServer(b *testing.B) { b.ReportAllocs() b.StopTimer() @@ -2560,24 +2786,44 @@ func BenchmarkClientServer(b *testing.B) { } func BenchmarkClientServerParallel4(b *testing.B) { - benchmarkClientServerParallel(b, 4) + benchmarkClientServerParallel(b, 4, false) } func BenchmarkClientServerParallel64(b *testing.B) { - benchmarkClientServerParallel(b, 64) + benchmarkClientServerParallel(b, 64, false) } -func benchmarkClientServerParallel(b *testing.B, parallelism int) { +func BenchmarkClientServerParallelTLS4(b *testing.B) { + benchmarkClientServerParallel(b, 4, true) +} + +func BenchmarkClientServerParallelTLS64(b *testing.B) { + benchmarkClientServerParallel(b, 64, true) +} + +func benchmarkClientServerParallel(b *testing.B, parallelism int, useTLS bool) { b.ReportAllocs() - ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) { + ts := httptest.NewUnstartedServer(HandlerFunc(func(rw ResponseWriter, r *Request) { fmt.Fprintf(rw, "Hello world.\n") })) + if useTLS { + ts.StartTLS() + } else { + ts.Start() + } defer ts.Close() b.ResetTimer() b.SetParallelism(parallelism) b.RunParallel(func(pb *testing.PB) { + noVerifyTransport := &Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + defer noVerifyTransport.CloseIdleConnections() + client := &Client{Transport: noVerifyTransport} for pb.Next() { - res, err := Get(ts.URL) + res, err := client.Get(ts.URL) if err != nil { b.Logf("Get: %v", err) continue diff --git a/libgo/go/net/http/server.go b/libgo/go/net/http/server.go index eae097eb8e9..008d5aa7a74 100644 --- a/libgo/go/net/http/server.go +++ b/libgo/go/net/http/server.go @@ -42,6 +42,12 @@ var ( // and then return. Returning signals that the request is finished // and that the HTTP server can move on to the next request on // the connection. +// +// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes +// that the effect of the panic was isolated to the active request. +// It recovers the panic, logs a stack trace to the server error log, +// and hangs up the connection. 
+// type Handler interface { ServeHTTP(ResponseWriter, *Request) } @@ -108,6 +114,8 @@ type conn struct { remoteAddr string // network address of remote side server *Server // the Server on which the connection arrived rwc net.Conn // i/o connection + w io.Writer // checkConnErrorWriter's copy of wrc, not zeroed on Hijack + werr error // any errors writing to w sr liveSwitchReader // where the LimitReader reads from; usually the rwc lr *io.LimitedReader // io.LimitReader(sr) buf *bufio.ReadWriter // buffered(lr,rwc), reading from bufio->limitReader->sr->rwc @@ -426,13 +434,14 @@ func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) { c.remoteAddr = rwc.RemoteAddr().String() c.server = srv c.rwc = rwc + c.w = rwc if debugServerConnections { c.rwc = newLoggingConn("server", c.rwc) } c.sr = liveSwitchReader{r: c.rwc} c.lr = io.LimitReader(&c.sr, noLimit).(*io.LimitedReader) br := newBufioReader(c.lr) - bw := newBufioWriterSize(c.rwc, 4<<10) + bw := newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) c.buf = bufio.NewReadWriter(br, bw) return c, nil } @@ -833,13 +842,20 @@ func (cw *chunkWriter) writeHeader(p []byte) { } else if hasCL { delHeader("Transfer-Encoding") } else if w.req.ProtoAtLeast(1, 1) { - // HTTP/1.1 or greater: use chunked transfer encoding - // to avoid closing the connection at EOF. - // TODO: this blows away any custom or stacked Transfer-Encoding they - // might have set. Deal with that as need arises once we have a valid - // use case. - cw.chunking = true - setHeader.transferEncoding = "chunked" + // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no + // content-length has been provided. The connection must be closed after the + // reply is written, and no chunking is to be done. This is the setup + // recommended in the Server-Sent Events candidate recommendation 11, + // section 8. + if hasTE && te == "identity" { + cw.chunking = false + w.closeAfterReply = true + } else { + // HTTP/1.1 or greater: use chunked transfer encoding + // to avoid closing the connection at EOF. + cw.chunking = true + setHeader.transferEncoding = "chunked" + } } else { // HTTP version < 1.1: cannot do chunked transfer // encoding and we don't know the Content-Length so @@ -943,8 +959,10 @@ func (w *response) bodyAllowed() bool { // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) // and which writes the chunk headers, if needed. -// 4. conn.buf, a bufio.Writer of default (4kB) bytes -// 5. the rwc, the net.Conn. +// 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> +// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write +// and populates c.werr with it if so. but otherwise writes to: +// 6. the rwc, the net.Conn. // // TODO(bradfitz): short-circuit some of the buffering when the // initial header contains both a Content-Type and Content-Length. @@ -1014,6 +1032,12 @@ func (w *response) finishRequest() { // Did not write enough. Avoid getting out of sync. w.closeAfterReply = true } + + // There was some error writing to the underlying connection + // during the request, so don't re-use this conn. + if w.conn.werr != nil { + w.closeAfterReply = true + } } func (w *response) Flush() { @@ -1058,15 +1082,21 @@ func (c *conn) close() { // This timeout is somewhat arbitrary (~latency around the planet). 
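Earlier in this file's diff, writeHeader learns to honour a handler-set identity transfer encoding by skipping chunking and closing the connection afterwards; a minimal handler sketch in that spirit (the event payloads are made up):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func events(w http.ResponseWriter, r *http.Request) {
	// No Content-Length plus an explicit identity encoding: the server now
	// writes the body unchunked and closes the connection when we return.
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Transfer-Encoding", "identity")

	f, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "data: tick %d\n\n", i)
		f.Flush()
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/events", events)
	http.ListenAndServe("127.0.0.1:8080", nil)
}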
const rstAvoidanceDelay = 500 * time.Millisecond +type closeWriter interface { + CloseWrite() error +} + +var _ closeWriter = (*net.TCPConn)(nil) + // closeWrite flushes any outstanding data and sends a FIN packet (if // client is connected via TCP), signalling that we're done. We then -// pause for a bit, hoping the client processes it before `any +// pause for a bit, hoping the client processes it before any // subsequent RST. // // See http://golang.org/issue/3595 func (c *conn) closeWriteAndWait() { c.finalFlush() - if tcp, ok := c.rwc.(*net.TCPConn); ok { + if tcp, ok := c.rwc.(closeWriter); ok { tcp.CloseWrite() } time.Sleep(rstAvoidanceDelay) @@ -1916,9 +1946,9 @@ func (tw *timeoutWriter) Header() Header { func (tw *timeoutWriter) Write(p []byte) (int, error) { tw.mu.Lock() - timedOut := tw.timedOut - tw.mu.Unlock() - if timedOut { + defer tw.mu.Unlock() + tw.wroteHeader = true // implicitly at least + if tw.timedOut { return 0, ErrHandlerTimeout } return tw.w.Write(p) @@ -1926,12 +1956,11 @@ func (tw *timeoutWriter) Write(p []byte) (int, error) { func (tw *timeoutWriter) WriteHeader(code int) { tw.mu.Lock() + defer tw.mu.Unlock() if tw.timedOut || tw.wroteHeader { - tw.mu.Unlock() return } tw.wroteHeader = true - tw.mu.Unlock() tw.w.WriteHeader(code) } @@ -2050,3 +2079,18 @@ func (c *loggingConn) Close() (err error) { log.Printf("%s.Close() = %v", c.name, err) return } + +// checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. +// It only contains one field (and a pointer field at that), so it +// fits in an interface value without an extra allocation. +type checkConnErrorWriter struct { + c *conn +} + +func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { + n, err = w.c.w.Write(p) // c.w == c.rwc, except after a hijack, when rwc is nil. + if err != nil && w.c.werr == nil { + w.c.werr = err + } + return +} diff --git a/libgo/go/net/http/transfer.go b/libgo/go/net/http/transfer.go index 7f63686528a..520500330bc 100644 --- a/libgo/go/net/http/transfer.go +++ b/libgo/go/net/http/transfer.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "io/ioutil" + "net/http/internal" "net/textproto" "sort" "strconv" @@ -18,6 +19,10 @@ import ( "sync" ) +// ErrLineTooLong is returned when reading request or response bodies +// with malformed chunked encoding. 
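Because the sentinel is re-exported here (and from httputil), callers can compare against it directly when a body read fails; a brief sketch, with the request URL a placeholder:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

func main() {
	resp, err := http.Get("http://example.com/chunked-stream") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	if _, err := io.Copy(ioutil.Discard, resp.Body); err == http.ErrLineTooLong {
		// The peer sent chunked data with an over-long chunk-size line.
		fmt.Println("malformed chunked encoding:", err)
	} else if err != nil {
		fmt.Println("read error:", err)
	}
}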
+var ErrLineTooLong = internal.ErrLineTooLong + type errorReader struct { err error } @@ -198,7 +203,7 @@ func (t *transferWriter) WriteBody(w io.Writer) error { // Write body if t.Body != nil { if chunked(t.TransferEncoding) { - cw := newChunkedWriter(w) + cw := internal.NewChunkedWriter(w) _, err = io.Copy(cw, t.Body) if err == nil { err = cw.Close() @@ -223,7 +228,7 @@ func (t *transferWriter) WriteBody(w io.Writer) error { } if !t.ResponseToHEAD && t.ContentLength != -1 && t.ContentLength != ncopy { - return fmt.Errorf("http: Request.ContentLength=%d with Body length %d", + return fmt.Errorf("http: ContentLength=%d with Body length %d", t.ContentLength, ncopy) } @@ -298,7 +303,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) { t.StatusCode = rr.StatusCode t.ProtoMajor = rr.ProtoMajor t.ProtoMinor = rr.ProtoMinor - t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header) + t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header, true) isResponse = true if rr.Request != nil { t.RequestMethod = rr.Request.Method @@ -365,7 +370,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) { if noBodyExpected(t.RequestMethod) { t.Body = eofReader } else { - t.Body = &body{src: newChunkedReader(r), hdr: msg, r: r, closing: t.Close} + t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close} } case realLength == 0: t.Body = eofReader @@ -497,7 +502,7 @@ func fixLength(isResponse bool, status int, requestMethod string, header Header, // Determine whether to hang up after sending a request and body, or // receiving a response and body // 'header' is the request headers -func shouldClose(major, minor int, header Header) bool { +func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool { if major < 1 { return true } else if major == 1 && minor == 0 { @@ -509,7 +514,9 @@ func shouldClose(major, minor int, header Header) bool { // TODO: Should split on commas, toss surrounding white space, // and check each field. if strings.ToLower(header.get("Connection")) == "close" { - header.Del("Connection") + if removeCloseHeader { + header.Del("Connection") + } return true } } diff --git a/libgo/go/net/http/transport.go b/libgo/go/net/http/transport.go index b1cc632a782..782f7cd395b 100644 --- a/libgo/go/net/http/transport.go +++ b/libgo/go/net/http/transport.go @@ -43,17 +43,20 @@ var DefaultTransport RoundTripper = &Transport{ // MaxIdleConnsPerHost. const DefaultMaxIdleConnsPerHost = 2 -// Transport is an implementation of RoundTripper that supports http, -// https, and http proxies (for either http or https with CONNECT). +// Transport is an implementation of RoundTripper that supports HTTP, +// HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT). // Transport can also cache connections for future re-use. type Transport struct { - idleMu sync.Mutex - idleConn map[connectMethodKey][]*persistConn - idleConnCh map[connectMethodKey]chan *persistConn + idleMu sync.Mutex + wantIdle bool // user has requested to close all idle conns + idleConn map[connectMethodKey][]*persistConn + idleConnCh map[connectMethodKey]chan *persistConn + reqMu sync.Mutex reqCanceler map[*Request]func() - altMu sync.RWMutex - altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper + + altMu sync.RWMutex + altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper // Proxy specifies a function to return a proxy for a given // Request. 
If the function returns a non-nil error, the @@ -61,11 +64,22 @@ type Transport struct { // If Proxy is nil or returns a nil *URL, no proxy is used. Proxy func(*Request) (*url.URL, error) - // Dial specifies the dial function for creating TCP - // connections. + // Dial specifies the dial function for creating unencrypted + // TCP connections. // If Dial is nil, net.Dial is used. Dial func(network, addr string) (net.Conn, error) + // DialTLS specifies an optional dial function for creating + // TLS connections for non-proxied HTTPS requests. + // + // If DialTLS is nil, Dial and TLSClientConfig are used. + // + // If DialTLS is set, the Dial hook is not used for HTTPS + // requests and the TLSClientConfig and TLSHandshakeTimeout + // are ignored. The returned net.Conn is assumed to already be + // past the TLS handshake. + DialTLS func(network, addr string) (net.Conn, error) + // TLSClientConfig specifies the TLS configuration to use with // tls.Client. If nil, the default configuration is used. TLSClientConfig *tls.Config @@ -105,15 +119,28 @@ type Transport struct { // ProxyFromEnvironment returns the URL of the proxy to use for a // given request, as indicated by the environment variables -// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy). -// An error is returned if the proxy environment is invalid. +// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions +// thereof). HTTPS_PROXY takes precedence over HTTP_PROXY for https +// requests. +// +// The environment values may be either a complete URL or a +// "host[:port]", in which case the "http" scheme is assumed. +// An error is returned if the value is a different form. +// // A nil URL and nil error are returned if no proxy is defined in the -// environment, or a proxy should not be used for the given request. +// environment, or a proxy should not be used for the given request, +// as defined by NO_PROXY. // // As a special case, if req.URL.Host is "localhost" (with or without // a port number), then a nil URL and nil error will be returned. 
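A minimal sketch of a client that picks these variables up through ProxyFromEnvironment; the proxy hosts below are examples only:

package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// With this change, https:// requests consult HTTPS_PROXY first and fall
	// back to HTTP_PROXY; NO_PROXY still excludes hosts from proxying.
	os.Setenv("HTTP_PROXY", "http://proxy.corp.example:3128")    // example value
	os.Setenv("HTTPS_PROXY", "http://secure-proxy.example:3128") // example value

	client := &http.Client{
		Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
	}
	resp, err := client.Get("https://golang.org/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	resp.Body.Close()
	fmt.Println("status:", resp.Status)
}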
func ProxyFromEnvironment(req *Request) (*url.URL, error) { - proxy := httpProxyEnv.Get() + var proxy string + if req.URL.Scheme == "https" { + proxy = httpsProxyEnv.Get() + } + if proxy == "" { + proxy = httpProxyEnv.Get() + } if proxy == "" { return nil, nil } @@ -238,6 +265,7 @@ func (t *Transport) CloseIdleConnections() { m := t.idleConn t.idleConn = nil t.idleConnCh = nil + t.wantIdle = true t.idleMu.Unlock() for _, conns := range m { for _, pconn := range conns { @@ -265,6 +293,9 @@ var ( httpProxyEnv = &envOnce{ names: []string{"HTTP_PROXY", "http_proxy"}, } + httpsProxyEnv = &envOnce{ + names: []string{"HTTPS_PROXY", "https_proxy"}, + } noProxyEnv = &envOnce{ names: []string{"NO_PROXY", "no_proxy"}, } @@ -305,7 +336,7 @@ func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectM if t.Proxy != nil { cm.proxyURL, err = t.Proxy(treq.Request) } - return cm, nil + return cm, err } // proxyAuth returns the Proxy-Authorization header to set @@ -358,6 +389,11 @@ func (t *Transport) putIdleConn(pconn *persistConn) bool { delete(t.idleConnCh, key) } } + if t.wantIdle { + t.idleMu.Unlock() + pconn.close() + return false + } if t.idleConn == nil { t.idleConn = make(map[connectMethodKey][]*persistConn) } @@ -386,6 +422,7 @@ func (t *Transport) getIdleConnCh(cm connectMethod) chan *persistConn { key := cm.key() t.idleMu.Lock() defer t.idleMu.Unlock() + t.wantIdle = false if t.idleConnCh == nil { t.idleConnCh = make(map[connectMethodKey]chan *persistConn) } @@ -444,6 +481,9 @@ func (t *Transport) dial(network, addr string) (c net.Conn, err error) { return net.Dial(network, addr) } +// Testing hooks: +var prePendingDial, postPendingDial func() + // getConn dials and creates a new persistConn to the target as // specified in the connectMethod. This includes doing a proxy CONNECT // and/or setting up TLS. If this doesn't return an error, the persistConn @@ -460,9 +500,17 @@ func (t *Transport) getConn(req *Request, cm connectMethod) (*persistConn, error dialc := make(chan dialRes) handlePendingDial := func() { - if v := <-dialc; v.err == nil { - t.putIdleConn(v.pc) + if prePendingDial != nil { + prePendingDial() } + go func() { + if v := <-dialc; v.err == nil { + t.putIdleConn(v.pc) + } + if postPendingDial != nil { + postPendingDial() + } + }() } cancelc := make(chan struct{}) @@ -484,53 +532,65 @@ func (t *Transport) getConn(req *Request, cm connectMethod) (*persistConn, error // else's dial that they didn't use. 
// But our dial is still going, so give it away // when it finishes: - go handlePendingDial() + handlePendingDial() return pc, nil case <-cancelc: - go handlePendingDial() + handlePendingDial() return nil, errors.New("net/http: request canceled while waiting for connection") } } func (t *Transport) dialConn(cm connectMethod) (*persistConn, error) { - conn, err := t.dial("tcp", cm.addr()) - if err != nil { - if cm.proxyURL != nil { - err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err) - } - return nil, err - } - - pa := cm.proxyAuth() - pconn := &persistConn{ t: t, cacheKey: cm.key(), - conn: conn, reqch: make(chan requestAndChan, 1), writech: make(chan writeRequest, 1), closech: make(chan struct{}), writeErrCh: make(chan error, 1), } + tlsDial := t.DialTLS != nil && cm.targetScheme == "https" && cm.proxyURL == nil + if tlsDial { + var err error + pconn.conn, err = t.DialTLS("tcp", cm.addr()) + if err != nil { + return nil, err + } + if tc, ok := pconn.conn.(*tls.Conn); ok { + cs := tc.ConnectionState() + pconn.tlsState = &cs + } + } else { + conn, err := t.dial("tcp", cm.addr()) + if err != nil { + if cm.proxyURL != nil { + err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err) + } + return nil, err + } + pconn.conn = conn + } + // Proxy setup. switch { case cm.proxyURL == nil: - // Do nothing. + // Do nothing. Not using a proxy. case cm.targetScheme == "http": pconn.isProxy = true - if pa != "" { + if pa := cm.proxyAuth(); pa != "" { pconn.mutateHeaderFunc = func(h Header) { h.Set("Proxy-Authorization", pa) } } case cm.targetScheme == "https": + conn := pconn.conn connectReq := &Request{ Method: "CONNECT", URL: &url.URL{Opaque: cm.targetAddr}, Host: cm.targetAddr, Header: make(Header), } - if pa != "" { + if pa := cm.proxyAuth(); pa != "" { connectReq.Header.Set("Proxy-Authorization", pa) } connectReq.Write(conn) @@ -551,7 +611,7 @@ func (t *Transport) dialConn(cm connectMethod) (*persistConn, error) { } } - if cm.targetScheme == "https" { + if cm.targetScheme == "https" && !tlsDial { // Initiate TLS and check remote host name against certificate. cfg := t.TLSClientConfig if cfg == nil || cfg.ServerName == "" { @@ -564,7 +624,7 @@ func (t *Transport) dialConn(cm connectMethod) (*persistConn, error) { cfg = &clone } } - plainConn := conn + plainConn := pconn.conn tlsConn := tls.Client(plainConn, cfg) errc := make(chan error, 2) var timer *time.Timer // for canceling TLS handshake @@ -980,11 +1040,14 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err } // Ask for a compressed version if the caller didn't set their - // own value for Accept-Encoding. We only attempted to + // own value for Accept-Encoding. We only attempt to // uncompress the gzip stream if we were the layer that // requested it. requestedGzip := false - if !pc.t.DisableCompression && req.Header.Get("Accept-Encoding") == "" && req.Method != "HEAD" { + if !pc.t.DisableCompression && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. 
// See: http://www.gzip.org/zlib/zlib_faq.html#faq38 @@ -993,6 +1056,10 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err // due to a bug in nginx: // http://trac.nginx.org/nginx/ticket/358 // http://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See http://golang.org/issue/8923 requestedGzip = true req.extraHeaders().Set("Accept-Encoding", "gzip") } diff --git a/libgo/go/net/http/transport_test.go b/libgo/go/net/http/transport_test.go index 964ca0fca54..defa6337082 100644 --- a/libgo/go/net/http/transport_test.go +++ b/libgo/go/net/http/transport_test.go @@ -1063,20 +1063,18 @@ func TestTransportConcurrency(t *testing.T) { var wg sync.WaitGroup wg.Add(numReqs) - tr := &Transport{ - Dial: func(netw, addr string) (c net.Conn, err error) { - // Due to the Transport's "socket late - // binding" (see idleConnCh in transport.go), - // the numReqs HTTP requests below can finish - // with a dial still outstanding. So count - // our dials as work too so the leak checker - // doesn't complain at us. - wg.Add(1) - defer wg.Done() - return net.Dial(netw, addr) - }, - } + // Due to the Transport's "socket late binding" (see + // idleConnCh in transport.go), the numReqs HTTP requests + // below can finish with a dial still outstanding. To keep + // the leak checker happy, keep track of pending dials and + // wait for them to finish (and be closed or returned to the + // idle pool) before we close idle connections. + SetPendingDialHooks(func() { wg.Add(1) }, wg.Done) + defer SetPendingDialHooks(nil, nil) + + tr := &Transport{} defer tr.CloseIdleConnections() + c := &Client{Transport: tr} reqs := make(chan string) defer close(reqs) @@ -1703,26 +1701,40 @@ Content-Length: %d } type proxyFromEnvTest struct { - req string // URL to fetch; blank means "http://example.com" - env string - noenv string + req string // URL to fetch; blank means "http://example.com" + + env string // HTTP_PROXY + httpsenv string // HTTPS_PROXY + noenv string // NO_RPXY + want string wanterr error } func (t proxyFromEnvTest) String() string { var buf bytes.Buffer + space := func() { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + } if t.env != "" { fmt.Fprintf(&buf, "http_proxy=%q", t.env) } + if t.httpsenv != "" { + space() + fmt.Fprintf(&buf, "https_proxy=%q", t.httpsenv) + } if t.noenv != "" { - fmt.Fprintf(&buf, " no_proxy=%q", t.noenv) + space() + fmt.Fprintf(&buf, "no_proxy=%q", t.noenv) } req := "http://example.com" if t.req != "" { req = t.req } - fmt.Fprintf(&buf, " req=%q", req) + space() + fmt.Fprintf(&buf, "req=%q", req) return strings.TrimSpace(buf.String()) } @@ -1733,7 +1745,15 @@ var proxyFromEnvTests = []proxyFromEnvTest{ {env: "https://cache.corp.example.com", want: "https://cache.corp.example.com"}, {env: "http://127.0.0.1:8080", want: "http://127.0.0.1:8080"}, {env: "https://127.0.0.1:8080", want: "https://127.0.0.1:8080"}, + + // Don't use secure for http + {req: "http://insecure.tld/", env: "http.proxy.tld", httpsenv: "secure.proxy.tld", want: "http://http.proxy.tld"}, + // Use secure for https. 
+ {req: "https://secure.tld/", env: "http.proxy.tld", httpsenv: "secure.proxy.tld", want: "http://secure.proxy.tld"}, + {req: "https://secure.tld/", env: "http.proxy.tld", httpsenv: "https://secure.proxy.tld", want: "https://secure.proxy.tld"}, + {want: "<nil>"}, + {noenv: "example.com", req: "http://example.com/", env: "proxy", want: "<nil>"}, {noenv: ".example.com", req: "http://example.com/", env: "proxy", want: "<nil>"}, {noenv: "ample.com", req: "http://example.com/", env: "proxy", want: "http://proxy"}, @@ -1745,6 +1765,7 @@ func TestProxyFromEnvironment(t *testing.T) { ResetProxyEnv() for _, tt := range proxyFromEnvTests { os.Setenv("HTTP_PROXY", tt.env) + os.Setenv("HTTPS_PROXY", tt.httpsenv) os.Setenv("NO_PROXY", tt.noenv) ResetCachedEnvironment() reqURL := tt.req @@ -2098,6 +2119,136 @@ func TestTransportClosesBodyOnError(t *testing.T) { } } +func TestTransportDialTLS(t *testing.T) { + var mu sync.Mutex // guards following + var gotReq, didDial bool + + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + mu.Lock() + gotReq = true + mu.Unlock() + })) + defer ts.Close() + tr := &Transport{ + DialTLS: func(netw, addr string) (net.Conn, error) { + mu.Lock() + didDial = true + mu.Unlock() + c, err := tls.Dial(netw, addr, &tls.Config{ + InsecureSkipVerify: true, + }) + if err != nil { + return nil, err + } + return c, c.Handshake() + }, + } + defer tr.CloseIdleConnections() + client := &Client{Transport: tr} + res, err := client.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + mu.Lock() + if !gotReq { + t.Error("didn't get request") + } + if !didDial { + t.Error("didn't use dial hook") + } +} + +// Test for issue 8755 +// Ensure that if a proxy returns an error, it is exposed by RoundTrip +func TestRoundTripReturnsProxyError(t *testing.T) { + badProxy := func(*http.Request) (*url.URL, error) { + return nil, errors.New("errorMessage") + } + + tr := &Transport{Proxy: badProxy} + + req, _ := http.NewRequest("GET", "http://example.com", nil) + + _, err := tr.RoundTrip(req) + + if err == nil { + t.Error("Expected proxy error to be returned by RoundTrip") + } +} + +// tests that putting an idle conn after a call to CloseIdleConns does return it +func TestTransportCloseIdleConnsThenReturn(t *testing.T) { + tr := &Transport{} + wantIdle := func(when string, n int) bool { + got := tr.IdleConnCountForTesting("|http|example.com") // key used by PutIdleTestConn + if got == n { + return true + } + t.Errorf("%s: idle conns = %d; want %d", when, got, n) + return false + } + wantIdle("start", 0) + if !tr.PutIdleTestConn() { + t.Fatal("put failed") + } + if !tr.PutIdleTestConn() { + t.Fatal("second put failed") + } + wantIdle("after put", 2) + tr.CloseIdleConnections() + if !tr.IsIdleForTesting() { + t.Error("should be idle after CloseIdleConnections") + } + wantIdle("after close idle", 0) + if tr.PutIdleTestConn() { + t.Fatal("put didn't fail") + } + wantIdle("after second put", 0) + + tr.RequestIdleConnChForTesting() // should toggle the transport out of idle mode + if tr.IsIdleForTesting() { + t.Error("shouldn't be idle after RequestIdleConnChForTesting") + } + if !tr.PutIdleTestConn() { + t.Fatal("after re-activation") + } + wantIdle("after final put", 1) +} + +// This tests that an client requesting a content range won't also +// implicitly ask for gzip support. If they want that, they need to do it +// on their own. 
+// golang.org/issue/8923 +func TestTransportRangeAndGzip(t *testing.T) { + defer afterTest(t) + reqc := make(chan *Request, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + reqc <- r + })) + defer ts.Close() + + req, _ := NewRequest("GET", ts.URL, nil) + req.Header.Set("Range", "bytes=7-11") + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + + select { + case r := <-reqc: + if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { + t.Error("Transport advertised gzip support in the Accept header") + } + if r.Header.Get("Range") == "" { + t.Error("no Range in request") + } + case <-time.After(10 * time.Second): + t.Fatal("timeout") + } + res.Body.Close() +} + func wantBody(res *http.Response, err error, want string) error { if err != nil { return err diff --git a/libgo/go/net/ip.go b/libgo/go/net/ip.go index 0582009b8bd..4a93e97b39d 100644 --- a/libgo/go/net/ip.go +++ b/libgo/go/net/ip.go @@ -287,6 +287,7 @@ func (ip IP) String() string { if j > i && j-i > e1-e0 { e0 = i e1 = j + i = j } } // The symbol "::" MUST NOT be used to shorten just one 16 bit 0 field. @@ -295,21 +296,23 @@ func (ip IP) String() string { e1 = -1 } + const maxLen = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") + b := make([]byte, 0, maxLen) + // Print with possible :: in place of run of zeros - var s string for i := 0; i < IPv6len; i += 2 { if i == e0 { - s += "::" + b = append(b, ':', ':') i = e1 if i >= IPv6len { break } } else if i > 0 { - s += ":" + b = append(b, ':') } - s += itox((uint(p[i])<<8)|uint(p[i+1]), 1) + b = appendHex(b, (uint32(p[i])<<8)|uint32(p[i+1])) } - return s + return string(b) } // ipEmptyString is like ip.String except that it returns @@ -419,14 +422,14 @@ func (m IPMask) Size() (ones, bits int) { // String returns the hexadecimal form of m, with no punctuation. func (m IPMask) String() string { - s := "" - for _, b := range m { - s += itox(uint(b), 2) - } - if len(s) == 0 { + if len(m) == 0 { return "<nil>" } - return s + buf := make([]byte, len(m)*2) + for i, b := range m { + buf[i*2], buf[i*2+1] = hexDigit[b>>4], hexDigit[b&0xf] + } + return string(buf) } func networkNumberAndMask(n *IPNet) (ip IP, m IPMask) { @@ -646,11 +649,16 @@ func (e *ParseError) Error() string { // If s is not a valid textual representation of an IP address, // ParseIP returns nil. 
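A small sketch of ParseIP's observable behaviour after this change, which now scans for the first '.' or ':' and returns nil when neither appears (inputs chosen for illustration):

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, s := range []string{"192.0.2.1", "2001:db8::68", "not-an-ip"} {
		ip := net.ParseIP(s)
		if ip == nil {
			fmt.Printf("%q: not a valid textual IP address\n", s)
			continue
		}
		// To4 is non-nil only for addresses representable in four bytes.
		fmt.Printf("%q: parsed %v (IPv4: %v)\n", s, ip, ip.To4() != nil)
	}
}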
func ParseIP(s string) IP { - if ip := parseIPv4(s); ip != nil { - return ip + for i := 0; i < len(s); i++ { + switch s[i] { + case '.': + return parseIPv4(s) + case ':': + ip, _ := parseIPv6(s, false) + return ip + } } - ip, _ := parseIPv6(s, false) - return ip + return nil } // ParseCIDR parses s as a CIDR notation IP address and mask, diff --git a/libgo/go/net/ip_test.go b/libgo/go/net/ip_test.go index ffeb9d315e7..485ff51153b 100644 --- a/libgo/go/net/ip_test.go +++ b/libgo/go/net/ip_test.go @@ -44,6 +44,14 @@ func TestParseIP(t *testing.T) { } } +func BenchmarkParseIP(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, tt := range parseIPTests { + ParseIP(tt.in) + } + } +} + // Issue 6339 func TestMarshalEmptyIP(t *testing.T) { for _, in := range [][]byte{nil, []byte("")} { @@ -91,6 +99,16 @@ func TestIPString(t *testing.T) { } } +func BenchmarkIPString(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, tt := range ipStringTests { + if tt.in != nil { + tt.in.String() + } + } + } +} + var ipMaskTests = []struct { in IP mask IPMask @@ -131,6 +149,14 @@ func TestIPMaskString(t *testing.T) { } } +func BenchmarkIPMaskString(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, tt := range ipMaskStringTests { + tt.in.String() + } + } +} + var parseCIDRTests = []struct { in string ip IP diff --git a/libgo/go/net/ipraw_test.go b/libgo/go/net/ipraw_test.go index 0632dafc65e..92dc8dc5694 100644 --- a/libgo/go/net/ipraw_test.go +++ b/libgo/go/net/ipraw_test.go @@ -68,6 +68,11 @@ func skipRawSocketTest(t *testing.T) (skip bool, skipmsg string) { } func TestResolveIPAddr(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skipf("skipping test on %q", runtime.GOOS) + } + for _, tt := range resolveIPAddrTests { addr, err := ResolveIPAddr(tt.net, tt.litAddrOrName) if err != tt.err { diff --git a/libgo/go/net/iprawsock_posix.go b/libgo/go/net/iprawsock_posix.go index bbb3f3ed66c..99b081ba8c8 100644 --- a/libgo/go/net/iprawsock_posix.go +++ b/libgo/go/net/iprawsock_posix.go @@ -198,7 +198,7 @@ func dialIP(netProto string, laddr, raddr *IPAddr, deadline time.Time) (*IPConn, if raddr == nil { return nil, &OpError{Op: "dial", Net: netProto, Addr: nil, Err: errMissingAddress} } - fd, err := internetSocket(net, laddr, raddr, deadline, syscall.SOCK_RAW, proto, "dial", sockaddrToIP) + fd, err := internetSocket(net, laddr, raddr, deadline, syscall.SOCK_RAW, proto, "dial") if err != nil { return nil, &OpError{Op: "dial", Net: netProto, Addr: raddr, Err: err} } @@ -219,7 +219,7 @@ func ListenIP(netProto string, laddr *IPAddr) (*IPConn, error) { default: return nil, &OpError{Op: "listen", Net: netProto, Addr: laddr, Err: UnknownNetworkError(netProto)} } - fd, err := internetSocket(net, laddr, nil, noDeadline, syscall.SOCK_RAW, proto, "listen", sockaddrToIP) + fd, err := internetSocket(net, laddr, nil, noDeadline, syscall.SOCK_RAW, proto, "listen") if err != nil { return nil, &OpError{Op: "listen", Net: netProto, Addr: laddr, Err: err} } diff --git a/libgo/go/net/ipsock_posix.go b/libgo/go/net/ipsock_posix.go index 2ba4c8efd53..f9ebe40a21e 100644 --- a/libgo/go/net/ipsock_posix.go +++ b/libgo/go/net/ipsock_posix.go @@ -132,9 +132,9 @@ func favoriteAddrFamily(net string, laddr, raddr sockaddr, mode string) (family // Internet sockets (TCP, UDP, IP) -func internetSocket(net string, laddr, raddr sockaddr, deadline time.Time, sotype, proto int, mode string, toAddr func(syscall.Sockaddr) Addr) (fd *netFD, err error) { +func internetSocket(net string, laddr, raddr sockaddr, deadline time.Time, sotype, proto 
int, mode string) (fd *netFD, err error) { family, ipv6only := favoriteAddrFamily(net, laddr, raddr, mode) - return socket(net, family, sotype, proto, ipv6only, laddr, raddr, deadline, toAddr) + return socket(net, family, sotype, proto, ipv6only, laddr, raddr, deadline) } func ipToSockaddr(family int, ip IP, port int, zone string) (syscall.Sockaddr, error) { diff --git a/libgo/go/net/lookup.go b/libgo/go/net/lookup.go index 20f20578cde..aeffe6c9b72 100644 --- a/libgo/go/net/lookup.go +++ b/libgo/go/net/lookup.go @@ -40,10 +40,16 @@ func lookupIPMerge(host string) (addrs []IP, err error) { addrsi, err, shared := lookupGroup.Do(host, func() (interface{}, error) { return lookupIP(host) }) + return lookupIPReturn(addrsi, err, shared) +} + +// lookupIPReturn turns the return values from singleflight.Do into +// the return values from LookupIP. +func lookupIPReturn(addrsi interface{}, err error, shared bool) ([]IP, error) { if err != nil { return nil, err } - addrs = addrsi.([]IP) + addrs := addrsi.([]IP) if shared { clone := make([]IP, len(addrs)) copy(clone, addrs) @@ -52,41 +58,40 @@ func lookupIPMerge(host string) (addrs []IP, err error) { return addrs, nil } +// lookupIPDeadline looks up a hostname with a deadline. func lookupIPDeadline(host string, deadline time.Time) (addrs []IP, err error) { if deadline.IsZero() { return lookupIPMerge(host) } - // TODO(bradfitz): consider pushing the deadline down into the - // name resolution functions. But that involves fixing it for - // the native Go resolver, cgo, Windows, etc. - // - // In the meantime, just use a goroutine. Most users affected - // by http://golang.org/issue/2631 are due to TCP connections - // to unresponsive hosts, not DNS. + // We could push the deadline down into the name resolution + // functions. However, the most commonly used implementation + // calls getaddrinfo, which has no timeout. + timeout := deadline.Sub(time.Now()) if timeout <= 0 { - err = errTimeout - return + return nil, errTimeout } t := time.NewTimer(timeout) defer t.Stop() - type res struct { - addrs []IP - err error - } - resc := make(chan res, 1) - go func() { - a, err := lookupIPMerge(host) - resc <- res{a, err} - }() + + ch := lookupGroup.DoChan(host, func() (interface{}, error) { + return lookupIP(host) + }) + select { case <-t.C: - err = errTimeout - case r := <-resc: - addrs, err = r.addrs, r.err + // The DNS lookup timed out for some reason. Force + // future requests to start the DNS lookup again + // rather than waiting for the current lookup to + // complete. See issue 8602. + lookupGroup.Forget(host) + + return nil, errTimeout + + case r := <-ch: + return lookupIPReturn(r.v, r.err, r.shared) } - return } // LookupPort looks up the port for the given network and service. diff --git a/libgo/go/net/lookup_stub.go b/libgo/go/net/lookup_stub.go new file mode 100644 index 00000000000..502aafb2702 --- /dev/null +++ b/libgo/go/net/lookup_stub.go @@ -0,0 +1,49 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl + +package net + +import "syscall" + +func lookupProtocol(name string) (proto int, err error) { + return 0, syscall.ENOPROTOOPT +} + +func lookupHost(host string) (addrs []string, err error) { + return nil, syscall.ENOPROTOOPT +} + +func lookupIP(host string) (ips []IP, err error) { + return nil, syscall.ENOPROTOOPT +} + +func lookupPort(network, service string) (port int, err error) { + return 0, syscall.ENOPROTOOPT +} + +func lookupCNAME(name string) (cname string, err error) { + return "", syscall.ENOPROTOOPT +} + +func lookupSRV(service, proto, name string) (cname string, srvs []*SRV, err error) { + return "", nil, syscall.ENOPROTOOPT +} + +func lookupMX(name string) (mxs []*MX, err error) { + return nil, syscall.ENOPROTOOPT +} + +func lookupNS(name string) (nss []*NS, err error) { + return nil, syscall.ENOPROTOOPT +} + +func lookupTXT(name string) (txts []string, err error) { + return nil, syscall.ENOPROTOOPT +} + +func lookupAddr(addr string) (ptrs []string, err error) { + return nil, syscall.ENOPROTOOPT +} diff --git a/libgo/go/net/lookup_test.go b/libgo/go/net/lookup_test.go index 3355e469489..057e1322b99 100644 --- a/libgo/go/net/lookup_test.go +++ b/libgo/go/net/lookup_test.go @@ -15,87 +15,181 @@ import ( var testExternal = flag.Bool("external", true, "allow use of external networks during long test") -func TestGoogleSRV(t *testing.T) { +var lookupGoogleSRVTests = []struct { + service, proto, name string + cname, target string +}{ + { + "xmpp-server", "tcp", "google.com", + ".google.com", ".google.com", + }, + { + "", "", "_xmpp-server._tcp.google.com", // non-standard back door + ".google.com", ".google.com", + }, +} + +func TestLookupGoogleSRV(t *testing.T) { if testing.Short() || !*testExternal { t.Skip("skipping test to avoid external network") } - _, addrs, err := LookupSRV("xmpp-server", "tcp", "google.com") - if err != nil { - t.Errorf("failed: %s", err) + + for _, tt := range lookupGoogleSRVTests { + cname, srvs, err := LookupSRV(tt.service, tt.proto, tt.name) + if err != nil { + t.Fatal(err) + } + if len(srvs) == 0 { + t.Error("got no record") + } + if !strings.Contains(cname, tt.cname) { + t.Errorf("got %q; want %q", cname, tt.cname) + } + for _, srv := range srvs { + if !strings.Contains(srv.Target, tt.target) { + t.Errorf("got %v; want a record containing %q", srv, tt.target) + } + } } - if len(addrs) == 0 { - t.Errorf("no results") +} + +func TestLookupGmailMX(t *testing.T) { + if testing.Short() || !*testExternal { + t.Skip("skipping test to avoid external network") } - // Non-standard back door. 
- _, addrs, err = LookupSRV("", "", "_xmpp-server._tcp.google.com") + mxs, err := LookupMX("gmail.com") if err != nil { - t.Errorf("back door failed: %s", err) + t.Fatal(err) } - if len(addrs) == 0 { - t.Errorf("back door no results") + if len(mxs) == 0 { + t.Error("got no record") + } + for _, mx := range mxs { + if !strings.Contains(mx.Host, ".google.com") { + t.Errorf("got %v; want a record containing .google.com.", mx) + } } } -func TestGmailMX(t *testing.T) { +func TestLookupGmailNS(t *testing.T) { if testing.Short() || !*testExternal { t.Skip("skipping test to avoid external network") } - mx, err := LookupMX("gmail.com") + + nss, err := LookupNS("gmail.com") if err != nil { - t.Errorf("failed: %s", err) + t.Fatal(err) + } + if len(nss) == 0 { + t.Error("got no record") } - if len(mx) == 0 { - t.Errorf("no results") + for _, ns := range nss { + if !strings.Contains(ns.Host, ".google.com") { + t.Errorf("got %v; want a record containing .google.com.", ns) + } } } -func TestGmailNS(t *testing.T) { +func TestLookupGmailTXT(t *testing.T) { if testing.Short() || !*testExternal { t.Skip("skipping test to avoid external network") } - ns, err := LookupNS("gmail.com") + + txts, err := LookupTXT("gmail.com") if err != nil { - t.Errorf("failed: %s", err) + t.Fatal(err) + } + if len(txts) == 0 { + t.Error("got no record") + } + for _, txt := range txts { + if !strings.Contains(txt, "spf") { + t.Errorf("got %q; want a spf record", txt) + } + } +} + +var lookupGooglePublicDNSAddrs = []struct { + addr string + name string +}{ + {"8.8.8.8", ".google.com."}, + {"8.8.4.4", ".google.com."}, + {"2001:4860:4860::8888", ".google.com."}, + {"2001:4860:4860::8844", ".google.com."}, +} + +func TestLookupGooglePublicDNSAddr(t *testing.T) { + if testing.Short() || !*testExternal { + t.Skip("skipping test to avoid external network") } - if len(ns) == 0 { - t.Errorf("no results") + + for _, tt := range lookupGooglePublicDNSAddrs { + names, err := LookupAddr(tt.addr) + if err != nil { + t.Fatal(err) + } + if len(names) == 0 { + t.Error("got no record") + } + for _, name := range names { + if !strings.HasSuffix(name, tt.name) { + t.Errorf("got %q; want a record containing %q", name, tt.name) + } + } } } -func TestGmailTXT(t *testing.T) { +func TestLookupIANACNAME(t *testing.T) { if testing.Short() || !*testExternal { t.Skip("skipping test to avoid external network") } - txt, err := LookupTXT("gmail.com") + + cname, err := LookupCNAME("www.iana.org") if err != nil { - t.Errorf("failed: %s", err) + t.Fatal(err) } - if len(txt) == 0 || len(txt[0]) == 0 { - t.Errorf("no results") + if !strings.HasSuffix(cname, ".icann.org.") { + t.Errorf("got %q; want a record containing .icann.org.", cname) } } -func TestGoogleDNSAddr(t *testing.T) { +func TestLookupGoogleHost(t *testing.T) { if testing.Short() || !*testExternal { t.Skip("skipping test to avoid external network") } - names, err := LookupAddr("8.8.8.8") + + addrs, err := LookupHost("google.com") if err != nil { - t.Errorf("failed: %s", err) + t.Fatal(err) + } + if len(addrs) == 0 { + t.Error("got no record") } - if len(names) == 0 { - t.Errorf("no results") + for _, addr := range addrs { + if ParseIP(addr) == nil { + t.Errorf("got %q; want a literal ip address", addr) + } } } -func TestLookupIANACNAME(t *testing.T) { +func TestLookupGoogleIP(t *testing.T) { if testing.Short() || !*testExternal { t.Skip("skipping test to avoid external network") } - cname, err := LookupCNAME("www.iana.org") - if !strings.HasSuffix(cname, ".icann.org.") || err != nil { - 
t.Errorf(`LookupCNAME("www.iana.org.") = %q, %v, want "*.icann.org.", nil`, cname, err) + + ips, err := LookupIP("google.com") + if err != nil { + t.Fatal(err) + } + if len(ips) == 0 { + t.Error("got no record") + } + for _, ip := range ips { + if ip.To4() == nil && ip.To16() == nil { + t.Errorf("got %v; want an ip address", ip) + } } } diff --git a/libgo/go/net/lookup_unix.go b/libgo/go/net/lookup_unix.go index b1d2f8f31a9..a54578456d7 100644 --- a/libgo/go/net/lookup_unix.go +++ b/libgo/go/net/lookup_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris package net diff --git a/libgo/go/net/lookup_windows.go b/libgo/go/net/lookup_windows.go index 130364231d4..6a925b0a7ad 100644 --- a/libgo/go/net/lookup_windows.go +++ b/libgo/go/net/lookup_windows.go @@ -210,14 +210,21 @@ func lookupCNAME(name string) (cname string, err error) { defer releaseThread() var r *syscall.DNSRecord e := syscall.DnsQuery(name, syscall.DNS_TYPE_CNAME, 0, nil, &r, nil) + // windows returns DNS_INFO_NO_RECORDS if there are no CNAME-s + if errno, ok := e.(syscall.Errno); ok && errno == syscall.DNS_INFO_NO_RECORDS { + // if there are no aliases, the canonical name is the input name + if name == "" || name[len(name)-1] != '.' { + return name + ".", nil + } + return name, nil + } if e != nil { return "", os.NewSyscallError("LookupCNAME", e) } defer syscall.DnsRecordListFree(r, 1) - if r != nil && r.Type == syscall.DNS_TYPE_CNAME { - v := (*syscall.DNSPTRData)(unsafe.Pointer(&r.Data[0])) - cname = syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(v.Host))[:]) + "." - } + + resolved := resolveCNAME(syscall.StringToUTF16Ptr(name), r) + cname = syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(resolved))[:]) + "." 
return } @@ -236,8 +243,9 @@ func lookupSRV(service, proto, name string) (cname string, addrs []*SRV, err err return "", nil, os.NewSyscallError("LookupSRV", e) } defer syscall.DnsRecordListFree(r, 1) + addrs = make([]*SRV, 0, 10) - for p := r; p != nil && p.Type == syscall.DNS_TYPE_SRV; p = p.Next { + for _, p := range validRecs(r, syscall.DNS_TYPE_SRV, target) { v := (*syscall.DNSSRVData)(unsafe.Pointer(&p.Data[0])) addrs = append(addrs, &SRV{syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(v.Target))[:]), v.Port, v.Priority, v.Weight}) } @@ -254,8 +262,9 @@ func lookupMX(name string) (mx []*MX, err error) { return nil, os.NewSyscallError("LookupMX", e) } defer syscall.DnsRecordListFree(r, 1) + mx = make([]*MX, 0, 10) - for p := r; p != nil && p.Type == syscall.DNS_TYPE_MX; p = p.Next { + for _, p := range validRecs(r, syscall.DNS_TYPE_MX, name) { v := (*syscall.DNSMXData)(unsafe.Pointer(&p.Data[0])) mx = append(mx, &MX{syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(v.NameExchange))[:]) + ".", v.Preference}) } @@ -272,8 +281,9 @@ func lookupNS(name string) (ns []*NS, err error) { return nil, os.NewSyscallError("LookupNS", e) } defer syscall.DnsRecordListFree(r, 1) + ns = make([]*NS, 0, 10) - for p := r; p != nil && p.Type == syscall.DNS_TYPE_NS; p = p.Next { + for _, p := range validRecs(r, syscall.DNS_TYPE_NS, name) { v := (*syscall.DNSPTRData)(unsafe.Pointer(&p.Data[0])) ns = append(ns, &NS{syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(v.Host))[:]) + "."}) } @@ -289,9 +299,10 @@ func lookupTXT(name string) (txt []string, err error) { return nil, os.NewSyscallError("LookupTXT", e) } defer syscall.DnsRecordListFree(r, 1) + txt = make([]string, 0, 10) - if r != nil && r.Type == syscall.DNS_TYPE_TEXT { - d := (*syscall.DNSTXTData)(unsafe.Pointer(&r.Data[0])) + for _, p := range validRecs(r, syscall.DNS_TYPE_TEXT, name) { + d := (*syscall.DNSTXTData)(unsafe.Pointer(&p.Data[0])) for _, v := range (*[1 << 10]*uint16)(unsafe.Pointer(&(d.StringArray[0])))[:d.StringCount] { s := syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(v))[:]) txt = append(txt, s) @@ -313,10 +324,58 @@ func lookupAddr(addr string) (name []string, err error) { return nil, os.NewSyscallError("LookupAddr", e) } defer syscall.DnsRecordListFree(r, 1) + name = make([]string, 0, 10) - for p := r; p != nil && p.Type == syscall.DNS_TYPE_PTR; p = p.Next { + for _, p := range validRecs(r, syscall.DNS_TYPE_PTR, arpa) { v := (*syscall.DNSPTRData)(unsafe.Pointer(&p.Data[0])) name = append(name, syscall.UTF16ToString((*[256]uint16)(unsafe.Pointer(v.Host))[:])) } return name, nil } + +const dnsSectionMask = 0x0003 + +// returns only results applicable to name and resolves CNAME entries +func validRecs(r *syscall.DNSRecord, dnstype uint16, name string) []*syscall.DNSRecord { + cname := syscall.StringToUTF16Ptr(name) + if dnstype != syscall.DNS_TYPE_CNAME { + cname = resolveCNAME(cname, r) + } + rec := make([]*syscall.DNSRecord, 0, 10) + for p := r; p != nil; p = p.Next { + if p.Dw&dnsSectionMask != syscall.DnsSectionAnswer { + continue + } + if p.Type != dnstype { + continue + } + if !syscall.DnsNameCompare(cname, p.Name) { + continue + } + rec = append(rec, p) + } + return rec +} + +// returns the last CNAME in chain +func resolveCNAME(name *uint16, r *syscall.DNSRecord) *uint16 { + // limit cname resolving to 10 in case of a infinite CNAME loop +Cname: + for cnameloop := 0; cnameloop < 10; cnameloop++ { + for p := r; p != nil; p = p.Next { + if p.Dw&dnsSectionMask != syscall.DnsSectionAnswer { + continue + } + if 
p.Type != syscall.DNS_TYPE_CNAME { + continue + } + if !syscall.DnsNameCompare(name, p.Name) { + continue + } + name = (*syscall.DNSPTRData)(unsafe.Pointer(&r.Data[0])).Host + continue Cname + } + break + } + return name +} diff --git a/libgo/go/net/mail/message.go b/libgo/go/net/mail/message.go index ba0778caa73..19aa888d872 100644 --- a/libgo/go/net/mail/message.go +++ b/libgo/go/net/mail/message.go @@ -28,6 +28,7 @@ import ( "strconv" "strings" "time" + "unicode" ) var debug = debugT(false) @@ -445,7 +446,7 @@ func decodeRFC2047Word(s string) (string, error) { return "", errors.New("address not RFC 2047 encoded") } charset, enc := strings.ToLower(fields[1]), strings.ToLower(fields[2]) - if charset != "iso-8859-1" && charset != "utf-8" { + if charset != "us-ascii" && charset != "iso-8859-1" && charset != "utf-8" { return "", fmt.Errorf("charset not supported: %q", charset) } @@ -466,6 +467,16 @@ func decodeRFC2047Word(s string) (string, error) { } switch charset { + case "us-ascii": + b := new(bytes.Buffer) + for _, c := range dec { + if c >= 0x80 { + b.WriteRune(unicode.ReplacementChar) + } else { + b.WriteRune(rune(c)) + } + } + return b.String(), nil case "iso-8859-1": b := new(bytes.Buffer) for _, c := range dec { diff --git a/libgo/go/net/mail/message_test.go b/libgo/go/net/mail/message_test.go index eb9c8cbdc9b..6ba48be04fa 100644 --- a/libgo/go/net/mail/message_test.go +++ b/libgo/go/net/mail/message_test.go @@ -194,6 +194,16 @@ func TestAddressParsing(t *testing.T) { }, }, }, + // RFC 2047 "Q"-encoded US-ASCII address. Dumb but legal. + { + `=?us-ascii?q?J=6Frg_Doe?= <joerg@example.com>`, + []*Address{ + { + Name: `Jorg Doe`, + Address: "joerg@example.com", + }, + }, + }, // RFC 2047 "Q"-encoded UTF-8 address. { `=?utf-8?q?J=C3=B6rg_Doe?= <joerg@example.com>`, diff --git a/libgo/go/net/multicast_test.go b/libgo/go/net/multicast_test.go index 63dbce88e9a..5f253f44a45 100644 --- a/libgo/go/net/multicast_test.go +++ b/libgo/go/net/multicast_test.go @@ -25,7 +25,7 @@ var ipv4MulticastListenerTests = []struct { // port. func TestIPv4MulticastListener(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9": + case "android", "nacl", "plan9": t.Skipf("skipping test on %q", runtime.GOOS) case "solaris": t.Skipf("skipping test on solaris, see issue 7399") diff --git a/libgo/go/net/net.go b/libgo/go/net/net.go index ca56af54fc6..cb31af5e347 100644 --- a/libgo/go/net/net.go +++ b/libgo/go/net/net.go @@ -32,7 +32,6 @@ The Listen function creates servers: conn, err := ln.Accept() if err != nil { // handle error - continue } go handleConnection(conn) } diff --git a/libgo/go/net/parse.go b/libgo/go/net/parse.go index ee6e7e99522..e1d0130c9ac 100644 --- a/libgo/go/net/parse.go +++ b/libgo/go/net/parse.go @@ -210,18 +210,18 @@ func itod(i uint) string { return string(b[bp:]) } -// Convert i to hexadecimal string. -func itox(i uint, min int) string { - // Assemble hexadecimal in reverse order. - var b [32]byte - bp := len(b) - for ; i > 0 || min > 0; i /= 16 { - bp-- - b[bp] = "0123456789abcdef"[byte(i%16)] - min-- +// Convert i to a hexadecimal string. Leading zeros are not printed. +func appendHex(dst []byte, i uint32) []byte { + if i == 0 { + return append(dst, '0') } - - return string(b[bp:]) + for j := 7; j >= 0; j-- { + v := i >> uint(j*4) + if v > 0 { + dst = append(dst, hexDigit[v&0xf]) + } + } + return dst } // Number of occurrences of b in s. 
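The appendHex helper added to parse.go above emits lowercase hex digits from the most significant non-zero nibble down, so leading zeros are skipped without any division. A standalone sketch for comparison; hexDigit is redefined here because the package-level constant is unexported, and strconv.AppendUint appears only to confirm the digits match.

package main

import (
	"fmt"
	"strconv"
)

const hexDigit = "0123456789abcdef"

// appendHex mirrors the unexported helper above: scan the eight nibbles of
// a uint32 from high to low and start emitting at the first non-zero one.
func appendHex(dst []byte, i uint32) []byte {
	if i == 0 {
		return append(dst, '0')
	}
	for j := 7; j >= 0; j-- {
		if v := i >> uint(j*4); v > 0 {
			dst = append(dst, hexDigit[v&0xf])
		}
	}
	return dst
}

func main() {
	fmt.Println(string(appendHex(nil, 0x20010db8)))              // 20010db8
	fmt.Println(string(strconv.AppendUint(nil, 0x20010db8, 16))) // same digits via the standard library
}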
diff --git a/libgo/go/net/parse_test.go b/libgo/go/net/parse_test.go index b86bc32884b..7b213b75bde 100644 --- a/libgo/go/net/parse_test.go +++ b/libgo/go/net/parse_test.go @@ -12,9 +12,9 @@ import ( ) func TestReadLine(t *testing.T) { - // /etc/services file does not exist on windows and Plan 9. + // /etc/services file does not exist on android, plan9, windows. switch runtime.GOOS { - case "plan9", "windows": + case "android", "plan9", "windows": t.Skipf("skipping test on %q", runtime.GOOS) } filename := "/etc/services" // a nice big file diff --git a/libgo/go/net/port_test.go b/libgo/go/net/port_test.go index 9e8968f359c..4811ade69e0 100644 --- a/libgo/go/net/port_test.go +++ b/libgo/go/net/port_test.go @@ -5,6 +5,7 @@ package net import ( + "runtime" "testing" ) @@ -43,6 +44,11 @@ var porttests = []portTest{ } func TestLookupPort(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skipf("skipping test on %q", runtime.GOOS) + } + for i := 0; i < len(porttests); i++ { tt := porttests[i] if port, err := LookupPort(tt.netw, tt.name); port != tt.port || (err == nil) != tt.ok { diff --git a/libgo/go/net/port_unix.go b/libgo/go/net/port_unix.go index 89558c1f029..348c771c351 100644 --- a/libgo/go/net/port_unix.go +++ b/libgo/go/net/port_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris // Read system port mappings from /etc/services diff --git a/libgo/go/net/rpc/client.go b/libgo/go/net/rpc/client.go index 21f79b06844..d0c4a69214e 100644 --- a/libgo/go/net/rpc/client.go +++ b/libgo/go/net/rpc/client.go @@ -41,10 +41,10 @@ type Call struct { type Client struct { codec ClientCodec - sending sync.Mutex + reqMutex sync.Mutex // protects following + request Request mutex sync.Mutex // protects following - request Request seq uint64 pending map[uint64]*Call closing bool // user has called Close @@ -69,8 +69,8 @@ type ClientCodec interface { } func (client *Client) send(call *Call) { - client.sending.Lock() - defer client.sending.Unlock() + client.reqMutex.Lock() + defer client.reqMutex.Unlock() // Register this call. client.mutex.Lock() @@ -146,7 +146,7 @@ func (client *Client) input() { } } // Terminate pending calls. - client.sending.Lock() + client.reqMutex.Lock() client.mutex.Lock() client.shutdown = true closing := client.closing @@ -162,7 +162,7 @@ func (client *Client) input() { call.done() } client.mutex.Unlock() - client.sending.Unlock() + client.reqMutex.Unlock() if debugLog && err != io.EOF && !closing { log.Println("rpc: client protocol error:", err) } diff --git a/libgo/go/net/rpc/client_test.go b/libgo/go/net/rpc/client_test.go index bbfc1ec3a3e..5dd111b299f 100644 --- a/libgo/go/net/rpc/client_test.go +++ b/libgo/go/net/rpc/client_test.go @@ -6,6 +6,10 @@ package rpc import ( "errors" + "fmt" + "net" + "runtime" + "strings" "testing" ) @@ -34,3 +38,54 @@ func TestCloseCodec(t *testing.T) { t.Error("client.Close did not close codec") } } + +// Test that errors in gob shut down the connection. Issue 7689. + +type R struct { + msg []byte // Not exported, so R does not work with gob. 
+} + +type S struct{} + +func (s *S) Recv(nul *struct{}, reply *R) error { + *reply = R{[]byte("foo")} + return nil +} + +func TestGobError(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/8908") + } + defer func() { + err := recover() + if err == nil { + t.Fatal("no error") + } + if !strings.Contains("reading body EOF", err.(error).Error()) { + t.Fatal("expected `reading body EOF', got", err) + } + }() + Register(new(S)) + + listen, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic(err) + } + go Accept(listen) + + client, err := Dial("tcp", listen.Addr().String()) + if err != nil { + panic(err) + } + + var reply Reply + err = client.Call("S.Recv", &struct{}{}, &reply) + if err != nil { + panic(err) + } + + fmt.Printf("%#v\n", reply) + client.Close() + + listen.Close() +} diff --git a/libgo/go/net/rpc/debug.go b/libgo/go/net/rpc/debug.go index 926466d6255..98b2c1c6c4a 100644 --- a/libgo/go/net/rpc/debug.go +++ b/libgo/go/net/rpc/debug.go @@ -11,9 +11,9 @@ package rpc import ( "fmt" + "html/template" "net/http" "sort" - "text/template" ) const debugText = `<html> diff --git a/libgo/go/net/rpc/server.go b/libgo/go/net/rpc/server.go index 6b264b46b8e..83728d55a18 100644 --- a/libgo/go/net/rpc/server.go +++ b/libgo/go/net/rpc/server.go @@ -395,6 +395,7 @@ type gobServerCodec struct { dec *gob.Decoder enc *gob.Encoder encBuf *bufio.Writer + closed bool } func (c *gobServerCodec) ReadRequestHeader(r *Request) error { @@ -407,15 +408,32 @@ func (c *gobServerCodec) ReadRequestBody(body interface{}) error { func (c *gobServerCodec) WriteResponse(r *Response, body interface{}) (err error) { if err = c.enc.Encode(r); err != nil { + if c.encBuf.Flush() == nil { + // Gob couldn't encode the header. Should not happen, so if it does, + // shut down the connection to signal that the connection is broken. + log.Println("rpc: gob error encoding response:", err) + c.Close() + } return } if err = c.enc.Encode(body); err != nil { + if c.encBuf.Flush() == nil { + // Was a gob problem encoding the body but the header has been written. + // Shut down the connection to signal that the connection is broken. + log.Println("rpc: gob error encoding body:", err) + c.Close() + } return } return c.encBuf.Flush() } func (c *gobServerCodec) Close() error { + if c.closed { + // Only call c.rwc.Close once; otherwise the semantics are undefined. + return nil + } + c.closed = true return c.rwc.Close() } @@ -426,7 +444,12 @@ func (c *gobServerCodec) Close() error { // connection. To use an alternate codec, use ServeCodec. func (server *Server) ServeConn(conn io.ReadWriteCloser) { buf := bufio.NewWriter(conn) - srv := &gobServerCodec{conn, gob.NewDecoder(conn), gob.NewEncoder(buf), buf} + srv := &gobServerCodec{ + rwc: conn, + dec: gob.NewDecoder(conn), + enc: gob.NewEncoder(buf), + encBuf: buf, + } server.ServeCodec(srv) } diff --git a/libgo/go/net/singleflight.go b/libgo/go/net/singleflight.go index dc58affdaac..bf599f0cc94 100644 --- a/libgo/go/net/singleflight.go +++ b/libgo/go/net/singleflight.go @@ -8,10 +8,18 @@ import "sync" // call is an in-flight or completed singleflight.Do call type call struct { - wg sync.WaitGroup - val interface{} - err error - dups int + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. 
+ val interface{} + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- singleflightResult } // singleflight represents a class of work and forms a namespace in @@ -21,6 +29,14 @@ type singleflight struct { m map[string]*call // lazily initialized } +// singleflightResult holds the results of Do, so they can be passed +// on a channel. +type singleflightResult struct { + v interface{} + err error + shared bool +} + // Do executes and returns the results of the given function, making // sure that only one execution is in-flight for a given key at a // time. If a duplicate comes in, the duplicate caller waits for the @@ -42,12 +58,52 @@ func (g *singleflight) Do(key string, fn func() (interface{}, error)) (v interfa g.m[key] = c g.mu.Unlock() + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *singleflight) DoChan(key string, fn func() (interface{}, error)) <-chan singleflightResult { + ch := make(chan singleflightResult, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- singleflightResult{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *singleflight) doCall(c *call, key string, fn func() (interface{}, error)) { c.val, c.err = fn() c.wg.Done() g.mu.Lock() delete(g.m, key) + for _, ch := range c.chans { + ch <- singleflightResult{c.val, c.err, c.dups > 0} + } g.mu.Unlock() +} - return c.val, c.err, c.dups > 0 +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *singleflight) Forget(key string) { + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() } diff --git a/libgo/go/net/smtp/smtp_test.go b/libgo/go/net/smtp/smtp_test.go index 3fba1ea5ae3..5c659e8a095 100644 --- a/libgo/go/net/smtp/smtp_test.go +++ b/libgo/go/net/smtp/smtp_test.go @@ -669,7 +669,7 @@ func sendMail(hostPort string) error { // localhostCert is a PEM-encoded TLS cert with SAN IPs // "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end // of ASN.1 time). -// generated from src/pkg/crypto/tls: +// generated from src/crypto/tls: // go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h var localhostCert = []byte(`-----BEGIN CERTIFICATE----- MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD diff --git a/libgo/go/net/sock_bsd.go b/libgo/go/net/sock_bsd.go index 48fb7852757..6c37109f5e4 100644 --- a/libgo/go/net/sock_bsd.go +++ b/libgo/go/net/sock_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
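The singleflight additions above (chans, DoChan, doCall, Forget) let any number of callers subscribe to one in-flight lookup and receive its result on buffered channels, while Forget lets a timed-out caller force the next request to start fresh. A stripped-down sketch of the same idea; group, call and res are hypothetical names, since the type in package net is unexported, and the WaitGroup used by Do is omitted.

package main

import (
	"fmt"
	"sync"
	"time"
)

type res struct {
	v   interface{}
	err error
}

type call struct {
	chans []chan<- res // subscribers waiting for this key
}

type group struct {
	mu sync.Mutex
	m  map[string]*call
}

// DoChan returns a channel that receives the result of fn for key,
// running fn only once per key no matter how many callers ask.
func (g *group) DoChan(key string, fn func() (interface{}, error)) <-chan res {
	ch := make(chan res, 1) // buffered: delivery never blocks the worker
	g.mu.Lock()
	if g.m == nil {
		g.m = make(map[string]*call)
	}
	if c, ok := g.m[key]; ok { // duplicate caller: just subscribe
		c.chans = append(c.chans, ch)
		g.mu.Unlock()
		return ch
	}
	c := &call{chans: []chan<- res{ch}}
	g.m[key] = c
	g.mu.Unlock()

	go func() {
		v, err := fn()
		g.mu.Lock()
		delete(g.m, key) // like Forget, but automatic once the call is done
		for _, out := range c.chans {
			out <- res{v, err}
		}
		g.mu.Unlock()
	}()
	return ch
}

func main() {
	var g group
	a := g.DoChan("k", func() (interface{}, error) { time.Sleep(10 * time.Millisecond); return 42, nil })
	b := g.DoChan("k", func() (interface{}, error) { return 0, nil }) // normally coalesced with the first call
	fmt.Println(<-a, <-b)
}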
-// +build darwin dragonfly freebsd nacl netbsd openbsd +// +build darwin dragonfly freebsd netbsd openbsd package net diff --git a/libgo/go/net/sock_posix.go b/libgo/go/net/sock_posix.go index c80c7d6a2f1..3f956df65a6 100644 --- a/libgo/go/net/sock_posix.go +++ b/libgo/go/net/sock_posix.go @@ -36,7 +36,7 @@ type sockaddr interface { // socket returns a network file descriptor that is ready for // asynchronous I/O using the network poller. -func socket(net string, family, sotype, proto int, ipv6only bool, laddr, raddr sockaddr, deadline time.Time, toAddr func(syscall.Sockaddr) Addr) (fd *netFD, err error) { +func socket(net string, family, sotype, proto int, ipv6only bool, laddr, raddr sockaddr, deadline time.Time) (fd *netFD, err error) { s, err := sysSocket(family, sotype, proto) if err != nil { return nil, err @@ -75,27 +75,51 @@ func socket(net string, family, sotype, proto int, ipv6only bool, laddr, raddr s if laddr != nil && raddr == nil { switch sotype { case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET: - if err := fd.listenStream(laddr, listenerBacklog, toAddr); err != nil { + if err := fd.listenStream(laddr, listenerBacklog); err != nil { fd.Close() return nil, err } return fd, nil case syscall.SOCK_DGRAM: - if err := fd.listenDatagram(laddr, toAddr); err != nil { + if err := fd.listenDatagram(laddr); err != nil { fd.Close() return nil, err } return fd, nil } } - if err := fd.dial(laddr, raddr, deadline, toAddr); err != nil { + if err := fd.dial(laddr, raddr, deadline); err != nil { fd.Close() return nil, err } return fd, nil } -func (fd *netFD) dial(laddr, raddr sockaddr, deadline time.Time, toAddr func(syscall.Sockaddr) Addr) error { +func (fd *netFD) addrFunc() func(syscall.Sockaddr) Addr { + switch fd.family { + case syscall.AF_INET, syscall.AF_INET6: + switch fd.sotype { + case syscall.SOCK_STREAM: + return sockaddrToTCP + case syscall.SOCK_DGRAM: + return sockaddrToUDP + case syscall.SOCK_RAW: + return sockaddrToIP + } + case syscall.AF_UNIX: + switch fd.sotype { + case syscall.SOCK_STREAM: + return sockaddrToUnix + case syscall.SOCK_DGRAM: + return sockaddrToUnixgram + case syscall.SOCK_SEQPACKET: + return sockaddrToUnixpacket + } + } + return func(syscall.Sockaddr) Addr { return nil } +} + +func (fd *netFD) dial(laddr, raddr sockaddr, deadline time.Time) error { var err error var lsa syscall.Sockaddr if laddr != nil { @@ -123,14 +147,14 @@ func (fd *netFD) dial(laddr, raddr sockaddr, deadline time.Time, toAddr func(sys } lsa, _ = syscall.Getsockname(fd.sysfd) if rsa, _ = syscall.Getpeername(fd.sysfd); rsa != nil { - fd.setAddr(toAddr(lsa), toAddr(rsa)) + fd.setAddr(fd.addrFunc()(lsa), fd.addrFunc()(rsa)) } else { - fd.setAddr(toAddr(lsa), raddr) + fd.setAddr(fd.addrFunc()(lsa), raddr) } return nil } -func (fd *netFD) listenStream(laddr sockaddr, backlog int, toAddr func(syscall.Sockaddr) Addr) error { +func (fd *netFD) listenStream(laddr sockaddr, backlog int) error { if err := setDefaultListenerSockopts(fd.sysfd); err != nil { return err } @@ -148,11 +172,11 @@ func (fd *netFD) listenStream(laddr sockaddr, backlog int, toAddr func(syscall.S return err } lsa, _ := syscall.Getsockname(fd.sysfd) - fd.setAddr(toAddr(lsa), nil) + fd.setAddr(fd.addrFunc()(lsa), nil) return nil } -func (fd *netFD) listenDatagram(laddr sockaddr, toAddr func(syscall.Sockaddr) Addr) error { +func (fd *netFD) listenDatagram(laddr sockaddr) error { switch addr := laddr.(type) { case *UDPAddr: // We provide a socket that listens to a wildcard @@ -187,6 +211,6 @@ func (fd *netFD) listenDatagram(laddr 
sockaddr, toAddr func(syscall.Sockaddr) Ad return err } lsa, _ := syscall.Getsockname(fd.sysfd) - fd.setAddr(toAddr(lsa), nil) + fd.setAddr(fd.addrFunc()(lsa), nil) return nil } diff --git a/libgo/go/net/sock_solaris.go b/libgo/go/net/sock_stub.go index 90fe9de894c..ed6b0894893 100644 --- a/libgo/go/net/sock_solaris.go +++ b/libgo/go/net/sock_stub.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build nacl solaris + package net import "syscall" diff --git a/libgo/go/net/sockopt_bsd.go b/libgo/go/net/sockopt_bsd.go index 2fa3b6f1d36..d5b3621c526 100644 --- a/libgo/go/net/sockopt_bsd.go +++ b/libgo/go/net/sockopt_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd nacl netbsd openbsd +// +build darwin dragonfly freebsd netbsd openbsd package net @@ -17,7 +17,7 @@ func setDefaultSockopts(s, family, sotype int, ipv6only bool) error { // On DragonFly BSD, we adjust the ephemeral port // range because unlike other BSD systems its default // port range doesn't conform to IANA recommendation - // as described in RFC 6355 and is pretty narrow. + // as described in RFC 6056 and is pretty narrow. switch family { case syscall.AF_INET: syscall.SetsockoptInt(s, syscall.IPPROTO_IP, syscall.IP_PORTRANGE, syscall.IP_PORTRANGE_HIGH) diff --git a/libgo/go/net/sockopt_posix.go b/libgo/go/net/sockopt_posix.go index 921918c37f5..1654d1b85e4 100644 --- a/libgo/go/net/sockopt_posix.go +++ b/libgo/go/net/sockopt_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows package net diff --git a/libgo/go/net/sockopt_stub.go b/libgo/go/net/sockopt_stub.go new file mode 100644 index 00000000000..de5ee0bb63c --- /dev/null +++ b/libgo/go/net/sockopt_stub.go @@ -0,0 +1,37 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl + +package net + +import "syscall" + +func setDefaultSockopts(s, family, sotype int, ipv6only bool) error { + return nil +} + +func setDefaultListenerSockopts(s int) error { + return nil +} + +func setDefaultMulticastSockopts(s int) error { + return nil +} + +func setReadBuffer(fd *netFD, bytes int) error { + return syscall.ENOPROTOOPT +} + +func setWriteBuffer(fd *netFD, bytes int) error { + return syscall.ENOPROTOOPT +} + +func setKeepAlive(fd *netFD, keepalive bool) error { + return syscall.ENOPROTOOPT +} + +func setLinger(fd *netFD, sec int) error { + return syscall.ENOPROTOOPT +} diff --git a/libgo/go/net/sockoptip_bsd.go b/libgo/go/net/sockoptip_bsd.go index 87132f0f461..2199e480d42 100644 --- a/libgo/go/net/sockoptip_bsd.go +++ b/libgo/go/net/sockoptip_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
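The sock_posix.go change above drops the toAddr callback that used to be threaded through socket, dial, listenStream and listenDatagram; the descriptor now derives the converter itself from its stored family and sotype via addrFunc. A toy sketch of that shape, with fakeFD and addrName as hypothetical stand-ins for netFD and its sockaddr-to-Addr converters:

package main

import (
	"fmt"
	"syscall"
)

type fakeFD struct {
	family, sotype int
}

// addrName picks a label the way netFD.addrFunc picks a converter:
// purely from state the descriptor already carries, no callback needed.
func (fd *fakeFD) addrName() string {
	switch fd.family {
	case syscall.AF_INET, syscall.AF_INET6:
		switch fd.sotype {
		case syscall.SOCK_STREAM:
			return "tcp"
		case syscall.SOCK_DGRAM:
			return "udp"
		case syscall.SOCK_RAW:
			return "ip"
		}
	case syscall.AF_UNIX:
		return "unix"
	}
	return "unknown"
}

func main() {
	fd := &fakeFD{family: syscall.AF_INET, sotype: syscall.SOCK_DGRAM}
	fmt.Println(fd.addrName()) // udp
}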
-// +build darwin dragonfly freebsd nacl netbsd openbsd +// +build darwin dragonfly freebsd netbsd openbsd package net diff --git a/libgo/go/net/sockoptip_posix.go b/libgo/go/net/sockoptip_posix.go index b5c80e44909..c2579be9114 100644 --- a/libgo/go/net/sockoptip_posix.go +++ b/libgo/go/net/sockoptip_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd windows +// +build darwin dragonfly freebsd linux netbsd openbsd windows package net diff --git a/libgo/go/net/sockoptip_stub.go b/libgo/go/net/sockoptip_stub.go index dcd3a22b57d..32ec5ddb859 100644 --- a/libgo/go/net/sockoptip_stub.go +++ b/libgo/go/net/sockoptip_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build solaris +// +build nacl solaris package net @@ -10,30 +10,30 @@ import "syscall" func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error { // See golang.org/issue/7399. - return syscall.EINVAL + return syscall.ENOPROTOOPT } func setIPv4MulticastLoopback(fd *netFD, v bool) error { // See golang.org/issue/7399. - return syscall.EINVAL + return syscall.ENOPROTOOPT } func joinIPv4Group(fd *netFD, ifi *Interface, ip IP) error { // See golang.org/issue/7399. - return syscall.EINVAL + return syscall.ENOPROTOOPT } func setIPv6MulticastInterface(fd *netFD, ifi *Interface) error { // See golang.org/issue/7399. - return syscall.EINVAL + return syscall.ENOPROTOOPT } func setIPv6MulticastLoopback(fd *netFD, v bool) error { // See golang.org/issue/7399. - return syscall.EINVAL + return syscall.ENOPROTOOPT } func joinIPv6Group(fd *netFD, ifi *Interface, ip IP) error { // See golang.org/issue/7399. 
- return syscall.EINVAL + return syscall.ENOPROTOOPT } diff --git a/libgo/go/net/tcpsock_posix.go b/libgo/go/net/tcpsock_posix.go index b79b115ca5b..dd78aefa773 100644 --- a/libgo/go/net/tcpsock_posix.go +++ b/libgo/go/net/tcpsock_posix.go @@ -153,7 +153,7 @@ func DialTCP(net string, laddr, raddr *TCPAddr) (*TCPConn, error) { } func dialTCP(net string, laddr, raddr *TCPAddr, deadline time.Time) (*TCPConn, error) { - fd, err := internetSocket(net, laddr, raddr, deadline, syscall.SOCK_STREAM, 0, "dial", sockaddrToTCP) + fd, err := internetSocket(net, laddr, raddr, deadline, syscall.SOCK_STREAM, 0, "dial") // TCP has a rarely used mechanism called a 'simultaneous connection' in // which Dial("tcp", addr1, addr2) run on the machine at addr1 can @@ -183,7 +183,7 @@ func dialTCP(net string, laddr, raddr *TCPAddr, deadline time.Time) (*TCPConn, e if err == nil { fd.Close() } - fd, err = internetSocket(net, laddr, raddr, deadline, syscall.SOCK_STREAM, 0, "dial", sockaddrToTCP) + fd, err = internetSocket(net, laddr, raddr, deadline, syscall.SOCK_STREAM, 0, "dial") } if err != nil { @@ -231,7 +231,7 @@ func (l *TCPListener) AcceptTCP() (*TCPConn, error) { if l == nil || l.fd == nil { return nil, syscall.EINVAL } - fd, err := l.fd.accept(sockaddrToTCP) + fd, err := l.fd.accept() if err != nil { return nil, err } @@ -291,7 +291,7 @@ func ListenTCP(net string, laddr *TCPAddr) (*TCPListener, error) { if laddr == nil { laddr = &TCPAddr{} } - fd, err := internetSocket(net, laddr, nil, noDeadline, syscall.SOCK_STREAM, 0, "listen", sockaddrToTCP) + fd, err := internetSocket(net, laddr, nil, noDeadline, syscall.SOCK_STREAM, 0, "listen") if err != nil { return nil, &OpError{Op: "listen", Net: net, Addr: laddr, Err: err} } diff --git a/libgo/go/net/tcpsockopt_darwin.go b/libgo/go/net/tcpsockopt_darwin.go index 33140849c95..1f1609088ba 100644 --- a/libgo/go/net/tcpsockopt_darwin.go +++ b/libgo/go/net/tcpsockopt_darwin.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// TCP socket options for darwin - package net import ( @@ -12,16 +10,20 @@ import ( "time" ) -// Set keep alive period. +const sysTCP_KEEPINTVL = 0x101 + func setKeepAlivePeriod(fd *netFD, d time.Duration) error { if err := fd.incref(); err != nil { return err } defer fd.decref() - // The kernel expects seconds so round to next highest second. d += (time.Second - time.Nanosecond) secs := int(d.Seconds()) - + switch err := syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, sysTCP_KEEPINTVL, secs); err { + case nil, syscall.ENOPROTOOPT: // OS X 10.7 and earlier don't support this option + default: + return os.NewSyscallError("setsockopt", err) + } return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE, secs)) } diff --git a/libgo/go/net/tcpsockopt_dragonfly.go b/libgo/go/net/tcpsockopt_dragonfly.go index d10a77773d8..0aa213239d1 100644 --- a/libgo/go/net/tcpsockopt_dragonfly.go +++ b/libgo/go/net/tcpsockopt_dragonfly.go @@ -10,20 +10,17 @@ import ( "time" ) -// Set keep alive period. func setKeepAlivePeriod(fd *netFD, d time.Duration) error { if err := fd.incref(); err != nil { return err } defer fd.decref() - - // The kernel expects milliseconds so round to next highest millisecond. + // The kernel expects milliseconds so round to next highest + // millisecond. 
d += (time.Millisecond - time.Nanosecond) - msecs := int(time.Duration(d.Nanoseconds()) / time.Millisecond) - - err := os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, msecs)) - if err != nil { - return err + msecs := int(d / time.Millisecond) + if err := syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, msecs); err != nil { + return os.NewSyscallError("setsockopt", err) } return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, msecs)) } diff --git a/libgo/go/net/tcpsockopt_openbsd.go b/libgo/go/net/tcpsockopt_openbsd.go index 3480f932c80..041e1786a92 100644 --- a/libgo/go/net/tcpsockopt_openbsd.go +++ b/libgo/go/net/tcpsockopt_openbsd.go @@ -2,26 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// TCP socket options for openbsd - package net import ( - "os" "syscall" "time" ) -// Set keep alive period. func setKeepAlivePeriod(fd *netFD, d time.Duration) error { - if err := fd.incref(); err != nil { - return err - } - defer fd.decref() - - // The kernel expects seconds so round to next highest second. - d += (time.Second - time.Nanosecond) - secs := int(d.Seconds()) - - return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.SO_KEEPALIVE, secs)) + // OpenBSD has no user-settable per-socket TCP keepalive + // options. + return syscall.ENOPROTOOPT } diff --git a/libgo/go/net/tcpsockopt_posix.go b/libgo/go/net/tcpsockopt_posix.go index 6484bad4b45..0abf3f97f6b 100644 --- a/libgo/go/net/tcpsockopt_posix.go +++ b/libgo/go/net/tcpsockopt_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows package net diff --git a/libgo/go/net/tcpsockopt_solaris.go b/libgo/go/net/tcpsockopt_solaris.go deleted file mode 100644 index eaab6b6787b..00000000000 --- a/libgo/go/net/tcpsockopt_solaris.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TCP socket options for solaris - -package net - -import ( - "os" - "syscall" - "time" -) - -// Set keep alive period. -func setKeepAlivePeriod(fd *netFD, d time.Duration) error { - if err := fd.incref(); err != nil { - return err - } - defer fd.decref() - - // The kernel expects seconds so round to next highest second. - d += (time.Second - time.Nanosecond) - secs := int(d.Seconds()) - - return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.SO_KEEPALIVE, secs)) -} diff --git a/libgo/go/net/tcpsockopt_stub.go b/libgo/go/net/tcpsockopt_stub.go new file mode 100644 index 00000000000..b413a764d82 --- /dev/null +++ b/libgo/go/net/tcpsockopt_stub.go @@ -0,0 +1,20 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
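The platform-specific setKeepAlivePeriod implementations being adjusted here are reached through the public TCPConn methods. A usage sketch; "example.com:80" is a placeholder address and the error handling is minimal.

package main

import (
	"log"
	"net"
	"time"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	tc := conn.(*net.TCPConn) // Dial("tcp", ...) returns a *TCPConn
	if err := tc.SetKeepAlive(true); err != nil {
		log.Fatal(err)
	}
	// On OpenBSD this is expected to fail with ENOPROTOOPT after the change
	// above, since the keepalive interval is not user-settable there.
	if err := tc.SetKeepAlivePeriod(30 * time.Second); err != nil {
		log.Println("SetKeepAlivePeriod:", err)
	}
}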
+ +// +build nacl + +package net + +import ( + "syscall" + "time" +) + +func setNoDelay(fd *netFD, noDelay bool) error { + return syscall.ENOPROTOOPT +} + +func setKeepAlivePeriod(fd *netFD, d time.Duration) error { + return syscall.ENOPROTOOPT +} diff --git a/libgo/go/net/tcpsockopt_unix.go b/libgo/go/net/tcpsockopt_unix.go index 2693a541d20..c9f604cad7b 100644 --- a/libgo/go/net/tcpsockopt_unix.go +++ b/libgo/go/net/tcpsockopt_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build freebsd linux nacl netbsd +// +build freebsd linux netbsd solaris package net @@ -12,20 +12,16 @@ import ( "time" ) -// Set keep alive period. func setKeepAlivePeriod(fd *netFD, d time.Duration) error { if err := fd.incref(); err != nil { return err } defer fd.decref() - // The kernel expects seconds so round to next highest second. d += (time.Second - time.Nanosecond) secs := int(d.Seconds()) - - err := os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs)) - if err != nil { - return err + if err := syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs); err != nil { + return os.NewSyscallError("setsockopt", err) } return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, secs)) } diff --git a/libgo/go/net/tcpsockopt_windows.go b/libgo/go/net/tcpsockopt_windows.go index 8ef1407977f..091f5233f20 100644 --- a/libgo/go/net/tcpsockopt_windows.go +++ b/libgo/go/net/tcpsockopt_windows.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// TCP socket options for windows - package net import ( @@ -18,14 +16,14 @@ func setKeepAlivePeriod(fd *netFD, d time.Duration) error { return err } defer fd.decref() - - // Windows expects milliseconds so round to next highest millisecond. + // The kernel expects milliseconds so round to next highest + // millisecond. 
d += (time.Millisecond - time.Nanosecond) - millis := uint32(d / time.Millisecond) + msecs := uint32(d / time.Millisecond) ka := syscall.TCPKeepalive{ OnOff: 1, - Time: millis, - Interval: millis, + Time: msecs, + Interval: msecs, } ret := uint32(0) size := uint32(unsafe.Sizeof(ka)) diff --git a/libgo/go/net/testdata/domain-resolv.conf b/libgo/go/net/testdata/domain-resolv.conf new file mode 100644 index 00000000000..ff269180f43 --- /dev/null +++ b/libgo/go/net/testdata/domain-resolv.conf @@ -0,0 +1,5 @@ +# /etc/resolv.conf + +search test invalid +domain localdomain +nameserver 8.8.8.8 diff --git a/libgo/go/net/testdata/empty-resolv.conf b/libgo/go/net/testdata/empty-resolv.conf new file mode 100644 index 00000000000..c4b2b576549 --- /dev/null +++ b/libgo/go/net/testdata/empty-resolv.conf @@ -0,0 +1 @@ +# /etc/resolv.conf diff --git a/libgo/go/net/testdata/resolv.conf b/libgo/go/net/testdata/resolv.conf index 3841bbf9044..04e87eed03f 100644 --- a/libgo/go/net/testdata/resolv.conf +++ b/libgo/go/net/testdata/resolv.conf @@ -1,6 +1,8 @@ # /etc/resolv.conf -domain Home -nameserver 192.168.1.1 +domain localdomain +nameserver 8.8.8.8 +nameserver 2001:4860:4860::8888 +nameserver fe80::1%lo0 options ndots:5 timeout:10 attempts:3 rotate options attempts 3 diff --git a/libgo/go/net/testdata/search-resolv.conf b/libgo/go/net/testdata/search-resolv.conf new file mode 100644 index 00000000000..1c846bfaffc --- /dev/null +++ b/libgo/go/net/testdata/search-resolv.conf @@ -0,0 +1,5 @@ +# /etc/resolv.conf + +domain localdomain +search test invalid +nameserver 8.8.8.8 diff --git a/libgo/go/net/udp_test.go b/libgo/go/net/udp_test.go index e1778779cf5..125bbca6c40 100644 --- a/libgo/go/net/udp_test.go +++ b/libgo/go/net/udp_test.go @@ -9,6 +9,7 @@ import ( "runtime" "strings" "testing" + "time" ) func TestResolveUDPAddr(t *testing.T) { @@ -34,6 +35,46 @@ func TestResolveUDPAddr(t *testing.T) { } } +func TestReadFromUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9": + t.Skipf("skipping test on %q, see issue 8916", runtime.GOOS) + } + + ra, err := ResolveUDPAddr("udp", "127.0.0.1:7") + if err != nil { + t.Fatal(err) + } + + la, err := ResolveUDPAddr("udp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + c, err := ListenUDP("udp", la) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + _, err = c.WriteToUDP([]byte("a"), ra) + if err != nil { + t.Fatal(err) + } + + err = c.SetDeadline(time.Now().Add(100 * time.Millisecond)) + if err != nil { + t.Fatal(err) + } + b := make([]byte, 1) + _, _, err = c.ReadFromUDP(b) + if err == nil { + t.Fatal("ReadFromUDP should fail") + } else if !isTimeout(err) { + t.Fatal(err) + } +} + func TestWriteToUDP(t *testing.T) { switch runtime.GOOS { case "plan9": diff --git a/libgo/go/net/udpsock_posix.go b/libgo/go/net/udpsock_posix.go index 5dfba94e9a6..a0533366a42 100644 --- a/libgo/go/net/udpsock_posix.go +++ b/libgo/go/net/udpsock_posix.go @@ -175,7 +175,7 @@ func DialUDP(net string, laddr, raddr *UDPAddr) (*UDPConn, error) { } func dialUDP(net string, laddr, raddr *UDPAddr, deadline time.Time) (*UDPConn, error) { - fd, err := internetSocket(net, laddr, raddr, deadline, syscall.SOCK_DGRAM, 0, "dial", sockaddrToUDP) + fd, err := internetSocket(net, laddr, raddr, deadline, syscall.SOCK_DGRAM, 0, "dial") if err != nil { return nil, &OpError{Op: "dial", Net: net, Addr: raddr, Err: err} } @@ -198,7 +198,7 @@ func ListenUDP(net string, laddr *UDPAddr) (*UDPConn, error) { if laddr == nil { laddr = &UDPAddr{} } - fd, err := internetSocket(net, laddr, nil, 
noDeadline, syscall.SOCK_DGRAM, 0, "listen", sockaddrToUDP) + fd, err := internetSocket(net, laddr, nil, noDeadline, syscall.SOCK_DGRAM, 0, "listen") if err != nil { return nil, &OpError{Op: "listen", Net: net, Addr: laddr, Err: err} } @@ -218,7 +218,7 @@ func ListenMulticastUDP(net string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, e if gaddr == nil || gaddr.IP == nil { return nil, &OpError{Op: "listen", Net: net, Addr: nil, Err: errMissingAddress} } - fd, err := internetSocket(net, gaddr, nil, noDeadline, syscall.SOCK_DGRAM, 0, "listen", sockaddrToUDP) + fd, err := internetSocket(net, gaddr, nil, noDeadline, syscall.SOCK_DGRAM, 0, "listen") if err != nil { return nil, &OpError{Op: "listen", Net: net, Addr: gaddr, Err: err} } diff --git a/libgo/go/net/unicast_posix_test.go b/libgo/go/net/unicast_posix_test.go index 452ac925428..ab7ef40a758 100644 --- a/libgo/go/net/unicast_posix_test.go +++ b/libgo/go/net/unicast_posix_test.go @@ -204,6 +204,9 @@ func TestDualStackTCPListener(t *testing.T) { // to a test listener with various address families, differnet // listening address and same port. func TestDualStackUDPListener(t *testing.T) { + if testing.Short() { + t.Skip("skipping in -short mode, see issue 5001") + } switch runtime.GOOS { case "plan9": t.Skipf("skipping test on %q", runtime.GOOS) diff --git a/libgo/go/net/unix_test.go b/libgo/go/net/unix_test.go index 05643ddf9ae..1cdff3908c1 100644 --- a/libgo/go/net/unix_test.go +++ b/libgo/go/net/unix_test.go @@ -256,8 +256,11 @@ func TestUnixConnLocalAndRemoteNames(t *testing.T) { t.Fatalf("UnixConn.Write failed: %v", err) } - if runtime.GOOS == "linux" && laddr == "" { - laddr = "@" // autobind feature + switch runtime.GOOS { + case "android", "linux": + if laddr == "" { + laddr = "@" // autobind feature + } } var connAddrs = [3]struct{ got, want Addr }{ {ln.Addr(), ta}, @@ -308,9 +311,13 @@ func TestUnixgramConnLocalAndRemoteNames(t *testing.T) { } }() - if runtime.GOOS == "linux" && laddr == "" { - laddr = "@" // autobind feature + switch runtime.GOOS { + case "android", "linux": + if laddr == "" { + laddr = "@" // autobind feature + } } + var connAddrs = [4]struct{ got, want Addr }{ {c1.LocalAddr(), ta}, {c1.RemoteAddr(), nil}, diff --git a/libgo/go/net/unixsock_posix.go b/libgo/go/net/unixsock_posix.go index 2610779bfd2..3c2e78bdca3 100644 --- a/libgo/go/net/unixsock_posix.go +++ b/libgo/go/net/unixsock_posix.go @@ -42,14 +42,7 @@ func unixSocket(net string, laddr, raddr sockaddr, mode string, deadline time.Ti return nil, errors.New("unknown mode: " + mode) } - f := sockaddrToUnix - if sotype == syscall.SOCK_DGRAM { - f = sockaddrToUnixgram - } else if sotype == syscall.SOCK_SEQPACKET { - f = sockaddrToUnixpacket - } - - fd, err := socket(net, syscall.AF_UNIX, sotype, 0, false, laddr, raddr, deadline, f) + fd, err := socket(net, syscall.AF_UNIX, sotype, 0, false, laddr, raddr, deadline) if err != nil { return nil, err } @@ -286,11 +279,7 @@ func (l *UnixListener) AcceptUnix() (*UnixConn, error) { if l == nil || l.fd == nil { return nil, syscall.EINVAL } - toAddr := sockaddrToUnix - if l.fd.sotype == syscall.SOCK_SEQPACKET { - toAddr = sockaddrToUnixpacket - } - fd, err := l.fd.accept(toAddr) + fd, err := l.fd.accept() if err != nil { return nil, err } diff --git a/libgo/go/net/url/url.go b/libgo/go/net/url/url.go index 75f650a2756..f167408faba 100644 --- a/libgo/go/net/url/url.go +++ b/libgo/go/net/url/url.go @@ -64,7 +64,6 @@ func (e EscapeError) Error() string { // Return true if the specified character should be escaped when // 
appearing in a URL string, according to RFC 3986. -// When 'all' is true the full range of reserved characters are matched. func shouldEscape(c byte, mode encoding) bool { // §2.3 Unreserved characters (alphanum) if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { @@ -86,10 +85,12 @@ func shouldEscape(c byte, mode encoding) bool { // last two as well. That leaves only ? to escape. return c == '?' - case encodeUserPassword: // §3.2.2 - // The RFC allows ; : & = + $ , in userinfo, so we must escape only @ and /. - // The parsing of userinfo treats : as special so we must escape that too. - return c == '@' || c == '/' || c == ':' + case encodeUserPassword: // §3.2.1 + // The RFC allows ';', ':', '&', '=', '+', '$', and ',' in + // userinfo, so we must escape only '@', '/', and '?'. + // The parsing of userinfo treats ':' as special so we must escape + // that too. + return c == '@' || c == '/' || c == '?' || c == ':' case encodeQueryComponent: // §3.4 // The RFC reserves (so we must escape) everything. @@ -440,6 +441,24 @@ func parseAuthority(authority string) (user *Userinfo, host string, err error) { } // String reassembles the URL into a valid URL string. +// The general form of the result is one of: +// +// scheme:opaque +// scheme://userinfo@host/path?query#fragment +// +// If u.Opaque is non-empty, String uses the first form; +// otherwise it uses the second form. +// +// In the second form, the following rules apply: +// - if u.Scheme is empty, scheme: is omitted. +// - if u.User is nil, userinfo@ is omitted. +// - if u.Host is empty, host/ is omitted. +// - if u.Scheme and u.Host are empty and u.User is nil, +// the entire scheme://userinfo@host/ is omitted. +// - if u.Host is non-empty and u.Path begins with a /, +// the form host/path does not add its own /. +// - if u.RawQuery is empty, ?query is omitted. +// - if u.Fragment is empty, #fragment is omitted. func (u *URL) String() string { var buf bytes.Buffer if u.Scheme != "" { diff --git a/libgo/go/net/url/url_test.go b/libgo/go/net/url/url_test.go index cad758f2385..d8b19d805d0 100644 --- a/libgo/go/net/url/url_test.go +++ b/libgo/go/net/url/url_test.go @@ -279,6 +279,16 @@ var urltests = []URLTest{ }, "a/b/c", }, + // escaped '?' 
in username and password + { + "http://%3Fam:pa%3Fsword@google.com", + &URL{ + Scheme: "http", + User: UserPassword("?am", "pa?sword"), + Host: "google.com", + }, + "", + }, } // more useful string for debugging than fmt's struct printer @@ -903,3 +913,49 @@ func TestParseFailure(t *testing.T) { t.Errorf(`ParseQuery(%q) returned error %q, want something containing %q"`, url, errStr, "%gh") } } + +type shouldEscapeTest struct { + in byte + mode encoding + escape bool +} + +var shouldEscapeTests = []shouldEscapeTest{ + // Unreserved characters (§2.3) + {'a', encodePath, false}, + {'a', encodeUserPassword, false}, + {'a', encodeQueryComponent, false}, + {'a', encodeFragment, false}, + {'z', encodePath, false}, + {'A', encodePath, false}, + {'Z', encodePath, false}, + {'0', encodePath, false}, + {'9', encodePath, false}, + {'-', encodePath, false}, + {'-', encodeUserPassword, false}, + {'-', encodeQueryComponent, false}, + {'-', encodeFragment, false}, + {'.', encodePath, false}, + {'_', encodePath, false}, + {'~', encodePath, false}, + + // User information (§3.2.1) + {':', encodeUserPassword, true}, + {'/', encodeUserPassword, true}, + {'?', encodeUserPassword, true}, + {'@', encodeUserPassword, true}, + {'$', encodeUserPassword, false}, + {'&', encodeUserPassword, false}, + {'+', encodeUserPassword, false}, + {',', encodeUserPassword, false}, + {';', encodeUserPassword, false}, + {'=', encodeUserPassword, false}, +} + +func TestShouldEscape(t *testing.T) { + for _, tt := range shouldEscapeTests { + if shouldEscape(tt.in, tt.mode) != tt.escape { + t.Errorf("shouldEscape(%q, %v) returned %v; expected %v", tt.in, tt.mode, !tt.escape, tt.escape) + } + } +} diff --git a/libgo/go/net/z_last_test.go b/libgo/go/net/z_last_test.go index 4f6a54a560a..716c103db26 100644 --- a/libgo/go/net/z_last_test.go +++ b/libgo/go/net/z_last_test.go @@ -8,6 +8,7 @@ import ( "flag" "fmt" "testing" + "time" ) var testDNSFlood = flag.Bool("dnsflood", false, "whether to test dns query flooding") @@ -35,3 +36,64 @@ func TestDNSThreadLimit(t *testing.T) { // If we're still here, it worked. } + +func TestLookupIPDeadline(t *testing.T) { + if !*testDNSFlood { + t.Skip("test disabled; use -dnsflood to enable") + } + + const N = 5000 + const timeout = 3 * time.Second + c := make(chan error, 2*N) + for i := 0; i < N; i++ { + name := fmt.Sprintf("%d.net-test.golang.org", i) + go func() { + _, err := lookupIPDeadline(name, time.Now().Add(timeout/2)) + c <- err + }() + go func() { + _, err := lookupIPDeadline(name, time.Now().Add(timeout)) + c <- err + }() + } + qstats := struct { + succeeded, failed int + timeout, temporary, other int + unknown int + }{} + deadline := time.After(timeout + time.Second) + for i := 0; i < 2*N; i++ { + select { + case <-deadline: + t.Fatal("deadline exceeded") + case err := <-c: + switch err := err.(type) { + case nil: + qstats.succeeded++ + case Error: + qstats.failed++ + if err.Timeout() { + qstats.timeout++ + } + if err.Temporary() { + qstats.temporary++ + } + if !err.Timeout() && !err.Temporary() { + qstats.other++ + } + default: + qstats.failed++ + qstats.unknown++ + } + } + } + + // A high volume of DNS queries for sub-domain of golang.org + // would be coordinated by authoritative or recursive server, + // or stub resolver which implements query-response rate + // limitation, so we can expect some query successes and more + // failures including timeout, temporary and other here. + // As a rule, unknown must not be shown but it might possibly + // happen due to issue 4856 for now. 
+ t.Logf("%v succeeded, %v failed (%v timeout, %v temporary, %v other, %v unknown)", qstats.succeeded, qstats.failed, qstats.timeout, qstats.temporary, qstats.other, qstats.unknown) +} diff --git a/libgo/go/os/dir_unix.go b/libgo/go/os/dir_unix.go index d353e405e54..589db852740 100644 --- a/libgo/go/os/dir_unix.go +++ b/libgo/go/os/dir_unix.go @@ -36,7 +36,7 @@ func (f *File) readdirnames(n int) (names []string, err error) { if d.bufp >= d.nbuf { d.bufp = 0 var errno error - d.nbuf, errno = syscall.ReadDirent(f.fd, d.buf) + d.nbuf, errno = fixCount(syscall.ReadDirent(f.fd, d.buf)) if errno != nil { return names, NewSyscallError("readdirent", errno) } diff --git a/libgo/go/os/env.go b/libgo/go/os/env.go index db7fc72b8a4..d0494a47634 100644 --- a/libgo/go/os/env.go +++ b/libgo/go/os/env.go @@ -91,6 +91,11 @@ func Setenv(key, value string) error { return nil } +// Unsetenv unsets a single environment variable. +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} + // Clearenv deletes all environment variables. func Clearenv() { syscall.Clearenv() diff --git a/libgo/go/os/env_test.go b/libgo/go/os/env_test.go index 991fa4d0578..e6180675137 100644 --- a/libgo/go/os/env_test.go +++ b/libgo/go/os/env_test.go @@ -7,6 +7,7 @@ package os_test import ( . "os" "reflect" + "strings" "testing" ) @@ -68,3 +69,28 @@ func TestConsistentEnviron(t *testing.T) { } } } + +func TestUnsetenv(t *testing.T) { + const testKey = "GO_TEST_UNSETENV" + set := func() bool { + prefix := testKey + "=" + for _, key := range Environ() { + if strings.HasPrefix(key, prefix) { + return true + } + } + return false + } + if err := Setenv(testKey, "1"); err != nil { + t.Fatalf("Setenv: %v", err) + } + if !set() { + t.Error("Setenv didn't set TestUnsetenv") + } + if err := Unsetenv(testKey); err != nil { + t.Fatalf("Unsetenv: %v", err) + } + if set() { + t.Fatal("Unsetenv didn't clear TestUnsetenv") + } +} diff --git a/libgo/go/os/error_plan9.go b/libgo/go/os/error_plan9.go index 85260c82aea..001cdfcf2e3 100644 --- a/libgo/go/os/error_plan9.go +++ b/libgo/go/os/error_plan9.go @@ -25,7 +25,8 @@ func isNotExist(err error) bool { case *LinkError: err = pe.Err } - return contains(err.Error(), "does not exist") + return contains(err.Error(), "does not exist") || contains(err.Error(), "not found") || + contains(err.Error(), "has been removed") || contains(err.Error(), "no parent") } func isPermission(err error) bool { diff --git a/libgo/go/os/exec/exec.go b/libgo/go/os/exec/exec.go index a70ed0d20cb..72b4905d560 100644 --- a/libgo/go/os/exec/exec.go +++ b/libgo/go/os/exec/exec.go @@ -55,8 +55,15 @@ type Cmd struct { // calling process's current directory. Dir string - // Stdin specifies the process's standard input. If Stdin is - // nil, the process reads from the null device (os.DevNull). + // Stdin specifies the process's standard input. + // If Stdin is nil, the process reads from the null device (os.DevNull). + // If Stdin is an *os.File, the process's standard input is connected + // directly to that file. + // Otherwise, during the execution of the command a separate + // goroutine reads from Stdin and delivers that data to the command + // over a pipe. In this case, Wait does not complete until the goroutine + // stops copying, either because it has reached the end of Stdin + // (EOF or a read error) or because writing to the pipe returned an error. Stdin io.Reader // Stdout and Stderr specify the process's standard output and error. 
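The expanded Cmd.Stdin documentation above distinguishes an *os.File, which is handed to the child directly, from any other Reader, which a helper goroutine copies into a pipe so Wait does not return until that copy stops. A usage sketch of the second case; it assumes a Unix-like system with cat on PATH.

package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	cmd := exec.Command("cat")
	cmd.Stdin = strings.NewReader("hello from a pipe\n") // not an *os.File, so a goroutine copies it
	out, err := cmd.Output()                             // Output runs Wait, which waits for that copy too
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
}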
@@ -358,7 +365,7 @@ func (c *Cmd) Wait() error { c.ProcessState = state var copyError error - for _ = range c.goroutine { + for range c.goroutine { if err := <-c.errch; err != nil && copyError == nil { copyError = err } diff --git a/libgo/go/os/exec/exec_test.go b/libgo/go/os/exec/exec_test.go index 8521bfda3f9..f9ffde602ca 100644 --- a/libgo/go/os/exec/exec_test.go +++ b/libgo/go/os/exec/exec_test.go @@ -250,7 +250,7 @@ func TestPipeLookPathLeak(t *testing.T) { } func numOpenFDS(t *testing.T) (n int, lsof []byte) { - lsof, err := exec.Command("lsof", "-n", "-p", strconv.Itoa(os.Getpid())).Output() + lsof, err := exec.Command("lsof", "-b", "-n", "-p", strconv.Itoa(os.Getpid())).Output() if err != nil { t.Skip("skipping test; error finding or running lsof") } @@ -262,15 +262,7 @@ var testedAlreadyLeaked = false // basefds returns the number of expected file descriptors // to be present in a process at start. func basefds() uintptr { - n := os.Stderr.Fd() + 1 - - // Go runtime for 32-bit Plan 9 requires that /dev/bintime - // be kept open. - // See ../../runtime/time_plan9_386.c:/^runtime·nanotime - if runtime.GOOS == "plan9" && runtime.GOARCH == "386" { - n++ - } - return n + return os.Stderr.Fd() + 1 } func closeUnexpectedFds(t *testing.T, m string) { @@ -387,8 +379,9 @@ func TestExtraFilesFDShuffle(t *testing.T) { } func TestExtraFiles(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("no operating system support; skipping") + switch runtime.GOOS { + case "nacl", "windows": + t.Skipf("skipping test on %q", runtime.GOOS) } // Ensure that file descriptors have not already been leaked into diff --git a/libgo/go/os/exec_unix.go b/libgo/go/os/exec_unix.go index 1b1e3350b84..ed97f85e22f 100644 --- a/libgo/go/os/exec_unix.go +++ b/libgo/go/os/exec_unix.go @@ -34,18 +34,26 @@ func (p *Process) wait() (ps *ProcessState, err error) { return ps, nil } +var errFinished = errors.New("os: process already finished") + func (p *Process) signal(sig Signal) error { - if p.done() { - return errors.New("os: process already finished") - } if p.Pid == -1 { return errors.New("os: process already released") } + if p.Pid == 0 { + return errors.New("os: process not initialized") + } + if p.done() { + return errFinished + } s, ok := sig.(syscall.Signal) if !ok { return errors.New("os: unsupported signal type") } if e := syscall.Kill(p.Pid, s); e != nil { + if e == syscall.ESRCH { + return errFinished + } return e } return nil diff --git a/libgo/go/os/exec_windows.go b/libgo/go/os/exec_windows.go index c4f3d4f8530..393393b2375 100644 --- a/libgo/go/os/exec_windows.go +++ b/libgo/go/os/exec_windows.go @@ -53,6 +53,9 @@ func terminateProcess(pid, exitcode int) error { } func (p *Process) signal(sig Signal) error { + if p.handle == uintptr(syscall.InvalidHandle) { + return syscall.EINVAL + } if p.done() { return errors.New("os: process already finished") } diff --git a/libgo/go/os/file.go b/libgo/go/os/file.go index b4a74580162..e12428cbe12 100644 --- a/libgo/go/os/file.go +++ b/libgo/go/os/file.go @@ -255,3 +255,12 @@ var lstat = Lstat func Rename(oldpath, newpath string) error { return rename(oldpath, newpath) } + +// Many functions in package syscall return a count of -1 instead of 0. +// Using fixCount(call()) instead of call() corrects the count. 
+func fixCount(n int, err error) (int, error) { + if n < 0 { + n = 0 + } + return n, err +} diff --git a/libgo/go/os/file_plan9.go b/libgo/go/os/file_plan9.go index a804b819731..132594eede9 100644 --- a/libgo/go/os/file_plan9.go +++ b/libgo/go/os/file_plan9.go @@ -25,7 +25,8 @@ type file struct { dirinfo *dirInfo // nil unless directory being read } -// Fd returns the integer Unix file descriptor referencing the open file. +// Fd returns the integer Plan 9 file descriptor referencing the open file. +// The file descriptor is valid only until f.Close is called or f is garbage collected. func (f *File) Fd() uintptr { if f == nil { return ^(uintptr(0)) @@ -244,14 +245,14 @@ func (f *File) Sync() (err error) { // read reads up to len(b) bytes from the File. // It returns the number of bytes read and an error, if any. func (f *File) read(b []byte) (n int, err error) { - return syscall.Read(f.fd, b) + return fixCount(syscall.Read(f.fd, b)) } // pread reads len(b) bytes from the File starting at byte offset off. // It returns the number of bytes read and the error, if any. // EOF is signaled by a zero count with err set to nil. func (f *File) pread(b []byte, off int64) (n int, err error) { - return syscall.Pread(f.fd, b, off) + return fixCount(syscall.Pread(f.fd, b, off)) } // write writes len(b) bytes to the File. @@ -262,7 +263,7 @@ func (f *File) write(b []byte) (n int, err error) { if len(b) == 0 { return 0, nil } - return syscall.Write(f.fd, b) + return fixCount(syscall.Write(f.fd, b)) } // pwrite writes len(b) bytes to the File starting at byte offset off. @@ -273,7 +274,7 @@ func (f *File) pwrite(b []byte, off int64) (n int, err error) { if len(b) == 0 { return 0, nil } - return syscall.Pwrite(f.fd, b, off) + return fixCount(syscall.Pwrite(f.fd, b, off)) } // seek sets the offset for the next Read or Write on file to offset, interpreted diff --git a/libgo/go/os/file_posix.go b/libgo/go/os/file_posix.go index b3466b15cc8..fbb3b5e4d81 100644 --- a/libgo/go/os/file_posix.go +++ b/libgo/go/os/file_posix.go @@ -13,32 +13,12 @@ import ( func sigpipe() // implemented in package runtime -// Link creates newname as a hard link to the oldname file. -// If there is an error, it will be of type *LinkError. -func Link(oldname, newname string) error { - e := syscall.Link(oldname, newname) - if e != nil { - return &LinkError{"link", oldname, newname, e} - } - return nil -} - -// Symlink creates newname as a symbolic link to oldname. -// If there is an error, it will be of type *LinkError. -func Symlink(oldname, newname string) error { - e := syscall.Symlink(oldname, newname) - if e != nil { - return &LinkError{"symlink", oldname, newname, e} - } - return nil -} - // Readlink returns the destination of the named symbolic link. // If there is an error, it will be of type *PathError. func Readlink(name string) (string, error) { for len := 128; ; len *= 2 { b := make([]byte, len) - n, e := syscall.Readlink(name, b) + n, e := fixCount(syscall.Readlink(name, b)) if e != nil { return "", &PathError{"readlink", name, e} } diff --git a/libgo/go/os/file_unix.go b/libgo/go/os/file_unix.go index 7959091995f..b25e62ff003 100644 --- a/libgo/go/os/file_unix.go +++ b/libgo/go/os/file_unix.go @@ -29,6 +29,7 @@ type file struct { } // Fd returns the integer Unix file descriptor referencing the open file. +// The file descriptor is valid only until f.Close is called or f is garbage collected. 
func (f *File) Fd() uintptr { if f == nil { return ^(uintptr(0)) @@ -198,7 +199,7 @@ func (f *File) read(b []byte) (n int, err error) { if needsMaxRW && len(b) > maxRW { b = b[:maxRW] } - return syscall.Read(f.fd, b) + return fixCount(syscall.Read(f.fd, b)) } // pread reads len(b) bytes from the File starting at byte offset off. @@ -208,7 +209,7 @@ func (f *File) pread(b []byte, off int64) (n int, err error) { if needsMaxRW && len(b) > maxRW { b = b[:maxRW] } - return syscall.Pread(f.fd, b, off) + return fixCount(syscall.Pread(f.fd, b, off)) } // write writes len(b) bytes to the File. @@ -219,7 +220,7 @@ func (f *File) write(b []byte) (n int, err error) { if needsMaxRW && len(bcap) > maxRW { bcap = bcap[:maxRW] } - m, err := syscall.Write(f.fd, bcap) + m, err := fixCount(syscall.Write(f.fd, bcap)) n += m // If the syscall wrote some data but not all (short write) @@ -245,7 +246,7 @@ func (f *File) pwrite(b []byte, off int64) (n int, err error) { if needsMaxRW && len(b) > maxRW { b = b[:maxRW] } - return syscall.Pwrite(f.fd, b, off) + return fixCount(syscall.Pwrite(f.fd, b, off)) } // seek sets the offset for the next Read or Write on file to offset, interpreted @@ -319,7 +320,31 @@ func basename(name string) string { func TempDir() string { dir := Getenv("TMPDIR") if dir == "" { - dir = "/tmp" + if runtime.GOOS == "android" { + dir = "/data/local/tmp" + } else { + dir = "/tmp" + } } return dir } + +// Link creates newname as a hard link to the oldname file. +// If there is an error, it will be of type *LinkError. +func Link(oldname, newname string) error { + e := syscall.Link(oldname, newname) + if e != nil { + return &LinkError{"link", oldname, newname, e} + } + return nil +} + +// Symlink creates newname as a symbolic link to oldname. +// If there is an error, it will be of type *LinkError. +func Symlink(oldname, newname string) error { + e := syscall.Symlink(oldname, newname) + if e != nil { + return &LinkError{"symlink", oldname, newname, e} + } + return nil +} diff --git a/libgo/go/os/getwd.go b/libgo/go/os/getwd.go index a72edeaee6e..d5da53b34bf 100644 --- a/libgo/go/os/getwd.go +++ b/libgo/go/os/getwd.go @@ -5,6 +5,7 @@ package os import ( + "runtime" "sync" "syscall" ) @@ -23,22 +24,16 @@ var useSyscallwd = func(error) bool { return true } // reached via multiple paths (due to symbolic links), // Getwd may return any one of them. func Getwd() (dir string, err error) { - // If the operating system provides a Getwd call, use it. - if syscall.ImplementsGetwd { - s, e := syscall.Getwd() - if useSyscallwd(e) { - return s, NewSyscallError("getwd", e) - } + if runtime.GOOS == "windows" { + return syscall.Getwd() } - // Otherwise, we're trying to find our way back to ".". + // Clumsy but widespread kludge: + // if $PWD is set and matches ".", use it. dot, err := Stat(".") if err != nil { return "", err } - - // Clumsy but widespread kludge: - // if $PWD is set and matches ".", use it. dir = Getenv("PWD") if len(dir) > 0 && dir[0] == '/' { d, err := Stat(dir) @@ -47,6 +42,15 @@ func Getwd() (dir string, err error) { } } + // If the operating system provides a Getwd call, use it. + // Otherwise, we're trying to find our way back to ".". + if syscall.ImplementsGetwd { + s, e := syscall.Getwd() + if useSyscallwd(e) { + return s, NewSyscallError("getwd", e) + } + } + // Apply same kludge but to cached dir instead of $PWD. 
getwdCache.Lock() dir = getwdCache.dir diff --git a/libgo/go/os/os_test.go b/libgo/go/os/os_test.go index a34e328b814..7a86efac218 100644 --- a/libgo/go/os/os_test.go +++ b/libgo/go/os/os_test.go @@ -18,12 +18,15 @@ import ( "runtime" "sort" "strings" + "sync" "syscall" "testing" "text/template" "time" ) +var supportsSymlinks = true + var dot = []string{ "dir_unix.go", "env.go", @@ -40,6 +43,14 @@ type sysDir struct { var sysdir = func() (sd *sysDir) { switch runtime.GOOS { + case "android": + sd = &sysDir{ + "/system/etc", + []string{ + "audio_policy.conf", + "system_fonts.xml", + }, + } case "windows": sd = &sysDir{ Getenv("SystemRoot") + "\\system32\\drivers\\etc", @@ -106,12 +117,27 @@ func newFile(testName string, t *testing.T) (f *File) { // On Unix, override $TMPDIR in case the user // has it set to an NFS-mounted directory. dir := "" - if runtime.GOOS != "windows" { + if runtime.GOOS != "android" && runtime.GOOS != "windows" { dir = "/tmp" } f, err := ioutil.TempFile(dir, "_Go_"+testName) if err != nil { - t.Fatalf("open %s: %s", testName, err) + t.Fatalf("TempFile %s: %s", testName, err) + } + return +} + +func newDir(testName string, t *testing.T) (name string) { + // Use a local file system, not NFS. + // On Unix, override $TMPDIR in case the user + // has it set to an NFS-mounted directory. + dir := "" + if runtime.GOOS != "android" && runtime.GOOS != "windows" { + dir = "/tmp" + } + name, err := ioutil.TempDir(dir, "_Go_"+testName) + if err != nil { + t.Fatalf("TempDir %s: %s", testName, err) } return } @@ -282,10 +308,12 @@ func TestReaddirnamesOneAtATime(t *testing.T) { // big directory that doesn't change often. dir := "/usr/bin" switch runtime.GOOS { - case "windows": - dir = Getenv("SystemRoot") + "\\system32" + case "android": + dir = "/system/bin" case "plan9": dir = "/bin" + case "windows": + dir = Getenv("SystemRoot") + "\\system32" } file, err := Open(dir) if err != nil { @@ -463,7 +491,7 @@ func TestReaddirStatFailures(t *testing.T) { func TestHardLink(t *testing.T) { // Hardlinks are not supported under windows or Plan 9. - if runtime.GOOS == "windows" || runtime.GOOS == "plan9" { + if runtime.GOOS == "plan9" { return } from, to := "hardlinktestfrom", "hardlinktestto" @@ -496,8 +524,12 @@ func TestHardLink(t *testing.T) { func TestSymlink(t *testing.T) { switch runtime.GOOS { - case "windows", "plan9", "nacl": + case "android", "nacl", "plan9": t.Skipf("skipping on %s", runtime.GOOS) + case "windows": + if !supportsSymlinks { + t.Skipf("skipping on %s", runtime.GOOS) + } } from, to := "symlinktestfrom", "symlinktestto" Remove(from) // Just in case. @@ -558,8 +590,12 @@ func TestSymlink(t *testing.T) { func TestLongSymlink(t *testing.T) { switch runtime.GOOS { - case "windows", "plan9", "nacl": + case "plan9", "nacl": t.Skipf("skipping on %s", runtime.GOOS) + case "windows": + if !supportsSymlinks { + t.Skipf("skipping on %s", runtime.GOOS) + } } s := "0123456789abcdef" // Long, but not too long: a common limit is 255. 
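[Aside, not part of the patch] The os_test.go changes above gate the symlink cases behind supportsSymlinks and let the hard-link test run on Windows; the calls they exercise are os.Link, os.Symlink and os.Readlink. An illustrative sketch of that pattern, assuming a Unix-like system where both link kinds are available:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	dir, err := ioutil.TempDir("", "linkdemo") // hypothetical temp-dir prefix
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	target := filepath.Join(dir, "target")
	if err := ioutil.WriteFile(target, []byte("data\n"), 0644); err != nil {
		panic(err)
	}

	// Hard link: a second directory entry for the same file.
	if err := os.Link(target, filepath.Join(dir, "hard")); err != nil {
		panic(err)
	}

	// Symbolic link: a separate file whose content names the target;
	// Readlink returns that name.
	soft := filepath.Join(dir, "soft")
	if err := os.Symlink(target, soft); err != nil {
		panic(err)
	}
	dest, err := os.Readlink(soft)
	fmt.Println(dest, err)
}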
@@ -628,8 +664,9 @@ func exec(t *testing.T, dir, cmd string, args []string, expect string) { } func TestStartProcess(t *testing.T) { - if runtime.GOOS == "nacl" { - t.Skip("skipping on nacl") + switch runtime.GOOS { + case "android", "nacl": + t.Skipf("skipping on %s", runtime.GOOS) } var dir, cmd string @@ -731,35 +768,49 @@ func TestTruncate(t *testing.T) { } } -// Use TempDir() to make sure we're on a local file system, +// Use TempDir (via newFile) to make sure we're on a local file system, // so that timings are not distorted by latency and caching. // On NFS, timings can be off due to caching of meta-data on // NFS servers (Issue 848). func TestChtimes(t *testing.T) { f := newFile("TestChtimes", t) defer Remove(f.Name()) - defer f.Close() f.Write([]byte("hello, world\n")) f.Close() - st, err := Stat(f.Name()) + testChtimes(t, f.Name()) +} + +// Use TempDir (via newDir) to make sure we're on a local file system, +// so that timings are not distorted by latency and caching. +// On NFS, timings can be off due to caching of meta-data on +// NFS servers (Issue 848). +func TestChtimesDir(t *testing.T) { + name := newDir("TestChtimes", t) + defer RemoveAll(name) + + testChtimes(t, name) +} + +func testChtimes(t *testing.T, name string) { + st, err := Stat(name) if err != nil { - t.Fatalf("Stat %s: %s", f.Name(), err) + t.Fatalf("Stat %s: %s", name, err) } preStat := st // Move access and modification time back a second at := Atime(preStat) mt := preStat.ModTime() - err = Chtimes(f.Name(), at.Add(-time.Second), mt.Add(-time.Second)) + err = Chtimes(name, at.Add(-time.Second), mt.Add(-time.Second)) if err != nil { - t.Fatalf("Chtimes %s: %s", f.Name(), err) + t.Fatalf("Chtimes %s: %s", name, err) } - st, err = Stat(f.Name()) + st, err = Stat(name) if err != nil { - t.Fatalf("second Stat %s: %s", f.Name(), err) + t.Fatalf("second Stat %s: %s", name, err) } postStat := st @@ -788,12 +839,16 @@ func TestChdirAndGetwd(t *testing.T) { t.Fatalf("Open .: %s", err) } // These are chosen carefully not to be symlinks on a Mac - // (unlike, say, /var, /etc, and /tmp). - dirs := []string{"/", "/usr/bin"} - // /usr/bin does not usually exist on Plan 9. - if runtime.GOOS == "plan9" { + // (unlike, say, /var, /etc), except /tmp, which we handle below. + dirs := []string{"/", "/usr/bin", "/tmp"} + // /usr/bin does not usually exist on Plan 9 or Android. + switch runtime.GOOS { + case "android": + dirs = []string{"/", "/system/bin"} + case "plan9": dirs = []string{"/", "/usr"} } + oldwd := Getenv("PWD") for mode := 0; mode < 2; mode++ { for _, d := range dirs { if mode == 0 { @@ -807,7 +862,11 @@ func TestChdirAndGetwd(t *testing.T) { err = fd1.Chdir() fd1.Close() } + if d == "/tmp" { + Setenv("PWD", "/tmp") + } pwd, err1 := Getwd() + Setenv("PWD", oldwd) err2 := fd.Chdir() if err2 != nil { // We changed the current directory and cannot go back. @@ -910,6 +969,12 @@ func TestOpenError(t *testing.T) { syscallErrStr := perr.Err.Error() expectedErrStr := strings.Replace(tt.error.Error(), "file ", "", 1) if !strings.HasSuffix(syscallErrStr, expectedErrStr) { + // Some Plan 9 file servers incorrectly return + // EACCES rather than EISDIR when a directory is + // opened for write. 
+ if tt.error == syscall.EISDIR && strings.HasSuffix(syscallErrStr, syscall.EACCES.Error()) { + continue + } t.Errorf("Open(%q, %d) = _, %q; want suffix %q", tt.path, tt.mode, syscallErrStr, expectedErrStr) } continue @@ -970,9 +1035,9 @@ func run(t *testing.T, cmd []string) string { func TestHostname(t *testing.T) { // There is no other way to fetch hostname on windows, but via winapi. - // On Plan 9 it is can be taken from #c/sysname as Hostname() does. + // On Plan 9 it can be taken from #c/sysname as Hostname() does. switch runtime.GOOS { - case "windows", "plan9", "nacl": + case "android", "nacl", "plan9", "windows": t.Skipf("skipping on %s", runtime.GOOS) } @@ -1233,8 +1298,9 @@ func TestReadAtEOF(t *testing.T) { func testKillProcess(t *testing.T, processKiller func(p *Process)) { t.Skip("gccgo does not have a go command") - if runtime.GOOS == "nacl" { - t.Skip("skipping on nacl") + switch runtime.GOOS { + case "android", "nacl": + t.Skipf("skipping on %s", runtime.GOOS) } dir, err := ioutil.TempDir("", "go-build") @@ -1292,6 +1358,36 @@ func TestKillStartProcess(t *testing.T) { }) } +func TestGetppid(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skip("skipping on nacl") + case "plan9": + // TODO: golang.org/issue/8206 + t.Skipf("skipping test on plan9; see issue 8206") + } + + if Getenv("GO_WANT_HELPER_PROCESS") == "1" { + fmt.Print(Getppid()) + Exit(0) + } + + cmd := osexec.Command(Args[0], "-test.run=TestGetppid") + cmd.Env = append(Environ(), "GO_WANT_HELPER_PROCESS=1") + + // verify that Getppid() from the forked process reports our process id + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Failed to spawn child process: %v %q", err, string(output)) + } + + childPpid := string(output) + ourPid := fmt.Sprintf("%d", Getpid()) + if childPpid != ourPid { + t.Fatalf("Child process reports parent process id '%v', expected '%v'", childPpid, ourPid) + } +} + func TestKillFindProcess(t *testing.T) { testKillProcess(t, func(p *Process) { p2, err := FindProcess(p.Pid) @@ -1336,3 +1432,52 @@ func TestNilFileMethods(t *testing.T) { } } } + +func mkdirTree(t *testing.T, root string, level, max int) { + if level >= max { + return + } + level++ + for i := 'a'; i < 'c'; i++ { + dir := filepath.Join(root, string(i)) + if err := Mkdir(dir, 0700); err != nil { + t.Fatal(err) + } + mkdirTree(t, dir, level, max) + } +} + +// Test that simultaneous RemoveAll do not report an error. +// As long as it gets removed, we should be happy. +func TestRemoveAllRace(t *testing.T) { + if runtime.GOOS == "windows" { + // Windows has very strict rules about things like + // removing directories while someone else has + // them open. The racing doesn't work out nicely + // like it does on Unix. + t.Skip("skipping on windows") + } + + n := runtime.GOMAXPROCS(16) + defer runtime.GOMAXPROCS(n) + root, err := ioutil.TempDir("", "issue") + if err != nil { + t.Fatal(err) + } + mkdirTree(t, root, 1, 6) + hold := make(chan struct{}) + var wg sync.WaitGroup + for i := 0; i < 4; i++ { + wg.Add(1) + go func() { + defer wg.Done() + <-hold + err := RemoveAll(root) + if err != nil { + t.Errorf("unexpected error: %T, %q", err, err) + } + }() + } + close(hold) // let workers race to remove root + wg.Wait() +} diff --git a/libgo/go/os/path.go b/libgo/go/os/path.go index 02a77ec8051..84a3be3348f 100644 --- a/libgo/go/os/path.go +++ b/libgo/go/os/path.go @@ -17,7 +17,7 @@ import ( // If path is already a directory, MkdirAll does nothing // and returns nil. 
func MkdirAll(path string, perm FileMode) error { - // If path exists, stop with success or error. + // Fast path: if we can tell whether path is a directory or file, stop with success or error. dir, err := Stat(path) if err == nil { if dir.IsDir() { @@ -26,7 +26,7 @@ func MkdirAll(path string, perm FileMode) error { return &PathError{"mkdir", path, syscall.ENOTDIR} } - // Doesn't already exist; make sure parent does. + // Slow path: make sure parent exists and then call Mkdir for path. i := len(path) for i > 0 && IsPathSeparator(path[i-1]) { // Skip trailing path separator. i-- @@ -45,7 +45,7 @@ func MkdirAll(path string, perm FileMode) error { } } - // Now parent exists, try to create. + // Parent now exists; invoke Mkdir and use its result. err = Mkdir(path, perm) if err != nil { // Handle arguments like "foo/." by @@ -66,7 +66,7 @@ func MkdirAll(path string, perm FileMode) error { func RemoveAll(path string) error { // Simple case: if Remove works, we're done. err := Remove(path) - if err == nil { + if err == nil || IsNotExist(err) { return nil } @@ -86,6 +86,11 @@ func RemoveAll(path string) error { // Directory. fd, err := Open(path) if err != nil { + if IsNotExist(err) { + // Race. It was deleted between the Lstat and Open. + // Return nil per RemoveAll's docs. + return nil + } return err } @@ -116,6 +121,9 @@ func RemoveAll(path string) error { // Remove directory. err1 := Remove(path) + if err1 == nil || IsNotExist(err1) { + return nil + } if err == nil { err = err1 } diff --git a/libgo/go/os/path_test.go b/libgo/go/os/path_test.go index 3af21cde9af..6f24a43132a 100644 --- a/libgo/go/os/path_test.go +++ b/libgo/go/os/path_test.go @@ -168,8 +168,12 @@ func TestRemoveAll(t *testing.T) { func TestMkdirAllWithSymlink(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "windows": + case "nacl", "plan9": t.Skipf("skipping on %s", runtime.GOOS) + case "windows": + if !supportsSymlinks { + t.Skipf("skipping on %s", runtime.GOOS) + } } tmpDir, err := ioutil.TempDir("", "TestMkdirAllWithSymlink-") @@ -198,8 +202,9 @@ func TestMkdirAllWithSymlink(t *testing.T) { } func TestMkdirAllAtSlash(t *testing.T) { - if runtime.GOOS == "windows" || runtime.GOOS == "plan9" { - return + switch runtime.GOOS { + case "android", "plan9", "windows": + t.Skipf("skipping on %s", runtime.GOOS) } RemoveAll("/_go_os_test") err := MkdirAll("/_go_os_test/dir", 0777) diff --git a/libgo/go/os/proc.go b/libgo/go/os/proc.go index 38c436ec54d..774f09900ec 100644 --- a/libgo/go/os/proc.go +++ b/libgo/go/os/proc.go @@ -6,11 +6,24 @@ package os -import "syscall" +import ( + "runtime" + "syscall" +) // Args hold the command-line arguments, starting with the program name. var Args []string +func init() { + if runtime.GOOS == "windows" { + // Initialized in exec_windows.go. + return + } + Args = runtime_args() +} + +func runtime_args() []string // in package runtime + // Getuid returns the numeric user id of the caller. 
func Getuid() int { return syscall.Getuid() } diff --git a/libgo/go/os/signal/signal_test.go b/libgo/go/os/signal/signal_test.go index 076fe3f93bd..22337a72d4b 100644 --- a/libgo/go/os/signal/signal_test.go +++ b/libgo/go/os/signal/signal_test.go @@ -125,7 +125,7 @@ func TestStop(t *testing.T) { if sig != syscall.SIGHUP || *sendUncaughtSighup == 1 { syscall.Kill(syscall.Getpid(), sig) } - time.Sleep(10 * time.Millisecond) + time.Sleep(100 * time.Millisecond) // Ask for signal c := make(chan os.Signal, 1) @@ -140,7 +140,7 @@ func TestStop(t *testing.T) { select { case s := <-c: t.Fatalf("unexpected signal %v", s) - case <-time.After(10 * time.Millisecond): + case <-time.After(100 * time.Millisecond): // nothing to read - good } @@ -154,7 +154,7 @@ func TestStop(t *testing.T) { select { case s := <-c: t.Fatalf("unexpected signal %v", s) - case <-time.After(10 * time.Millisecond): + case <-time.After(100 * time.Millisecond): // nothing to read - good } } diff --git a/libgo/go/os/types_windows.go b/libgo/go/os/types_windows.go index 38901681e67..7b2e54698c3 100644 --- a/libgo/go/os/types_windows.go +++ b/libgo/go/os/types_windows.go @@ -39,6 +39,9 @@ func (fs *fileStat) Mode() (m FileMode) { } else { m |= 0666 } + if fs.sys.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { + m |= ModeSymlink + } return m } diff --git a/libgo/go/os/user/lookup_stubs.go b/libgo/go/os/user/lookup_stubs.go index 86f0e6e645a..4fb0e3c6edd 100644 --- a/libgo/go/os/user/lookup_stubs.go +++ b/libgo/go/os/user/lookup_stubs.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !cgo,!windows,!plan9 +// +build !cgo,!windows,!plan9 android package user diff --git a/libgo/go/os/user/lookup_unix.go b/libgo/go/os/user/lookup_unix.go index 0db08067201..64f4576f69e 100644 --- a/libgo/go/os/user/lookup_unix.go +++ b/libgo/go/os/user/lookup_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris // +build cgo package user diff --git a/libgo/go/path/filepath/match.go b/libgo/go/path/filepath/match.go index a9bcc103c55..ecc07aa5dac 100644 --- a/libgo/go/path/filepath/match.go +++ b/libgo/go/path/filepath/match.go @@ -228,6 +228,9 @@ func getEsc(chunk string) (r rune, nchunk string, err error) { // as in Match. The pattern may describe hierarchical names such as // /usr/*/bin/ed (assuming the Separator is '/'). // +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. 
func Glob(pattern string) (matches []string, err error) { if !hasMeta(pattern) { if _, err = os.Lstat(pattern); err != nil { @@ -283,10 +286,7 @@ func glob(dir, pattern string, matches []string) (m []string, e error) { } defer d.Close() - names, err := d.Readdirnames(-1) - if err != nil { - return - } + names, _ := d.Readdirnames(-1) sort.Strings(names) for _, n := range names { diff --git a/libgo/go/path/filepath/match_test.go b/libgo/go/path/filepath/match_test.go index 9886620ade0..c29f93fb7bb 100644 --- a/libgo/go/path/filepath/match_test.go +++ b/libgo/go/path/filepath/match_test.go @@ -168,8 +168,13 @@ var globSymlinkTests = []struct { func TestGlobSymlink(t *testing.T) { switch runtime.GOOS { - case "nacl", "plan9", "windows": + case "nacl", "plan9": t.Skipf("skipping on %s", runtime.GOOS) + case "windows": + if !supportsSymlinks { + t.Skipf("skipping on %s", runtime.GOOS) + } + } tmpDir, err := ioutil.TempDir("", "globsymlink") diff --git a/libgo/go/path/filepath/path.go b/libgo/go/path/filepath/path.go index 71603cc5946..d37fc9dfc89 100644 --- a/libgo/go/path/filepath/path.go +++ b/libgo/go/path/filepath/path.go @@ -231,6 +231,10 @@ func EvalSymlinks(path string) (string, error) { // working directory to turn it into an absolute path. The absolute // path name for a given file is not guaranteed to be unique. func Abs(path string) (string, error) { + return abs(path) +} + +func unixAbs(path string) (string, error) { if IsAbs(path) { return Clean(path), nil } @@ -448,13 +452,6 @@ func Dir(path string) string { i-- } dir := Clean(path[len(vol) : i+1]) - last := len(dir) - 1 - if last > 0 && os.IsPathSeparator(dir[last]) { - dir = dir[:last] - } - if dir == "" { - dir = "." - } return vol + dir } diff --git a/libgo/go/path/filepath/path_plan9.go b/libgo/go/path/filepath/path_plan9.go index 12e85aae00c..ee8912d58e1 100644 --- a/libgo/go/path/filepath/path_plan9.go +++ b/libgo/go/path/filepath/path_plan9.go @@ -28,3 +28,7 @@ func splitList(path string) []string { } return strings.Split(path, string(ListSeparator)) } + +func abs(path string) (string, error) { + return unixAbs(path) +} diff --git a/libgo/go/path/filepath/path_test.go b/libgo/go/path/filepath/path_test.go index 6d1139432c3..c3ee0cb86b8 100644 --- a/libgo/go/path/filepath/path_test.go +++ b/libgo/go/path/filepath/path_test.go @@ -15,6 +15,8 @@ import ( "testing" ) +var supportsSymlinks = true + type PathTest struct { path, result string } @@ -629,6 +631,8 @@ var winisabstests = []IsAbsTest{ {`\`, false}, {`\Windows`, false}, {`c:a\b`, false}, + {`c:\a\b`, true}, + {`c:/a/b`, true}, {`\\host\share\foo`, true}, {`//host/share/foo/bar`, true}, } @@ -719,7 +723,7 @@ func TestEvalSymlinks(t *testing.T) { if d.dest == "" { err = os.Mkdir(path, 0755) } else { - if runtime.GOOS != "windows" { + if supportsSymlinks { err = os.Symlink(d.dest, path) } } @@ -729,7 +733,9 @@ func TestEvalSymlinks(t *testing.T) { } var tests []EvalSymlinksTest - if runtime.GOOS == "windows" { + if supportsSymlinks { + tests = EvalSymlinksTests + } else { for _, d := range EvalSymlinksTests { if d.path == d.dest { // will test only real files and directories @@ -742,15 +748,13 @@ func TestEvalSymlinks(t *testing.T) { tests = append(tests, d2) } } - } else { - tests = EvalSymlinksTests } // Evaluate the symlink farm. 
for _, d := range tests { path := simpleJoin(tmpDir, d.path) dest := simpleJoin(tmpDir, d.dest) - if filepath.IsAbs(d.dest) { + if filepath.IsAbs(d.dest) || os.IsPathSeparator(d.dest[0]) { dest = d.dest } if p, err := filepath.EvalSymlinks(path); err != nil { @@ -785,12 +789,6 @@ var absTests = []string{ } func TestAbs(t *testing.T) { - oldwd, err := os.Getwd() - if err != nil { - t.Fatal("Getwd failed: ", err) - } - defer os.Chdir(oldwd) - root, err := ioutil.TempDir("", "TestAbs") if err != nil { t.Fatal("TempDir failed: ", err) @@ -814,6 +812,19 @@ func TestAbs(t *testing.T) { } } + if runtime.GOOS == "windows" { + vol := filepath.VolumeName(root) + var extra []string + for _, path := range absTests { + if strings.Index(path, "$") != -1 { + continue + } + path = vol + path + extra = append(extra, path) + } + absTests = append(absTests, extra...) + } + err = os.Chdir(absTestDirs[0]) if err != nil { t.Fatal("chdir failed: ", err) diff --git a/libgo/go/path/filepath/path_unix.go b/libgo/go/path/filepath/path_unix.go index 7aba0ab5b9b..4e7d0d1b422 100644 --- a/libgo/go/path/filepath/path_unix.go +++ b/libgo/go/path/filepath/path_unix.go @@ -30,3 +30,7 @@ func splitList(path string) []string { } return strings.Split(path, string(ListSeparator)) } + +func abs(path string) (string, error) { + return unixAbs(path) +} diff --git a/libgo/go/path/filepath/path_windows.go b/libgo/go/path/filepath/path_windows.go index e99997257d7..ec50f6b264f 100644 --- a/libgo/go/path/filepath/path_windows.go +++ b/libgo/go/path/filepath/path_windows.go @@ -6,6 +6,7 @@ package filepath import ( "strings" + "syscall" ) func isSlash(c uint8) bool { @@ -103,3 +104,7 @@ func splitList(path string) []string { return list } + +func abs(path string) (string, error) { + return syscall.FullPath(path) +} diff --git a/libgo/go/path/filepath/symlink.go b/libgo/go/path/filepath/symlink.go index 307dd0f8fee..df0a9e0c2ba 100644 --- a/libgo/go/path/filepath/symlink.go +++ b/libgo/go/path/filepath/symlink.go @@ -2,18 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !windows - package filepath import ( "bytes" "errors" "os" - "strings" ) -func evalSymlinks(path string) (string, error) { +const utf8RuneSelf = 0x80 + +func walkSymlinks(path string) (string, error) { const maxIter = 255 originalPath := path // consume path by taking each frontmost path element, @@ -25,7 +24,13 @@ func evalSymlinks(path string) (string, error) { } // find next path component, p - i := strings.IndexRune(path, Separator) + var i = -1 + for j, c := range path { + if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { + i = j + break + } + } var p string if i == -1 { p, path = path, "" @@ -47,7 +52,7 @@ func evalSymlinks(path string) (string, error) { } if fi.Mode()&os.ModeSymlink == 0 { b.WriteString(p) - if path != "" { + if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { b.WriteRune(Separator) } continue @@ -58,7 +63,7 @@ func evalSymlinks(path string) (string, error) { if err != nil { return "", err } - if IsAbs(dest) { + if IsAbs(dest) || os.IsPathSeparator(dest[0]) { b.Reset() } path = dest + string(Separator) + path diff --git a/libgo/go/path/filepath/symlink_unix.go b/libgo/go/path/filepath/symlink_unix.go new file mode 100644 index 00000000000..d20e63a987e --- /dev/null +++ b/libgo/go/path/filepath/symlink_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package filepath + +func evalSymlinks(path string) (string, error) { + return walkSymlinks(path) +} diff --git a/libgo/go/path/filepath/symlink_windows.go b/libgo/go/path/filepath/symlink_windows.go index 9adc8a48af0..327c2c89a37 100644 --- a/libgo/go/path/filepath/symlink_windows.go +++ b/libgo/go/path/filepath/symlink_windows.go @@ -50,6 +50,11 @@ func toLong(path string) (string, error) { } func evalSymlinks(path string) (string, error) { + path, err := walkSymlinks(path) + if err != nil { + return "", err + } + p, err := toShort(path) if err != nil { return "", err diff --git a/libgo/go/path/path.go b/libgo/go/path/path.go index bdb85c6b92a..98a6d529229 100644 --- a/libgo/go/path/path.go +++ b/libgo/go/path/path.go @@ -206,13 +206,5 @@ func IsAbs(path string) bool { // slash. func Dir(path string) string { dir, _ := Split(path) - dir = Clean(dir) - last := len(dir) - 1 - if last > 0 && dir[last] == '/' { - dir = dir[:last] - } - if dir == "" { - dir = "." - } - return dir + return Clean(dir) } diff --git a/libgo/go/reflect/all_test.go b/libgo/go/reflect/all_test.go index 3e107795bdf..bda87867c74 100644 --- a/libgo/go/reflect/all_test.go +++ b/libgo/go/reflect/all_test.go @@ -679,7 +679,7 @@ var deepEqualTests = []DeepEqualTest{ {1, nil, false}, {fn1, fn3, false}, {fn3, fn3, false}, - {[][]int{[]int{1}}, [][]int{[]int{2}}, false}, + {[][]int{{1}}, [][]int{{2}}, false}, // Nil vs empty: not the same. {[]int{}, []int(nil), false}, @@ -2507,10 +2507,21 @@ func TestAllocations(t *testing.T) { noAlloc(t, 100, func(j int) { var i interface{} var v Value - i = 42 + j + + // We can uncomment this when compiler escape analysis + // is good enough to see that the integer assigned to i + // does not escape and therefore need not be allocated. 
+ // + // i = 42 + j + // v = ValueOf(i) + // if int(v.Int()) != 42+j { + // panic("wrong int") + // } + + i = func(j int) int { return j } v = ValueOf(i) - if int(v.Int()) != 42+j { - panic("wrong int") + if v.Interface().(func(int) int)(j) != j { + panic("wrong result") } }) } @@ -2571,6 +2582,15 @@ func TestSlice(t *testing.T) { if vs != s[3:5] { t.Errorf("s.Slice(3, 5) = %q; expected %q", vs, s[3:5]) } + + rv := ValueOf(&xs).Elem() + rv = rv.Slice(3, 4) + ptr2 := rv.Pointer() + rv = rv.Slice(5, 5) + ptr3 := rv.Pointer() + if ptr3 != ptr2 { + t.Errorf("xs.Slice(3,4).Slice3(5,5).Pointer() = %#x, want %#x", ptr3, ptr2) + } } func TestSlice3(t *testing.T) { @@ -2609,6 +2629,15 @@ func TestSlice3(t *testing.T) { s := "hello world" rv = ValueOf(&s).Elem() shouldPanic(func() { rv.Slice3(1, 2, 3) }) + + rv = ValueOf(&xs).Elem() + rv = rv.Slice3(3, 5, 7) + ptr2 := rv.Pointer() + rv = rv.Slice3(4, 4, 4) + ptr3 := rv.Pointer() + if ptr3 != ptr2 { + t.Errorf("xs.Slice3(3,5,7).Slice3(4,4,4).Pointer() = %#x, want %#x", ptr3, ptr2) + } } func TestSetLenCap(t *testing.T) { @@ -2667,6 +2696,26 @@ func TestFuncArg(t *testing.T) { } } +func TestStructArg(t *testing.T) { + type padded struct { + B string + C int32 + } + var ( + gotA padded + gotB uint32 + wantA = padded{"3", 4} + wantB = uint32(5) + ) + f := func(a padded, b uint32) { + gotA, gotB = a, b + } + ValueOf(f).Call([]Value{ValueOf(wantA), ValueOf(wantB)}) + if gotA != wantA || gotB != wantB { + t.Errorf("function called with (%v, %v), want (%v, %v)", gotA, gotB, wantA, wantB) + } +} + var tagGetTests = []struct { Tag StructTag Key string @@ -3244,6 +3293,44 @@ func TestConvert(t *testing.T) { } } +type ComparableStruct struct { + X int +} + +type NonComparableStruct struct { + X int + Y map[string]int +} + +var comparableTests = []struct { + typ Type + ok bool +}{ + {TypeOf(1), true}, + {TypeOf("hello"), true}, + {TypeOf(new(byte)), true}, + {TypeOf((func())(nil)), false}, + {TypeOf([]byte{}), false}, + {TypeOf(map[string]int{}), false}, + {TypeOf(make(chan int)), true}, + {TypeOf(1.5), true}, + {TypeOf(false), true}, + {TypeOf(1i), true}, + {TypeOf(ComparableStruct{}), true}, + {TypeOf(NonComparableStruct{}), false}, + {TypeOf([10]map[string]int{}), false}, + {TypeOf([10]string{}), true}, + {TypeOf(new(interface{})).Elem(), true}, +} + +func TestComparable(t *testing.T) { + for _, tt := range comparableTests { + if ok := tt.typ.Comparable(); ok != tt.ok { + t.Errorf("TypeOf(%v).Comparable() = %v, want %v", tt.typ, ok, tt.ok) + } + } +} + func TestOverflow(t *testing.T) { if ovf := V(float64(0)).OverflowFloat(1e300); ovf { t.Errorf("%v wrongly overflows float64", 1e300) @@ -3290,6 +3377,9 @@ func checkSameType(t *testing.T, x, y interface{}) { } func TestArrayOf(t *testing.T) { + // TODO(rsc): Finish ArrayOf and enable-test. + t.Skip("ArrayOf is not finished (and not exported)") + // check construction and use of type not in binary type T int at := ArrayOf(10, TypeOf(T(1))) @@ -3911,3 +4001,166 @@ func TestCallMethodJump(t *testing.T) { // Stop garbage collecting during reflect.call. 
*CallGC = false } + +func TestMakeFuncStackCopy(t *testing.T) { + target := func(in []Value) []Value { + runtime.GC() + useStack(16) + return []Value{ValueOf(9)} + } + + var concrete func(*int, int) int + fn := MakeFunc(ValueOf(concrete).Type(), target) + ValueOf(&concrete).Elem().Set(fn) + x := concrete(nil, 7) + if x != 9 { + t.Errorf("have %#q want 9", x) + } +} + +// use about n KB of stack +func useStack(n int) { + if n == 0 { + return + } + var b [1024]byte // makes frame about 1KB + useStack(n - 1 + int(b[99])) +} + +type Impl struct{} + +func (Impl) f() {} + +func TestValueString(t *testing.T) { + rv := ValueOf(Impl{}) + if rv.String() != "<reflect_test.Impl Value>" { + t.Errorf("ValueOf(Impl{}).String() = %q, want %q", rv.String(), "<reflect_test.Impl Value>") + } + + method := rv.Method(0) + if method.String() != "<func() Value>" { + t.Errorf("ValueOf(Impl{}).Method(0).String() = %q, want %q", method.String(), "<func() Value>") + } +} + +func TestInvalid(t *testing.T) { + // Used to have inconsistency between IsValid() and Kind() != Invalid. + type T struct{ v interface{} } + + v := ValueOf(T{}).Field(0) + if v.IsValid() != true || v.Kind() != Interface { + t.Errorf("field: IsValid=%v, Kind=%v, want true, Interface", v.IsValid(), v.Kind()) + } + v = v.Elem() + if v.IsValid() != false || v.Kind() != Invalid { + t.Errorf("field elem: IsValid=%v, Kind=%v, want false, Invalid", v.IsValid(), v.Kind()) + } +} + +// Issue 8917. +func TestLargeGCProg(t *testing.T) { + fv := ValueOf(func([256]*byte) {}) + fv.Call([]Value{ValueOf([256]*byte{})}) +} + +// Issue 9179. +func TestCallGC(t *testing.T) { + f := func(a, b, c, d, e string) { + } + g := func(in []Value) []Value { + runtime.GC() + return nil + } + typ := ValueOf(f).Type() + f2 := MakeFunc(typ, g).Interface().(func(string, string, string, string, string)) + f2("four", "five5", "six666", "seven77", "eight888") +} + +type funcLayoutTest struct { + rcvr, t Type + argsize, retOffset uintptr + stack []byte +} + +var funcLayoutTests []funcLayoutTest + +func init() { + var argAlign = PtrSize + if runtime.GOARCH == "amd64p32" { + argAlign = 2 * PtrSize + } + roundup := func(x uintptr, a uintptr) uintptr { + return (x + a - 1) / a * a + } + + funcLayoutTests = append(funcLayoutTests, + funcLayoutTest{ + nil, + ValueOf(func(a, b string) string { return "" }).Type(), + 4 * PtrSize, + 4 * PtrSize, + []byte{BitsPointer, BitsScalar, BitsPointer}, + }) + + var r []byte + if PtrSize == 4 { + r = []byte{BitsScalar, BitsScalar, BitsScalar, BitsPointer} + } else { + r = []byte{BitsScalar, BitsScalar, BitsPointer} + } + funcLayoutTests = append(funcLayoutTests, + funcLayoutTest{ + nil, + ValueOf(func(a, b, c uint32, p *byte, d uint16) {}).Type(), + roundup(3*4, PtrSize) + PtrSize + 2, + roundup(roundup(3*4, PtrSize)+PtrSize+2, argAlign), + r, + }) + + funcLayoutTests = append(funcLayoutTests, + funcLayoutTest{ + nil, + ValueOf(func(a map[int]int, b uintptr, c interface{}) {}).Type(), + 4 * PtrSize, + 4 * PtrSize, + []byte{BitsPointer, BitsScalar, BitsPointer, BitsPointer}, + }) + + type S struct { + a, b uintptr + c, d *byte + } + funcLayoutTests = append(funcLayoutTests, + funcLayoutTest{ + nil, + ValueOf(func(a S) {}).Type(), + 4 * PtrSize, + 4 * PtrSize, + []byte{BitsScalar, BitsScalar, BitsPointer, BitsPointer}, + }) + + funcLayoutTests = append(funcLayoutTests, + funcLayoutTest{ + ValueOf((*byte)(nil)).Type(), + ValueOf(func(a uintptr, b *int) {}).Type(), + 3 * PtrSize, + roundup(3*PtrSize, argAlign), + []byte{BitsPointer, BitsScalar, BitsPointer}, 
+ }) +} + +func TestFuncLayout(t *testing.T) { + t.Skip("gccgo does not use funcLayout") + for _, lt := range funcLayoutTests { + _, argsize, retOffset, stack := FuncLayout(lt.t, lt.rcvr) + if argsize != lt.argsize { + t.Errorf("funcLayout(%v, %v).argsize=%d, want %d", lt.t, lt.rcvr, argsize, lt.argsize) + } + if retOffset != lt.retOffset { + t.Errorf("funcLayout(%v, %v).retOffset=%d, want %d", lt.t, lt.rcvr, retOffset, lt.retOffset) + } + if !bytes.Equal(stack, lt.stack) { + t.Errorf("funcLayout(%v, %v).stack=%v, want %v", lt.t, lt.rcvr, stack, lt.stack) + } + } +} diff --git a/libgo/go/reflect/export_test.go b/libgo/go/reflect/export_test.go index 0778ad37f5c..49c45e82b2e 100644 --- a/libgo/go/reflect/export_test.go +++ b/libgo/go/reflect/export_test.go @@ -17,3 +17,11 @@ func IsRO(v Value) bool { var ArrayOf = arrayOf var CallGC = &callGC + +const PtrSize = ptrSize +const BitsPointer = bitsPointer +const BitsScalar = bitsScalar + +func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack []byte) { + return +} diff --git a/libgo/go/reflect/makefunc.go b/libgo/go/reflect/makefunc.go index eb4589c6ce9..276be26108f 100644 --- a/libgo/go/reflect/makefunc.go +++ b/libgo/go/reflect/makefunc.go @@ -79,7 +79,7 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value { ffi: ffi, } - return Value{t, unsafe.Pointer(&impl), flag(Func<<flagKindShift) | flagIndir} + return Value{t, unsafe.Pointer(&impl), flag(Func) | flagIndir} } // makeFuncStub is an assembly function that is the code half of @@ -103,8 +103,8 @@ func makeMethodValue(op string, v Value) Value { // Ignoring the flagMethod bit, v describes the receiver, not the method type. fl := v.flag & (flagRO | flagAddr | flagIndir) - fl |= flag(v.typ.Kind()) << flagKindShift - rcvr := Value{v.typ, v.ptr /* v.scalar, */, fl} + fl |= flag(v.typ.Kind()) + rcvr := Value{v.typ, v.ptr, fl} // v.Type returns the actual type of the method value. ft := v.Type().(*rtype) @@ -134,7 +134,7 @@ func makeMethodValue(op string, v Value) Value { fv.code, fv.ffi = makeFuncFFI(ftyp, fv.call) } - return Value{ft, unsafe.Pointer(&fv), v.flag&flagRO | flag(Func)<<flagKindShift | flagIndir} + return Value{ft, unsafe.Pointer(&fv), v.flag&flagRO | flag(Func) | flagIndir} } // makeValueMethod takes a method function and returns a function that @@ -169,7 +169,7 @@ func makeValueMethod(v Value) Value { impl.code, impl.ffi = makeFuncFFI(ftyp, impl.call) } - return Value{t, unsafe.Pointer(&impl), flag(Func<<flagKindShift) | flagIndir} + return Value{t, unsafe.Pointer(&impl), v.flag&flagRO | flag(Func) | flagIndir} } // Call the function represented by a makeFuncImpl. 
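[Aside, not part of the patch] The makefunc.go hunks above replace the old flag(Kind<<flagKindShift) encoding with the Kind stored in the low flag bits; the values affected are the ones reflect.MakeFunc constructs. A small MakeFunc usage sketch for reference:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Build a func(int, int) int at run time whose body is supplied as a
	// func([]reflect.Value) []reflect.Value, then bind it to a typed variable.
	var add func(int, int) int
	impl := func(args []reflect.Value) []reflect.Value {
		sum := args[0].Int() + args[1].Int()
		return []reflect.Value{reflect.ValueOf(int(sum))}
	}
	fn := reflect.MakeFunc(reflect.TypeOf(add), impl)
	reflect.ValueOf(&add).Elem().Set(fn)
	fmt.Println(add(2, 3)) // prints 5
}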
diff --git a/libgo/go/reflect/makefunc_ffi.go b/libgo/go/reflect/makefunc_ffi.go index a13ef179f5d..40c1ea80fbe 100644 --- a/libgo/go/reflect/makefunc_ffi.go +++ b/libgo/go/reflect/makefunc_ffi.go @@ -56,7 +56,7 @@ func ffiCall(ftyp *funcType, fn func([]Value) []Value, params unsafe.Pointer, re for _, rt := range ftyp.in { p := unsafe_New(rt) memmove(p, *(*unsafe.Pointer)(ap), rt.size) - v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir} + v := Value{rt, p, flag(rt.Kind()) | flagIndir} in = append(in, v) ap = (unsafe.Pointer)(uintptr(ap) + ptrSize) } diff --git a/libgo/go/reflect/makefuncgo_386.go b/libgo/go/reflect/makefuncgo_386.go index 7809fb01f23..c20f0ac3b3e 100644 --- a/libgo/go/reflect/makefuncgo_386.go +++ b/libgo/go/reflect/makefuncgo_386.go @@ -75,7 +75,7 @@ func MakeFuncStubGo(regs *i386Regs, c *makeFuncImpl) { p := unsafe_New(rt) memmove(p, unsafe.Pointer(ap), rt.size) - v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir} + v := Value{rt, p, flag(rt.Kind()) | flagIndir} in = append(in, v) ap += rt.size } @@ -128,15 +128,12 @@ func MakeFuncStubGo(regs *i386Regs, c *makeFuncImpl) { v := out[0] switch v.Kind() { - case Ptr, UnsafePointer: + case Ptr, UnsafePointer, Chan, Func, Map: regs.eax = uint32(uintptr(v.pointer())) - case Float32: - regs.st0 = float64(*(*float32)(v.ptr)) - regs.sf = true - case Float64: - regs.st0 = *(*float64)(v.ptr) + case Float32, Float64: + regs.st0 = v.Float() regs.sf = true default: - regs.eax = uint32(loadScalar(v.ptr, v.typ.size)) + memmove(unsafe.Pointer(®s.eax), v.ptr, v.typ.size) } } diff --git a/libgo/go/reflect/makefuncgo_amd64.go b/libgo/go/reflect/makefuncgo_amd64.go index 7118951d1fd..a236aa26795 100644 --- a/libgo/go/reflect/makefuncgo_amd64.go +++ b/libgo/go/reflect/makefuncgo_amd64.go @@ -224,7 +224,7 @@ argloop: for _, rt := range ftyp.in { c1, c2 := amd64Classify(rt) - fl := flag(rt.Kind()) << flagKindShift + fl := flag(rt.Kind()) if c2 == amd64NoClass { // Argument is passed in a single register or @@ -364,10 +364,11 @@ argloop: if len(out) == 1 && ret2 == amd64NoClass { v := out[0] var w unsafe.Pointer - if v.Kind() == Ptr || v.Kind() == UnsafePointer { + switch v.Kind() { + case Ptr, UnsafePointer, Chan, Func, Map: w = v.pointer() - } else { - w = unsafe.Pointer(loadScalar(v.ptr, v.typ.size)) + default: + memmove(unsafe.Pointer(&w), v.ptr, v.typ.size) } switch ret1 { case amd64Integer: @@ -439,7 +440,7 @@ func amd64Memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) { p := unsafe_New(rt) memmove(p, unsafe.Pointer(ap), rt.size) - v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir} + v := Value{rt, p, flag(rt.Kind()) | flagIndir} in = append(in, v) ap += rt.size return in, ap diff --git a/libgo/go/reflect/makefuncgo_s390.go b/libgo/go/reflect/makefuncgo_s390.go index ff22add81a1..47daa77f6bb 100644 --- a/libgo/go/reflect/makefuncgo_s390.go +++ b/libgo/go/reflect/makefuncgo_s390.go @@ -232,7 +232,7 @@ func S390MakeFuncStubGo(regs *s390_regs, c *makeFuncImpl) { argloop: for _, rt := range ftyp.in { class, off_reg, off_slot := s390ClassifyParameter(rt) - fl := flag(rt.Kind()) << flagKindShift + fl := flag(rt.Kind()) switch class { case s390_empty: v := Value{rt, nil, fl | flagIndir} @@ -338,10 +338,11 @@ argloop: // Single return value in a general or floating point register. 
v := out[0] var w uintptr - if v.Kind() == Ptr || v.Kind() == UnsafePointer { + switch v.Kind() { + case Ptr, UnsafePointer, Chan, Func, Map: w = uintptr(v.pointer()) - } else { - w = uintptr(loadScalar(v.ptr, v.typ.size)) + default: + memmove(unsafe.Pointer(&w), v.ptr, v.typ.size) if ret_off_reg != 0 { w = s390ReloadForRegister( ret_type, w, ret_off_reg) @@ -394,7 +395,7 @@ func s390_add_stackreg(in []Value, ap uintptr, rt *rtype, offset uintptr) ([]Val p := unsafe_New(rt) memmove(p, unsafe.Pointer(ap), rt.size) - v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir} + v := Value{rt, p, flag(rt.Kind()) | flagIndir} in = append(in, v) ap += rt.size ap = align(ap, s390_arch_stack_slot_align) @@ -413,7 +414,7 @@ func s390_add_memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) { p := unsafe_New(rt) memmove(p, *(*unsafe.Pointer)(unsafe.Pointer(ap)), rt.size) - v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir} + v := Value{rt, p, flag(rt.Kind()) | flagIndir} in = append(in, v) ap += s390_arch_stack_slot_align diff --git a/libgo/go/reflect/makefuncgo_s390x.go b/libgo/go/reflect/makefuncgo_s390x.go index a0a5567f3b9..ea4c93e4f67 100644 --- a/libgo/go/reflect/makefuncgo_s390x.go +++ b/libgo/go/reflect/makefuncgo_s390x.go @@ -226,7 +226,7 @@ func S390xMakeFuncStubGo(regs *s390x_regs, c *makeFuncImpl) { argloop: for _, rt := range ftyp.in { class, off_reg, off_slot := s390xClassifyParameter(rt) - fl := flag(rt.Kind()) << flagKindShift + fl := flag(rt.Kind()) switch class { case s390x_empty: v := Value{rt, nil, fl | flagIndir} @@ -317,10 +317,11 @@ argloop: // Single return value in a general or floating point register. v := out[0] var w uintptr - if v.Kind() == Ptr || v.Kind() == UnsafePointer { + switch v.Kind() { + case Ptr, UnsafePointer, Chan, Func, Map: w = uintptr(v.pointer()) } else { - w = uintptr(loadScalar(v.ptr, v.typ.size)) + memmove(unsafe.Pointer(&w), v.ptr, v.typ.size) if ret_off_reg != 0 { w = s390xReloadForRegister( ret_type, w, ret_off_reg) @@ -370,7 +371,7 @@ func s390x_add_stackreg(in []Value, ap uintptr, rt *rtype, offset uintptr) ([]Va p := unsafe_New(rt) memmove(p, unsafe.Pointer(ap), rt.size) - v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir} + v := Value{rt, p, flag(rt.Kind()) | flagIndir} in = append(in, v) ap += rt.size ap = align(ap, s390x_arch_stack_slot_align) @@ -389,7 +390,7 @@ func s390x_add_memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) { p := unsafe_New(rt) memmove(p, *(*unsafe.Pointer)(unsafe.Pointer(ap)), rt.size) - v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir} + v := Value{rt, p, flag(rt.Kind()) | flagIndir} in = append(in, v) ap += s390x_arch_stack_slot_align diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go index 91697c4b56b..101135410cf 100644 --- a/libgo/go/reflect/type.go +++ b/libgo/go/reflect/type.go @@ -99,6 +99,9 @@ type Type interface { // ConvertibleTo returns true if a value of the type is convertible to type u. ConvertibleTo(u Type) bool + // Comparable returns true if values of this type are comparable. + Comparable() bool + // Methods applicable only to some types, depending on Kind. 
// The methods allowed for each kind are: // @@ -249,7 +252,7 @@ type rtype struct { align int8 // alignment of variable with this type fieldAlign uint8 // alignment of struct field with this type _ uint8 // unused/padding - size uintptr // size in bytes + size uintptr hash uint32 // hash of type; avoids computation in hash tables hashfn uintptr // hash function code @@ -331,8 +334,6 @@ type mapType struct { rtype `reflect:"map"` key *rtype // map key type elem *rtype // map element (value) type - // bucket *rtype // internal bucket structure - // hmap *rtype // internal map header } // ptrType represents a pointer type. @@ -401,11 +402,11 @@ type Method struct { Index int // index for Type.Method } -// High bit says whether type has -// embedded pointers,to help garbage collector. const ( - kindMask = 0x7f - kindNoPointers = 0x80 + kindDirectIface = 1 << 5 + kindGCProg = 1 << 6 // Type.gc points to GC program + kindNoPointers = 1 << 7 + kindMask = (1 << 5) - 1 ) func (k Kind) String() string { @@ -513,7 +514,7 @@ func (t *uncommonType) Method(i int) (m Method) { if p.name != nil { m.Name = *p.name } - fl := flag(Func) << flagKindShift + fl := flag(Func) if p.pkgPath != nil { m.PkgPath = *p.pkgPath fl |= flagRO @@ -522,7 +523,7 @@ func (t *uncommonType) Method(i int) (m Method) { m.Type = toType(mt) x := new(unsafe.Pointer) *x = unsafe.Pointer(&p.tfn) - m.Func = Value{mt, unsafe.Pointer(x) /* 0, */, fl | flagIndir | flagMethodFn} + m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn} m.Index = i return } @@ -1146,7 +1147,6 @@ func (t *rtype) ptrTo() *rtype { q := canonicalize(&p.rtype) p = (*ptrType)(unsafe.Pointer(q.(*rtype))) - ptrMap.m[t] = p ptrMap.Unlock() return &p.rtype } @@ -1185,6 +1185,34 @@ func (t *rtype) ConvertibleTo(u Type) bool { return convertOp(uu, t) != nil } +func (t *rtype) Comparable() bool { + switch t.Kind() { + case Bool, Int, Int8, Int16, Int32, Int64, + Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, + Float32, Float64, Complex64, Complex128, + Chan, Interface, Ptr, String, UnsafePointer: + return true + + case Func, Map, Slice: + return false + + case Array: + return (*arrayType)(unsafe.Pointer(t)).elem.Comparable() + + case Struct: + tt := (*structType)(unsafe.Pointer(t)) + for i := range tt.fields { + if !tt.fields[i].typ.Comparable() { + return false + } + } + return true + + default: + panic("reflect: impossible") + } +} + // implements returns true if the type V implements the interface type T. func implements(T, V *rtype) bool { if T.Kind() != Interface { @@ -1419,11 +1447,6 @@ type chanGC struct { end uintptr // _GC_END } -type badGC struct { - width uintptr - end uintptr -} - // ChanOf returns the channel type with the given direction and element type. // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int. // @@ -1536,8 +1559,6 @@ func MapOf(key, elem Type) Type { mt.key = ktyp mt.elem = etyp - // mt.bucket = bucketOf(ktyp, etyp) - // mt.hmap = hMapOf(mt.bucket) mt.uncommonType = nil mt.ptrToThis = nil mt.zero = unsafe.Pointer(&make([]byte, mt.size)[0]) @@ -1559,57 +1580,151 @@ func MapOf(key, elem Type) Type { return cachePut(ckey, &mt.rtype) } -// Make sure these routines stay in sync with ../../pkg/runtime/hashmap.c! +// gcProg is a helper type for generatation of GC pointer info. +type gcProg struct { + gc []byte + size uintptr // size of type in bytes +} + +func (gc *gcProg) append(v byte) { + gc.align(unsafe.Sizeof(uintptr(0))) + gc.appendWord(v) +} + +// Appends t's type info to the current program. 
+func (gc *gcProg) appendProg(t *rtype) { + gc.align(uintptr(t.align)) + if !t.pointers() { + gc.size += t.size + return + } + switch t.Kind() { + default: + panic("reflect: non-pointer type marked as having pointers") + case Ptr, UnsafePointer, Chan, Func, Map: + gc.appendWord(bitsPointer) + case Slice: + gc.appendWord(bitsPointer) + gc.appendWord(bitsScalar) + gc.appendWord(bitsScalar) + case String: + gc.appendWord(bitsPointer) + gc.appendWord(bitsScalar) + case Array: + c := t.Len() + e := t.Elem().common() + for i := 0; i < c; i++ { + gc.appendProg(e) + } + case Interface: + gc.appendWord(bitsMultiWord) + if t.NumMethod() == 0 { + gc.appendWord(bitsEface) + } else { + gc.appendWord(bitsIface) + } + case Struct: + c := t.NumField() + for i := 0; i < c; i++ { + gc.appendProg(t.Field(i).Type.common()) + } + gc.align(uintptr(t.align)) + } +} + +func (gc *gcProg) appendWord(v byte) { + ptrsize := unsafe.Sizeof(uintptr(0)) + if gc.size%ptrsize != 0 { + panic("reflect: unaligned GC program") + } + nptr := gc.size / ptrsize + for uintptr(len(gc.gc)) < nptr/2+1 { + gc.gc = append(gc.gc, 0x44) // BitsScalar + } + gc.gc[nptr/2] &= ^(3 << ((nptr%2)*4 + 2)) + gc.gc[nptr/2] |= v << ((nptr%2)*4 + 2) + gc.size += ptrsize +} + +func (gc *gcProg) finalize() unsafe.Pointer { + if gc.size == 0 { + return nil + } + ptrsize := unsafe.Sizeof(uintptr(0)) + gc.align(ptrsize) + nptr := gc.size / ptrsize + for uintptr(len(gc.gc)) < nptr/2+1 { + gc.gc = append(gc.gc, 0x44) // BitsScalar + } + // If number of words is odd, repeat the mask twice. + // Compiler does the same. + if nptr%2 != 0 { + for i := uintptr(0); i < nptr; i++ { + gc.appendWord(extractGCWord(gc.gc, i)) + } + } + return unsafe.Pointer(&gc.gc[0]) +} + +func extractGCWord(gc []byte, i uintptr) byte { + return (gc[i/2] >> ((i%2)*4 + 2)) & 3 +} + +func (gc *gcProg) align(a uintptr) { + gc.size = align(gc.size, a) +} + +// These constants must stay in sync with ../runtime/mgc0.h. +const ( + bitsScalar = 1 + bitsPointer = 2 + bitsMultiWord = 3 + + bitsIface = 2 + bitsEface = 3 +) + +// Make sure these routines stay in sync with ../../runtime/hashmap.go! // These types exist only for GC, so we only fill out GC relevant info. // Currently, that's just size and the GC program. We also fill in string // for possible debugging use. 
const ( - _BUCKETSIZE = 8 - _MAXKEYSIZE = 128 - _MAXVALSIZE = 128 + bucketSize = 8 + maxKeySize = 128 + maxValSize = 128 ) func bucketOf(ktyp, etyp *rtype) *rtype { - if ktyp.size > _MAXKEYSIZE { + if ktyp.size > maxKeySize { ktyp = PtrTo(ktyp).(*rtype) } - if etyp.size > _MAXVALSIZE { + if etyp.size > maxValSize { etyp = PtrTo(etyp).(*rtype) } ptrsize := unsafe.Sizeof(uintptr(0)) - gc := make([]uintptr, 1) // first entry is size, filled in at the end - offset := _BUCKETSIZE * unsafe.Sizeof(uint8(0)) // topbits - gc = append(gc, _GC_PTR, offset, 0 /*self pointer set below*/) // overflow - offset += ptrsize - + var gc gcProg + // topbits + for i := 0; i < int(bucketSize*unsafe.Sizeof(uint8(0))/ptrsize); i++ { + gc.append(bitsScalar) + } + gc.append(bitsPointer) // overflow if runtime.GOARCH == "amd64p32" { - offset += 4 + gc.append(bitsScalar) } - // keys - if ktyp.kind&kindNoPointers == 0 { - gc = append(gc, _GC_ARRAY_START, offset, _BUCKETSIZE, ktyp.size) - gc = appendGCProgram(gc, ktyp) - gc = append(gc, _GC_ARRAY_NEXT) + for i := 0; i < bucketSize; i++ { + gc.appendProg(ktyp) } - offset += _BUCKETSIZE * ktyp.size - // values - if etyp.kind&kindNoPointers == 0 { - gc = append(gc, _GC_ARRAY_START, offset, _BUCKETSIZE, etyp.size) - gc = appendGCProgram(gc, etyp) - gc = append(gc, _GC_ARRAY_NEXT) + for i := 0; i < bucketSize; i++ { + gc.appendProg(etyp) } - offset += _BUCKETSIZE * etyp.size - - gc = append(gc, _GC_END) - gc[0] = offset - gc[3] = uintptr(unsafe.Pointer(&gc[0])) // set self pointer b := new(rtype) - b.size = offset - // b.gc = unsafe.Pointer(&gc[0]) + b.size = gc.size + // b.gc[0] = gc.finalize() + b.kind |= kindGCProg s := "bucket(" + *ktyp.string + "," + *etyp.string + ")" b.string = &s return b @@ -1756,6 +1871,8 @@ func SliceOf(t Type) Type { // // TODO(rsc): Unexported for now. Export once the alg field is set correctly // for the type. This may require significant work. +// +// TODO(rsc): TestArrayOf is also disabled. Re-enable. func arrayOf(count int, elem Type) Type { typ := elem.(*rtype) slice := SliceOf(elem) @@ -1774,6 +1891,7 @@ func arrayOf(count int, elem Type) Type { prototype := *(**arrayType)(unsafe.Pointer(&iarray)) array := new(arrayType) *array = *prototype + // TODO: Set extra kind bits correctly. array.string = &s // gccgo uses a different hash. @@ -1794,6 +1912,7 @@ func arrayOf(count int, elem Type) Type { array.fieldAlign = typ.fieldAlign // TODO: array.alg // TODO: array.gc + // TODO: array.uncommonType = nil array.ptrToThis = nil array.zero = unsafe.Pointer(&make([]byte, array.size)[0]) @@ -1845,3 +1964,68 @@ func toType(p *rtype) Type { } return canonicalize(p) } + +// ifaceIndir reports whether t is stored indirectly in an interface value. +func ifaceIndir(t *rtype) bool { + return t.kind&kindDirectIface == 0 +} + +// Layout matches runtime.BitVector (well enough). +type bitVector struct { + n uint32 // number of bits + data []byte +} + +// append a bit pair to the bitmap. +func (bv *bitVector) append2(bits uint8) { + // assume bv.n is a multiple of 2, since append2 is the only operation. 
+ if bv.n%8 == 0 { + bv.data = append(bv.data, 0) + } + bv.data[bv.n/8] |= bits << (bv.n % 8) + bv.n += 2 +} + +func addTypeBits(bv *bitVector, offset *uintptr, t *rtype) { + *offset = align(*offset, uintptr(t.align)) + if t.kind&kindNoPointers != 0 { + *offset += t.size + return + } + + switch Kind(t.kind & kindMask) { + case Chan, Func, Map, Ptr, Slice, String, UnsafePointer: + // 1 pointer at start of representation + for bv.n < 2*uint32(*offset/uintptr(ptrSize)) { + bv.append2(bitsScalar) + } + bv.append2(bitsPointer) + + case Interface: + // 2 pointers + for bv.n < 2*uint32(*offset/uintptr(ptrSize)) { + bv.append2(bitsScalar) + } + bv.append2(bitsPointer) + bv.append2(bitsPointer) + + case Array: + // repeat inner type + tt := (*arrayType)(unsafe.Pointer(t)) + for i := 0; i < int(tt.len); i++ { + addTypeBits(bv, offset, tt.elem) + } + + case Struct: + // apply fields + tt := (*structType)(unsafe.Pointer(t)) + start := *offset + for i := range tt.fields { + f := &tt.fields[i] + off := start + f.offset + addTypeBits(bv, &off, f.typ) + } + } + + *offset += t.size +} diff --git a/libgo/go/reflect/value.go b/libgo/go/reflect/value.go index c36e9954427..09210b37b70 100644 --- a/libgo/go/reflect/value.go +++ b/libgo/go/reflect/value.go @@ -7,40 +7,12 @@ package reflect import ( "math" "runtime" - "strconv" "unsafe" ) -const bigEndian = false // can be smarter if we find a big-endian machine const ptrSize = unsafe.Sizeof((*byte)(nil)) const cannotSet = "cannot set value obtained from unexported struct field" -// TODO: This will have to go away when -// the new gc goes in. -func memmove(adst, asrc unsafe.Pointer, n uintptr) { - dst := uintptr(adst) - src := uintptr(asrc) - switch { - case src < dst && src+n > dst: - // byte copy backward - // careful: i is unsigned - for i := n; i > 0; { - i-- - *(*byte)(unsafe.Pointer(dst + i)) = *(*byte)(unsafe.Pointer(src + i)) - } - case (n|src|dst)&(ptrSize-1) != 0: - // byte copy forward - for i := uintptr(0); i < n; i++ { - *(*byte)(unsafe.Pointer(dst + i)) = *(*byte)(unsafe.Pointer(src + i)) - } - default: - // word copy forward - for i := uintptr(0); i < n; i += ptrSize { - *(*uintptr)(unsafe.Pointer(dst + i)) = *(*uintptr)(unsafe.Pointer(src + i)) - } - } -} - // Value is the reflection interface to a Go value. // // Not all methods apply to all kinds of values. Restrictions, @@ -64,16 +36,8 @@ type Value struct { // Pointer-valued data or, if flagIndir is set, pointer to data. // Valid when either flagIndir is set or typ.pointers() is true. - // Gccgo always uses this field. ptr unsafe.Pointer - // Non-pointer-valued data. When the data is smaller - // than a word, it begins at the first byte (in the memory - // address sense) of this field. - // Valid when flagIndir is not set and typ.pointers() is false. - // Gccgo never uses this field. - // scalar uintptr - // flag holds metadata about the value. // The lowest bits are flag bits: // - flagRO: obtained via unexported field, so read-only @@ -84,7 +48,7 @@ type Value struct { // This repeats typ.Kind() except for method values. // The remaining 23+ bits give a method number for method values. // If flag.kind() != Func, code can assume that flagMethod is unset. - // If typ.size > ptrSize, code can assume that flagIndir is set. + // If ifaceIndir(typ), code can assume that flagIndir is set. 
flag // A method value represents a curried method invocation @@ -97,19 +61,18 @@ type Value struct { type flag uintptr const ( - flagRO flag = 1 << iota - flagIndir - flagAddr - flagMethod - flagMethodFn // gccgo: first fn parameter is always pointer - flagKindShift = iota flagKindWidth = 5 // there are 27 kinds flagKindMask flag = 1<<flagKindWidth - 1 - flagMethodShift = flagKindShift + flagKindWidth + flagRO flag = 1 << 5 + flagIndir flag = 1 << 6 + flagAddr flag = 1 << 7 + flagMethod flag = 1 << 8 + flagMethodFn flag = 1 << 9 // gccgo: first fn parameter is always pointer + flagMethodShift = 10 ) func (f flag) kind() Kind { - return Kind((f >> flagKindShift) & flagKindMask) + return Kind(f & flagKindMask) } // pointer returns the underlying pointer represented by v. @@ -131,11 +94,11 @@ func packEface(v Value) interface{} { e := (*emptyInterface)(unsafe.Pointer(&i)) // First, fill in the data portion of the interface. switch { - case v.Kind() != Ptr && v.Kind() != UnsafePointer: - // Value is indirect, and so is the interface we're making. + case ifaceIndir(t): if v.flag&flagIndir == 0 { - panic("reflect: missing flagIndir") + panic("bad indir") } + // Value is indirect, and so is the interface we're making. ptr := v.ptr if v.flag&flagAddr != 0 { // TODO: pass safe boolean from valueInterface so @@ -144,23 +107,14 @@ func packEface(v Value) interface{} { memmove(c, ptr, t.size) ptr = c } - e.word = iword(ptr) + e.word = ptr case v.flag&flagIndir != 0: // Value is indirect, but interface is direct. We need // to load the data at v.ptr into the interface data word. - if t.pointers() { - e.word = iword(*(*unsafe.Pointer)(v.ptr)) - } else { - e.word = iword(loadScalar(v.ptr, t.size)) - } + e.word = *(*unsafe.Pointer)(v.ptr) default: // Value is direct, and so is the interface. - if t.pointers() { - e.word = iword(v.ptr) - } else { - // e.word = iword(v.scalar) - panic("reflect: missing flagIndir") - } + e.word = v.ptr } // Now, fill in the type portion. We're very careful here not // to have any operation between the e.word and e.typ assignments @@ -178,8 +132,8 @@ func unpackEface(i interface{}) Value { if t == nil { return Value{} } - f := flag(t.Kind()) << flagKindShift - if t.Kind() != Ptr && t.Kind() != UnsafePointer { + f := flag(t.Kind()) + if ifaceIndir(t) { f |= flagIndir } return Value{t, unsafe.Pointer(e.word), f} @@ -211,78 +165,10 @@ func methodName() string { return f.Name() } -// An iword is the word that would be stored in an -// interface to represent a given value v. Specifically, if v is -// bigger than a pointer, its word is a pointer to v's data. -// Otherwise, its word holds the data stored -// in its leading bytes (so is not a pointer). -// This type is very dangerous for the garbage collector because -// it must be treated conservatively. We try to never expose it -// to the GC here so that GC remains precise. -type iword unsafe.Pointer - -// loadScalar loads n bytes at p from memory into a uintptr -// that forms the second word of an interface. The data -// must be non-pointer in nature. -func loadScalar(p unsafe.Pointer, n uintptr) uintptr { - // Run the copy ourselves instead of calling memmove - // to avoid moving w to the heap. 
- var w uintptr - switch n { - default: - panic("reflect: internal error: loadScalar of " + strconv.Itoa(int(n)) + "-byte value") - case 0: - case 1: - *(*uint8)(unsafe.Pointer(&w)) = *(*uint8)(p) - case 2: - *(*uint16)(unsafe.Pointer(&w)) = *(*uint16)(p) - case 3: - *(*[3]byte)(unsafe.Pointer(&w)) = *(*[3]byte)(p) - case 4: - *(*uint32)(unsafe.Pointer(&w)) = *(*uint32)(p) - case 5: - *(*[5]byte)(unsafe.Pointer(&w)) = *(*[5]byte)(p) - case 6: - *(*[6]byte)(unsafe.Pointer(&w)) = *(*[6]byte)(p) - case 7: - *(*[7]byte)(unsafe.Pointer(&w)) = *(*[7]byte)(p) - case 8: - *(*uint64)(unsafe.Pointer(&w)) = *(*uint64)(p) - } - return w -} - -// storeScalar stores n bytes from w into p. -func storeScalar(p unsafe.Pointer, w uintptr, n uintptr) { - // Run the copy ourselves instead of calling memmove - // to avoid moving w to the heap. - switch n { - default: - panic("reflect: internal error: storeScalar of " + strconv.Itoa(int(n)) + "-byte value") - case 0: - case 1: - *(*uint8)(p) = *(*uint8)(unsafe.Pointer(&w)) - case 2: - *(*uint16)(p) = *(*uint16)(unsafe.Pointer(&w)) - case 3: - *(*[3]byte)(p) = *(*[3]byte)(unsafe.Pointer(&w)) - case 4: - *(*uint32)(p) = *(*uint32)(unsafe.Pointer(&w)) - case 5: - *(*[5]byte)(p) = *(*[5]byte)(unsafe.Pointer(&w)) - case 6: - *(*[6]byte)(p) = *(*[6]byte)(unsafe.Pointer(&w)) - case 7: - *(*[7]byte)(p) = *(*[7]byte)(unsafe.Pointer(&w)) - case 8: - *(*uint64)(p) = *(*uint64)(unsafe.Pointer(&w)) - } -} - // emptyInterface is the header for an interface{} value. type emptyInterface struct { typ *rtype - word iword + word unsafe.Pointer } // nonEmptyInterface is the header for a interface value with methods. @@ -292,7 +178,7 @@ type nonEmptyInterface struct { typ *rtype // dynamic concrete type fun [100000]unsafe.Pointer // method table } - word iword + word unsafe.Pointer } // mustBe panics if f's kind is not expected. @@ -302,9 +188,8 @@ type nonEmptyInterface struct { // v.flag.mustBe(Bool), which will only bother to copy the // single important word for the receiver. func (f flag) mustBe(expected Kind) { - k := f.kind() - if k != expected { - panic(&ValueError{methodName(), k}) + if f.kind() != expected { + panic(&ValueError{methodName(), f.kind()}) } } @@ -344,18 +229,14 @@ func (v Value) Addr() Value { if v.flag&flagAddr == 0 { panic("reflect.Value.Addr of unaddressable value") } - return Value{v.typ.ptrTo(), v.ptr /* 0, */, (v.flag & flagRO) | flag(Ptr)<<flagKindShift} + return Value{v.typ.ptrTo(), v.ptr, (v.flag & flagRO) | flag(Ptr)} } // Bool returns v's underlying value. // It panics if v's kind is not Bool. func (v Value) Bool() bool { v.mustBe(Bool) - if v.flag&flagIndir != 0 { - return *(*bool)(v.ptr) - } - // return *(*bool)(unsafe.Pointer(&v.scalar)) - panic("reflect: missing flagIndir") + return *(*bool)(v.ptr) } // Bytes returns v's underlying value. 
@@ -594,7 +475,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn i := methodIndex if v.typ.Kind() == Interface { tt := (*interfaceType)(unsafe.Pointer(v.typ)) - if i < 0 || i >= len(tt.methods) { + if uint(i) >= uint(len(tt.methods)) { panic("reflect: internal error: invalid method index") } m := &tt.methods[i] @@ -611,7 +492,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn } else { rcvrtype = v.typ ut := v.typ.uncommon() - if ut == nil || i < 0 || i >= len(ut.methods) { + if ut == nil || uint(i) >= uint(len(ut.methods)) { panic("reflect: internal error: invalid method index") } m := &ut.methods[i] @@ -634,19 +515,10 @@ func storeRcvr(v Value, p unsafe.Pointer) { // the interface data word becomes the receiver word iface := (*nonEmptyInterface)(v.ptr) *(*unsafe.Pointer)(p) = unsafe.Pointer(iface.word) - } else if v.flag&flagIndir != 0 { - if t.size > ptrSize { - *(*unsafe.Pointer)(p) = v.ptr - } else if t.pointers() { - *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr) - } else { - *(*uintptr)(p) = loadScalar(v.ptr, t.size) - } - } else if t.pointers() { - *(*unsafe.Pointer)(p) = v.ptr + } else if v.flag&flagIndir != 0 && !ifaceIndir(t) { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr) } else { - // *(*uintptr)(p) = v.scalar - panic("reflect: missing flagIndir") + *(*unsafe.Pointer)(p) = v.ptr } } @@ -679,7 +551,7 @@ func (v Value) Cap() int { // Slice is always bigger than a word; assume flagIndir. return (*sliceHeader)(v.ptr).Cap } - panic(&ValueError{"reflect.Value.Cap", k}) + panic(&ValueError{"reflect.Value.Cap", v.kind()}) } // Close closes the channel v. @@ -696,16 +568,11 @@ func (v Value) Complex() complex128 { k := v.kind() switch k { case Complex64: - if v.flag&flagIndir != 0 { - return complex128(*(*complex64)(v.ptr)) - } - // return complex128(*(*complex64)(unsafe.Pointer(&v.scalar))) - panic("reflect: missing flagIndir") + return complex128(*(*complex64)(v.ptr)) case Complex128: - // complex128 is always bigger than a word; assume flagIndir. return *(*complex128)(v.ptr) } - panic(&ValueError{"reflect.Value.Complex", k}) + panic(&ValueError{"reflect.Value.Complex", v.kind()}) } // Elem returns the value that the interface v contains @@ -725,7 +592,9 @@ func (v Value) Elem() Value { })(v.ptr)) } x := unpackEface(eface) - x.flag |= v.flag & flagRO + if x.flag != 0 { + x.flag |= v.flag & flagRO + } return x case Ptr: ptr := v.ptr @@ -739,58 +608,46 @@ func (v Value) Elem() Value { tt := (*ptrType)(unsafe.Pointer(v.typ)) typ := tt.elem fl := v.flag&flagRO | flagIndir | flagAddr - fl |= flag(typ.Kind() << flagKindShift) - return Value{typ, ptr /* 0, */, fl} + fl |= flag(typ.Kind()) + return Value{typ, ptr, fl} } - panic(&ValueError{"reflect.Value.Elem", k}) + panic(&ValueError{"reflect.Value.Elem", v.kind()}) } // Field returns the i'th field of the struct v. // It panics if v's Kind is not Struct or i is out of range. func (v Value) Field(i int) Value { - v.mustBe(Struct) + if v.kind() != Struct { + panic(&ValueError{"reflect.Value.Field", v.kind()}) + } tt := (*structType)(unsafe.Pointer(v.typ)) - if i < 0 || i >= len(tt.fields) { + if uint(i) >= uint(len(tt.fields)) { panic("reflect: Field index out of range") } field := &tt.fields[i] typ := field.typ // Inherit permission bits from v. - fl := v.flag & (flagRO | flagIndir | flagAddr) + fl := v.flag&(flagRO|flagIndir|flagAddr) | flag(typ.Kind()) // Using an unexported field forces flagRO. 
if field.pkgPath != nil { fl |= flagRO } - fl |= flag(typ.Kind()) << flagKindShift - - var ptr unsafe.Pointer - // var scalar uintptr - switch { - case fl&flagIndir != 0: - // Indirect. Just bump pointer. - ptr = unsafe.Pointer(uintptr(v.ptr) + field.offset) - case typ.pointers(): - if field.offset != 0 { - panic("field access of ptr value isn't at offset 0") - } - ptr = v.ptr - case bigEndian: - // Must be scalar. Discard leading bytes. - // scalar = v.scalar << (field.offset * 8) - panic("reflect: missing flagIndir") - default: - // Must be scalar. Discard leading bytes. - // scalar = v.scalar >> (field.offset * 8) - panic("reflect: missing flagIndir") - } - - return Value{typ, ptr /* scalar, */, fl} + // Either flagIndir is set and v.ptr points at struct, + // or flagIndir is not set and v.ptr is the actual struct data. + // In the former case, we want v.ptr + offset. + // In the latter case, we must be have field.offset = 0, + // so v.ptr + field.offset is still okay. + ptr := unsafe.Pointer(uintptr(v.ptr) + field.offset) + return Value{typ, ptr, fl} } // FieldByIndex returns the nested field corresponding to index. // It panics if v's Kind is not struct. func (v Value) FieldByIndex(index []int) Value { + if len(index) == 1 { + return v.Field(index[0]) + } v.mustBe(Struct) for i, x := range index { if i > 0 { @@ -822,7 +679,6 @@ func (v Value) FieldByName(name string) Value { // It panics if v's Kind is not struct. // It returns the zero Value if no field was found. func (v Value) FieldByNameFunc(match func(string) bool) Value { - v.mustBe(Struct) if f, ok := v.typ.FieldByNameFunc(match); ok { return v.FieldByIndex(f.Index) } @@ -835,19 +691,11 @@ func (v Value) Float() float64 { k := v.kind() switch k { case Float32: - if v.flag&flagIndir != 0 { - return float64(*(*float32)(v.ptr)) - } - // return float64(*(*float32)(unsafe.Pointer(&v.scalar))) - panic("reflect: missing flagIndir") + return float64(*(*float32)(v.ptr)) case Float64: - if v.flag&flagIndir != 0 { - return *(*float64)(v.ptr) - } - // return *(*float64)(unsafe.Pointer(&v.scalar)) - panic("reflect: missing flagIndir") + return *(*float64)(v.ptr) } - panic(&ValueError{"reflect.Value.Float", k}) + panic(&ValueError{"reflect.Value.Float", v.kind()}) } var uint8Type = TypeOf(uint8(0)).(*rtype) @@ -855,82 +703,54 @@ var uint8Type = TypeOf(uint8(0)).(*rtype) // Index returns v's i'th element. // It panics if v's Kind is not Array, Slice, or String or i is out of range. func (v Value) Index(i int) Value { - k := v.kind() - switch k { + switch v.kind() { case Array: tt := (*arrayType)(unsafe.Pointer(v.typ)) - if i < 0 || i > int(tt.len) { + if uint(i) >= uint(tt.len) { panic("reflect: array index out of range") } typ := tt.elem - fl := v.flag & (flagRO | flagIndir | flagAddr) // bits same as overall array - fl |= flag(typ.Kind()) << flagKindShift offset := uintptr(i) * typ.size - var val unsafe.Pointer - switch { - case fl&flagIndir != 0: - // Indirect. Just bump pointer. - val = unsafe.Pointer(uintptr(v.ptr) + offset) - case typ.pointers(): - if offset != 0 { - panic("can't Index(i) with i!=0 on ptrLike value") - } - val = v.ptr - case bigEndian: - // Direct. Discard leading bytes. - // scalar = v.scalar << (offset * 8) - panic("reflect: missing flagIndir") - default: - // Direct. Discard leading bytes. 
- // scalar = v.scalar >> (offset * 8) - panic("reflect: missing flagIndir") - } - return Value{typ, val /* scalar, */, fl} + // Either flagIndir is set and v.ptr points at array, + // or flagIndir is not set and v.ptr is the actual array data. + // In the former case, we want v.ptr + offset. + // In the latter case, we must be doing Index(0), so offset = 0, + // so v.ptr + offset is still okay. + val := unsafe.Pointer(uintptr(v.ptr) + offset) + fl := v.flag&(flagRO|flagIndir|flagAddr) | flag(typ.Kind()) // bits same as overall array + return Value{typ, val, fl} case Slice: // Element flag same as Elem of Ptr. // Addressable, indirect, possibly read-only. - fl := flagAddr | flagIndir | v.flag&flagRO s := (*sliceHeader)(v.ptr) - if i < 0 || i >= s.Len { + if uint(i) >= uint(s.Len) { panic("reflect: slice index out of range") } tt := (*sliceType)(unsafe.Pointer(v.typ)) typ := tt.elem - fl |= flag(typ.Kind()) << flagKindShift val := unsafe.Pointer(uintptr(s.Data) + uintptr(i)*typ.size) - return Value{typ, val /* 0, */, fl} + fl := flagAddr | flagIndir | v.flag&flagRO | flag(typ.Kind()) + return Value{typ, val, fl} case String: - fl := v.flag&flagRO | flag(Uint8<<flagKindShift) | flagIndir - s := (*StringHeader)(v.ptr) - if i < 0 || i >= s.Len { + s := (*stringHeader)(v.ptr) + if uint(i) >= uint(s.Len) { panic("reflect: string index out of range") } - b := uintptr(0) - *(*byte)(unsafe.Pointer(&b)) = *(*byte)(unsafe.Pointer(uintptr(s.Data) + uintptr(i))) - return Value{uint8Type, unsafe.Pointer(&b) /* 0, */, fl | flagIndir} + p := unsafe.Pointer(uintptr(s.Data) + uintptr(i)) + fl := v.flag&flagRO | flag(Uint8) | flagIndir + return Value{uint8Type, p, fl} } - panic(&ValueError{"reflect.Value.Index", k}) + panic(&ValueError{"reflect.Value.Index", v.kind()}) } // Int returns v's underlying value, as an int64. // It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64. func (v Value) Int() int64 { k := v.kind() - var p unsafe.Pointer - if v.flag&flagIndir != 0 { - p = v.ptr - } else { - // The escape analysis is good enough that &v.scalar - // does not trigger a heap allocation. - // p = unsafe.Pointer(&v.scalar) - switch k { - case Int, Int8, Int16, Int32, Int64: - panic("reflect: missing flagIndir") - } - } + p := v.ptr switch k { case Int: return int64(*(*int)(p)) @@ -943,7 +763,7 @@ func (v Value) Int() int64 { case Int64: return int64(*(*int64)(p)) } - panic(&ValueError{"reflect.Value.Int", k}) + panic(&ValueError{"reflect.Value.Int", v.kind()}) } // CanInterface returns true if Interface can be used without panicking. @@ -1040,7 +860,7 @@ func (v Value) IsNil() bool { // Both are always bigger than a word; assume flagIndir. return *(*unsafe.Pointer)(v.ptr) == nil } - panic(&ValueError{"reflect.Value.IsNil", k}) + panic(&ValueError{"reflect.Value.IsNil", v.kind()}) } // IsValid returns true if v represents a value. @@ -1077,7 +897,7 @@ func (v Value) Len() int { // String is bigger than a word; assume flagIndir. return (*stringHeader)(v.ptr).Len } - panic(&ValueError{"reflect.Value.Len", k}) + panic(&ValueError{"reflect.Value.Len", v.kind()}) } // MapIndex returns the value associated with key in the map v. 
@@ -1100,11 +920,8 @@ func (v Value) MapIndex(key Value) Value { var k unsafe.Pointer if key.flag&flagIndir != 0 { k = key.ptr - } else if key.typ.pointers() { - k = unsafe.Pointer(&key.ptr) } else { - // k = unsafe.Pointer(&key.scalar) - panic("reflect: missing flagIndir") + k = unsafe.Pointer(&key.ptr) } e := mapaccess(v.typ, v.pointer(), k) if e == nil { @@ -1112,17 +929,15 @@ func (v Value) MapIndex(key Value) Value { } typ := tt.elem fl := (v.flag | key.flag) & flagRO - fl |= flag(typ.Kind()) << flagKindShift - if typ.Kind() != Ptr && typ.Kind() != UnsafePointer { + fl |= flag(typ.Kind()) + if ifaceIndir(typ) { // Copy result so future changes to the map // won't change the underlying value. c := unsafe_New(typ) memmove(c, e, typ.size) - return Value{typ, c /* 0, */, fl | flagIndir} - } else if typ.pointers() { - return Value{typ, *(*unsafe.Pointer)(e) /* 0, */, fl} + return Value{typ, c, fl | flagIndir} } else { - panic("reflect: can't happen") + return Value{typ, *(*unsafe.Pointer)(e), fl} } } @@ -1135,10 +950,7 @@ func (v Value) MapKeys() []Value { tt := (*mapType)(unsafe.Pointer(v.typ)) keyType := tt.key - fl := v.flag&flagRO | flag(keyType.Kind())<<flagKindShift - if keyType.Kind() != Ptr && keyType.Kind() != UnsafePointer { - fl |= flagIndir - } + fl := v.flag&flagRO | flag(keyType.Kind()) m := v.pointer() mlen := int(0) @@ -1156,16 +968,14 @@ func (v Value) MapKeys() []Value { // we can do about it. break } - if keyType.Kind() != Ptr && keyType.Kind() != UnsafePointer { + if ifaceIndir(keyType) { // Copy result so future changes to the map // won't change the underlying value. c := unsafe_New(keyType) memmove(c, key, keyType.size) - a[i] = Value{keyType, c /* 0, */, fl | flagIndir} - } else if keyType.pointers() { - a[i] = Value{keyType, *(*unsafe.Pointer)(key) /* 0, */, fl} + a[i] = Value{keyType, c, fl | flagIndir} } else { - panic("reflect: can't happen") + a[i] = Value{keyType, *(*unsafe.Pointer)(key), fl} } mapiternext(it) } @@ -1180,16 +990,16 @@ func (v Value) Method(i int) Value { if v.typ == nil { panic(&ValueError{"reflect.Value.Method", Invalid}) } - if v.flag&flagMethod != 0 || i < 0 || i >= v.typ.NumMethod() { + if v.flag&flagMethod != 0 || uint(i) >= uint(v.typ.NumMethod()) { panic("reflect: Method index out of range") } if v.typ.Kind() == Interface && v.IsNil() { panic("reflect: Method on nil interface value") } fl := v.flag & (flagRO | flagIndir) - fl |= flag(Func) << flagKindShift + fl |= flag(Func) fl |= flag(i)<<flagMethodShift | flagMethod - return Value{v.typ, v.ptr /* v.scalar, */, fl} + return Value{v.typ, v.ptr, fl} } // NumMethod returns the number of methods in the value's method set. @@ -1240,7 +1050,7 @@ func (v Value) OverflowComplex(x complex128) bool { case Complex128: return false } - panic(&ValueError{"reflect.Value.OverflowComplex", k}) + panic(&ValueError{"reflect.Value.OverflowComplex", v.kind()}) } // OverflowFloat returns true if the float64 x cannot be represented by v's type. 
@@ -1253,7 +1063,7 @@ func (v Value) OverflowFloat(x float64) bool { case Float64: return false } - panic(&ValueError{"reflect.Value.OverflowFloat", k}) + panic(&ValueError{"reflect.Value.OverflowFloat", v.kind()}) } func overflowFloat32(x float64) bool { @@ -1273,7 +1083,7 @@ func (v Value) OverflowInt(x int64) bool { trunc := (x << (64 - bitSize)) >> (64 - bitSize) return x != trunc } - panic(&ValueError{"reflect.Value.OverflowInt", k}) + panic(&ValueError{"reflect.Value.OverflowInt", v.kind()}) } // OverflowUint returns true if the uint64 x cannot be represented by v's type. @@ -1286,7 +1096,7 @@ func (v Value) OverflowUint(x uint64) bool { trunc := (x << (64 - bitSize)) >> (64 - bitSize) return x != trunc } - panic(&ValueError{"reflect.Value.OverflowUint", k}) + panic(&ValueError{"reflect.Value.OverflowUint", v.kind()}) } // Pointer returns v's value as a uintptr. @@ -1331,7 +1141,7 @@ func (v Value) Pointer() uintptr { case Slice: return (*SliceHeader)(v.ptr).Data } - panic(&ValueError{"reflect.Value.Pointer", k}) + panic(&ValueError{"reflect.Value.Pointer", v.kind()}) } // Recv receives and returns a value from the channel v. @@ -1353,9 +1163,9 @@ func (v Value) recv(nb bool) (val Value, ok bool) { panic("reflect: recv on send-only channel") } t := tt.elem - val = Value{t, nil /* 0, */, flag(t.Kind()) << flagKindShift} + val = Value{t, nil, flag(t.Kind())} var p unsafe.Pointer - if t.Kind() != Ptr && t.Kind() != UnsafePointer { + if ifaceIndir(t) { p = unsafe_New(t) val.ptr = p val.flag |= flagIndir @@ -1390,11 +1200,8 @@ func (v Value) send(x Value, nb bool) (selected bool) { var p unsafe.Pointer if x.flag&flagIndir != 0 { p = x.ptr - } else if x.typ.pointers() { - p = unsafe.Pointer(&x.ptr) } else { - // p = unsafe.Pointer(&x.scalar) - panic("reflect: missing flagIndir") + p = unsafe.Pointer(&x.ptr) } return chansend(v.typ, v.pointer(), p, nb) } @@ -1412,11 +1219,8 @@ func (v Value) Set(x Value) { x = x.assignTo("reflect.Set", v.typ, target) if x.flag&flagIndir != 0 { memmove(v.ptr, x.ptr, v.typ.size) - } else if x.typ.pointers() { - *(*unsafe.Pointer)(v.ptr) = x.ptr } else { - // memmove(v.ptr, unsafe.Pointer(&x.scalar), v.typ.size) - panic("reflect: missing flagIndir") + *(*unsafe.Pointer)(v.ptr) = x.ptr } } @@ -1456,7 +1260,7 @@ func (v Value) SetComplex(x complex128) { v.mustBeAssignable() switch k := v.kind(); k { default: - panic(&ValueError{"reflect.Value.SetComplex", k}) + panic(&ValueError{"reflect.Value.SetComplex", v.kind()}) case Complex64: *(*complex64)(v.ptr) = complex64(x) case Complex128: @@ -1470,7 +1274,7 @@ func (v Value) SetFloat(x float64) { v.mustBeAssignable() switch k := v.kind(); k { default: - panic(&ValueError{"reflect.Value.SetFloat", k}) + panic(&ValueError{"reflect.Value.SetFloat", v.kind()}) case Float32: *(*float32)(v.ptr) = float32(x) case Float64: @@ -1484,7 +1288,7 @@ func (v Value) SetInt(x int64) { v.mustBeAssignable() switch k := v.kind(); k { default: - panic(&ValueError{"reflect.Value.SetInt", k}) + panic(&ValueError{"reflect.Value.SetInt", v.kind()}) case Int: *(*int)(v.ptr) = int(x) case Int8: @@ -1505,7 +1309,7 @@ func (v Value) SetLen(n int) { v.mustBeAssignable() v.mustBe(Slice) s := (*sliceHeader)(v.ptr) - if n < 0 || n > int(s.Cap) { + if uint(n) > uint(s.Cap) { panic("reflect: slice length out of range in SetLen") } s.Len = n @@ -1539,11 +1343,8 @@ func (v Value) SetMapIndex(key, val Value) { var k unsafe.Pointer if key.flag&flagIndir != 0 { k = key.ptr - } else if key.typ.pointers() { - k = unsafe.Pointer(&key.ptr) } else { - // k = 
unsafe.Pointer(&key.scalar) - panic("reflect: missing flagIndir") + k = unsafe.Pointer(&key.ptr) } if val.typ == nil { mapdelete(v.typ, v.pointer(), k) @@ -1554,11 +1355,8 @@ func (v Value) SetMapIndex(key, val Value) { var e unsafe.Pointer if val.flag&flagIndir != 0 { e = val.ptr - } else if val.typ.pointers() { - e = unsafe.Pointer(&val.ptr) } else { - // e = unsafe.Pointer(&val.scalar) - panic("reflect: missing flagIndir") + e = unsafe.Pointer(&val.ptr) } mapassign(v.typ, v.pointer(), k, e) } @@ -1569,7 +1367,7 @@ func (v Value) SetUint(x uint64) { v.mustBeAssignable() switch k := v.kind(); k { default: - panic(&ValueError{"reflect.Value.SetUint", k}) + panic(&ValueError{"reflect.Value.SetUint", v.kind()}) case Uint: *(*uint)(v.ptr) = uint(x) case Uint8: @@ -1612,7 +1410,7 @@ func (v Value) Slice(i, j int) Value { ) switch kind := v.kind(); kind { default: - panic(&ValueError{"reflect.Value.Slice", kind}) + panic(&ValueError{"reflect.Value.Slice", v.kind()}) case Array: if v.flag&flagAddr == 0 { @@ -1635,7 +1433,7 @@ func (v Value) Slice(i, j int) Value { panic("reflect.Value.Slice: string slice index out of bounds") } t := stringHeader{unsafe.Pointer(uintptr(s.Data) + uintptr(i)), j - i} - return Value{v.typ, unsafe.Pointer(&t) /* 0, */, v.flag} + return Value{v.typ, unsafe.Pointer(&t), v.flag} } if i < 0 || j < i || j > cap { @@ -1647,12 +1445,17 @@ func (v Value) Slice(i, j int) Value { // Reinterpret as *sliceHeader to edit. s := (*sliceHeader)(unsafe.Pointer(&x)) - s.Data = unsafe.Pointer(uintptr(base) + uintptr(i)*typ.elem.Size()) s.Len = j - i s.Cap = cap - i + if cap-i > 0 { + s.Data = unsafe.Pointer(uintptr(base) + uintptr(i)*typ.elem.Size()) + } else { + // do not advance pointer, to avoid pointing beyond end of slice + s.Data = base + } - fl := v.flag&flagRO | flagIndir | flag(Slice)<<flagKindShift - return Value{typ.common(), unsafe.Pointer(&x) /* 0, */, fl} + fl := v.flag&flagRO | flagIndir | flag(Slice) + return Value{typ.common(), unsafe.Pointer(&x), fl} } // Slice3 is the 3-index form of the slice operation: it returns v[i:j:k]. @@ -1666,7 +1469,7 @@ func (v Value) Slice3(i, j, k int) Value { ) switch kind := v.kind(); kind { default: - panic(&ValueError{"reflect.Value.Slice3", kind}) + panic(&ValueError{"reflect.Value.Slice3", v.kind()}) case Array: if v.flag&flagAddr == 0 { @@ -1694,12 +1497,17 @@ func (v Value) Slice3(i, j, k int) Value { // Reinterpret as *sliceHeader to edit. s := (*sliceHeader)(unsafe.Pointer(&x)) - s.Data = unsafe.Pointer(uintptr(base) + uintptr(i)*typ.elem.Size()) s.Len = j - i s.Cap = k - i + if k-i > 0 { + s.Data = unsafe.Pointer(uintptr(base) + uintptr(i)*typ.elem.Size()) + } else { + // do not advance pointer, to avoid pointing beyond end of slice + s.Data = base + } - fl := v.flag&flagRO | flagIndir | flag(Slice)<<flagKindShift - return Value{typ.common(), unsafe.Pointer(&x) /* 0, */, fl} + fl := v.flag&flagRO | flagIndir | flag(Slice) + return Value{typ.common(), unsafe.Pointer(&x), fl} } // String returns the string v's underlying value, as a string. @@ -1715,7 +1523,7 @@ func (v Value) String() string { } // If you call String on a reflect.Value of other type, it's better to // print something than to panic. Useful in debugging. - return "<" + v.typ.String() + " Value>" + return "<" + v.Type().String() + " Value>" } // TryRecv attempts to receive a value from the channel v but will not block. @@ -1756,7 +1564,7 @@ func (v Value) Type() Type { if v.typ.Kind() == Interface { // Method on interface. 
tt := (*interfaceType)(unsafe.Pointer(v.typ)) - if i < 0 || i >= len(tt.methods) { + if uint(i) >= uint(len(tt.methods)) { panic("reflect: internal error: invalid method index") } m := &tt.methods[i] @@ -1764,7 +1572,7 @@ func (v Value) Type() Type { } // Method on concrete type. ut := v.typ.uncommon() - if ut == nil || i < 0 || i >= len(ut.methods) { + if ut == nil || uint(i) >= uint(len(ut.methods)) { panic("reflect: internal error: invalid method index") } m := &ut.methods[i] @@ -1775,18 +1583,7 @@ func (v Value) Type() Type { // It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64. func (v Value) Uint() uint64 { k := v.kind() - var p unsafe.Pointer - if v.flag&flagIndir != 0 { - p = v.ptr - } else { - // The escape analysis is good enough that &v.scalar - // does not trigger a heap allocation. - // p = unsafe.Pointer(&v.scalar) - switch k { - case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: - panic("reflect: missing flagIndir") - } - } + p := v.ptr switch k { case Uint: return uint64(*(*uint)(p)) @@ -1801,7 +1598,7 @@ func (v Value) Uint() uint64 { case Uintptr: return uint64(*(*uintptr)(p)) } - panic(&ValueError{"reflect.Value.Uint", k}) + panic(&ValueError{"reflect.Value.Uint", v.kind()}) } // UnsafeAddr returns a pointer to v's data. @@ -1940,17 +1737,6 @@ func Copy(dst, src Value) int { n = sn } - // If sk is an in-line array, cannot take its address. - // Instead, copy element by element. - // TODO: memmove would be ok for this (sa = unsafe.Pointer(&v.scalar)) - // if we teach the compiler that ptrs don't escape from memmove. - if src.flag&flagIndir == 0 { - for i := 0; i < n; i++ { - dst.Index(i).Set(src.Index(i)) - } - return n - } - // Copy via memmove. var da, sa unsafe.Pointer if dk == Array { @@ -1958,7 +1744,9 @@ func Copy(dst, src Value) int { } else { da = (*sliceHeader)(dst.ptr).Data } - if sk == Array { + if src.flag&flagIndir == 0 { + sa = unsafe.Pointer(&src.ptr) + } else if sk == Array { sa = src.ptr } else { sa = (*sliceHeader)(src.ptr).Data @@ -1968,7 +1756,7 @@ func Copy(dst, src Value) int { } // A runtimeSelect is a single case passed to rselect. -// This must match ../runtime/chan.c:/runtimeSelect +// This must match ../runtime/select.go:/runtimeSelect type runtimeSelect struct { dir uintptr // 0, SendDir, or RecvDir typ *rtype // channel type @@ -1986,7 +1774,7 @@ func rselect([]runtimeSelect) (chosen int, recvOK bool) // A SelectDir describes the communication direction of a select case. type SelectDir int -// NOTE: These values must match ../runtime/chan.c:/SelectDir. +// NOTE: These values must match ../runtime/select.go:/selectDir. 
const ( _ SelectDir = iota @@ -2071,11 +1859,8 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) { v = v.assignTo("reflect.Select", tt.elem, nil) if v.flag&flagIndir != 0 { rc.val = v.ptr - } else if v.typ.pointers() { - rc.val = unsafe.Pointer(&v.ptr) } else { - // rc.val = unsafe.Pointer(&v.scalar) - panic("reflect: missing flagIndir") + rc.val = unsafe.Pointer(&v.ptr) } case SelectRecv: @@ -2103,11 +1888,11 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) { tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ)) t := tt.elem p := runcases[chosen].val - fl := flag(t.Kind()) << flagKindShift - if t.Kind() != Ptr && t.Kind() != UnsafePointer { - recv = Value{t, p /* 0, */, fl | flagIndir} + fl := flag(t.Kind()) + if ifaceIndir(t) { + recv = Value{t, p, fl | flagIndir} } else { - recv = Value{t, *(*unsafe.Pointer)(p) /* 0, */, fl} + recv = Value{t, *(*unsafe.Pointer)(p), fl} } } return chosen, recv, recvOK @@ -2138,7 +1923,7 @@ func MakeSlice(typ Type, len, cap int) Value { } s := sliceHeader{unsafe_NewArray(typ.Elem().(*rtype), cap), len, cap} - return Value{typ.common(), unsafe.Pointer(&s) /* 0, */, flagIndir | flag(Slice)<<flagKindShift} + return Value{typ.common(), unsafe.Pointer(&s), flagIndir | flag(Slice)} } // MakeChan creates a new channel with the specified type and buffer size. @@ -2153,7 +1938,7 @@ func MakeChan(typ Type, buffer int) Value { panic("reflect.MakeChan: unidirectional channel type") } ch := makechan(typ.(*rtype), uint64(buffer)) - return Value{typ.common(), unsafe.Pointer(&ch) /* 0, */, flagIndir | (flag(Chan) << flagKindShift)} + return Value{typ.common(), unsafe.Pointer(&ch), flag(Chan) | flagIndir} } // MakeMap creates a new map of the specified type. @@ -2162,7 +1947,7 @@ func MakeMap(typ Type) Value { panic("reflect.MakeMap of non-map type") } m := makemap(typ.(*rtype)) - return Value{typ.common(), unsafe.Pointer(&m) /* 0, */, flagIndir | (flag(Map) << flagKindShift)} + return Value{typ.common(), unsafe.Pointer(&m), flag(Map) | flagIndir} } // Indirect returns the value that v points to. @@ -2202,11 +1987,11 @@ func Zero(typ Type) Value { panic("reflect: Zero(nil)") } t := typ.common() - fl := flag(t.Kind()) << flagKindShift - if t.Kind() == Ptr || t.Kind() == UnsafePointer { - return Value{t, nil /* 0, */, fl} + fl := flag(t.Kind()) + if ifaceIndir(t) { + return Value{t, unsafe_New(typ.(*rtype)), fl | flagIndir} } - return Value{t, unsafe_New(typ.(*rtype)) /* 0, */, fl | flagIndir} + return Value{t, nil, fl} } // New returns a Value representing a pointer to a new zero value @@ -2216,15 +2001,15 @@ func New(typ Type) Value { panic("reflect: New(nil)") } ptr := unsafe_New(typ.(*rtype)) - fl := flag(Ptr) << flagKindShift - return Value{typ.common().ptrTo(), ptr /* 0, */, fl} + fl := flag(Ptr) + return Value{typ.common().ptrTo(), ptr, fl} } // NewAt returns a Value representing a pointer to a value of the // specified type, using p as that pointer. func NewAt(typ Type, p unsafe.Pointer) Value { - fl := flag(Ptr) << flagKindShift - return Value{typ.common().ptrTo(), p /* 0, */, fl} + fl := flag(Ptr) + return Value{typ.common().ptrTo(), p, fl} } // assignTo returns a value v that can be assigned directly to typ. @@ -2241,8 +2026,8 @@ func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value // Same memory layout, so no harm done. 
v.typ = dst fl := v.flag & (flagRO | flagAddr | flagIndir) - fl |= flag(dst.Kind()) << flagKindShift - return Value{dst, v.ptr /* v.scalar, */, fl} + fl |= flag(dst.Kind()) + return Value{dst, v.ptr, fl} case implements(dst, v.typ): if target == nil { @@ -2254,7 +2039,7 @@ func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value } else { ifaceE2I(dst, x, target) } - return Value{dst, target /* 0, */, flagIndir | flag(Interface)<<flagKindShift} + return Value{dst, target, flagIndir | flag(Interface)} } // Failed. @@ -2362,86 +2147,66 @@ func convertOp(dst, src *rtype) func(Value, Type) Value { // where t is a signed or unsigned int type. func makeInt(f flag, bits uint64, t Type) Value { typ := t.common() - if typ.size > ptrSize { - // Assume ptrSize >= 4, so this must be uint64. - ptr := unsafe_New(typ) - *(*uint64)(unsafe.Pointer(ptr)) = bits - return Value{typ, ptr /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift} - } - var s uintptr + ptr := unsafe_New(typ) switch typ.size { case 1: - *(*uint8)(unsafe.Pointer(&s)) = uint8(bits) + *(*uint8)(unsafe.Pointer(ptr)) = uint8(bits) case 2: - *(*uint16)(unsafe.Pointer(&s)) = uint16(bits) + *(*uint16)(unsafe.Pointer(ptr)) = uint16(bits) case 4: - *(*uint32)(unsafe.Pointer(&s)) = uint32(bits) + *(*uint32)(unsafe.Pointer(ptr)) = uint32(bits) case 8: - *(*uint64)(unsafe.Pointer(&s)) = uint64(bits) + *(*uint64)(unsafe.Pointer(ptr)) = bits } - return Value{typ, unsafe.Pointer(&s) /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift} + return Value{typ, ptr, f | flagIndir | flag(typ.Kind())} } // makeFloat returns a Value of type t equal to v (possibly truncated to float32), // where t is a float32 or float64 type. func makeFloat(f flag, v float64, t Type) Value { typ := t.common() - if typ.size > ptrSize { - // Assume ptrSize >= 4, so this must be float64. - ptr := unsafe_New(typ) - *(*float64)(unsafe.Pointer(ptr)) = v - return Value{typ, ptr /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift} - } - - var s uintptr + ptr := unsafe_New(typ) switch typ.size { case 4: - *(*float32)(unsafe.Pointer(&s)) = float32(v) + *(*float32)(unsafe.Pointer(ptr)) = float32(v) case 8: - *(*float64)(unsafe.Pointer(&s)) = v + *(*float64)(unsafe.Pointer(ptr)) = v } - return Value{typ, unsafe.Pointer(&s) /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift} + return Value{typ, ptr, f | flagIndir | flag(typ.Kind())} } // makeComplex returns a Value of type t equal to v (possibly truncated to complex64), // where t is a complex64 or complex128 type. func makeComplex(f flag, v complex128, t Type) Value { typ := t.common() - if typ.size > ptrSize { - ptr := unsafe_New(typ) - switch typ.size { - case 8: - *(*complex64)(unsafe.Pointer(ptr)) = complex64(v) - case 16: - *(*complex128)(unsafe.Pointer(ptr)) = v - } - return Value{typ, ptr /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift} + ptr := unsafe_New(typ) + switch typ.size { + case 8: + *(*complex64)(unsafe.Pointer(ptr)) = complex64(v) + case 16: + *(*complex128)(unsafe.Pointer(ptr)) = v } - - // Assume ptrSize <= 8 so this must be complex64. 
- var s uintptr - *(*complex64)(unsafe.Pointer(&s)) = complex64(v) - return Value{typ, unsafe.Pointer(&s) /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift} + return Value{typ, ptr, f | flagIndir | flag(typ.Kind())} } func makeString(f flag, v string, t Type) Value { ret := New(t).Elem() ret.SetString(v) - ret.flag = ret.flag&^flagAddr | f | flagIndir + ret.flag = ret.flag&^flagAddr | f return ret } func makeBytes(f flag, v []byte, t Type) Value { ret := New(t).Elem() ret.SetBytes(v) - ret.flag = ret.flag&^flagAddr | f | flagIndir + ret.flag = ret.flag&^flagAddr | f return ret } func makeRunes(f flag, v []rune, t Type) Value { ret := New(t).Elem() ret.setRunes(v) - ret.flag = ret.flag&^flagAddr | f | flagIndir + ret.flag = ret.flag&^flagAddr | f return ret } @@ -2532,7 +2297,7 @@ func cvtDirect(v Value, typ Type) Value { ptr = c f &^= flagAddr } - return Value{t, ptr /* v.scalar, */, v.flag&flagRO | f} // v.flag&flagRO|f == f? + return Value{t, ptr, v.flag&flagRO | f} // v.flag&flagRO|f == f? } // convertOp: concrete -> interface @@ -2544,7 +2309,7 @@ func cvtT2I(v Value, typ Type) Value { } else { ifaceE2I(typ.(*rtype), x, target) } - return Value{typ.common(), target /* 0, */, v.flag&flagRO | flagIndir | flag(Interface)<<flagKindShift} + return Value{typ.common(), target, v.flag&flagRO | flagIndir | flag(Interface)} } // convertOp: interface -> interface @@ -2557,7 +2322,7 @@ func cvtI2I(v Value, typ Type) Value { return cvtT2I(v.Elem(), typ) } -// implemented in ../pkg/runtime +// implemented in ../runtime func chancap(ch unsafe.Pointer) int func chanclose(ch unsafe.Pointer) func chanlen(ch unsafe.Pointer) int @@ -2577,10 +2342,14 @@ func mapiterinit(t *rtype, m unsafe.Pointer) unsafe.Pointer func mapiterkey(it unsafe.Pointer) (key unsafe.Pointer) func mapiternext(it unsafe.Pointer) func maplen(m unsafe.Pointer) int - func call(typ *rtype, fnaddr unsafe.Pointer, isInterface bool, isMethod bool, params *unsafe.Pointer, results *unsafe.Pointer) + func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer) +//go:noescape +//extern memmove +func memmove(adst, asrc unsafe.Pointer, n uintptr) + // Dummy annotation marking that the value x escapes, // for use in cases where the reflect code is so clever that // the compiler cannot follow. diff --git a/libgo/go/regexp/all_test.go b/libgo/go/regexp/all_test.go index 301a1dfcd83..01ea3742a8b 100644 --- a/libgo/go/regexp/all_test.go +++ b/libgo/go/regexp/all_test.go @@ -6,6 +6,7 @@ package regexp import ( "reflect" + "regexp/syntax" "strings" "testing" ) @@ -473,9 +474,19 @@ func TestSplit(t *testing.T) { } } -// This ran out of stack before issue 7608 was fixed. +// Check that one-pass cutoff does trigger. func TestOnePassCutoff(t *testing.T) { - MustCompile(`^(?:x{1,1000}){1,1000}$`) + re, err := syntax.Parse(`^x{1,1000}y{1,1000}$`, syntax.Perl) + if err != nil { + t.Fatalf("parse: %v", err) + } + p, err := syntax.Compile(re.Simplify()) + if err != nil { + t.Fatalf("compile: %v", err) + } + if compileOnePass(p) != notOnePass { + t.Fatalf("makeOnePass succeeded; wanted notOnePass") + } } func BenchmarkLiteral(b *testing.B) { diff --git a/libgo/go/regexp/onepass.go b/libgo/go/regexp/onepass.go index 501fb28af66..e6f42856387 100644 --- a/libgo/go/regexp/onepass.go +++ b/libgo/go/regexp/onepass.go @@ -1,4 +1,6 @@ // Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
package regexp @@ -9,9 +11,6 @@ import ( "unicode" ) -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - // "One-pass" regexp execution. // Some regexps can be analyzed to determine that they never need // backtracking: they are guaranteed to run in one pass over the string @@ -484,7 +483,7 @@ func makeOnePass(p *onePassProg) *onePassProg { } } if p != notOnePass { - for i, _ := range p.Inst { + for i := range p.Inst { p.Inst[i].Rune = onePassRunes[i] } } diff --git a/libgo/go/regexp/regexp.go b/libgo/go/regexp/regexp.go index 0b8336a04fb..b615acdf0e5 100644 --- a/libgo/go/regexp/regexp.go +++ b/libgo/go/regexp/regexp.go @@ -452,7 +452,7 @@ func (re *Regexp) ReplaceAllString(src, repl string) string { return string(b) } -// ReplaceAllStringLiteral returns a copy of src, replacing matches of the Regexp +// ReplaceAllLiteralString returns a copy of src, replacing matches of the Regexp // with the replacement string repl. The replacement repl is substituted directly, // without using Expand. func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { diff --git a/libgo/go/regexp/syntax/doc.go b/libgo/go/regexp/syntax/doc.go index 8e72c90d3eb..e5e71f14f59 100644 --- a/libgo/go/regexp/syntax/doc.go +++ b/libgo/go/regexp/syntax/doc.go @@ -21,8 +21,8 @@ Single characters: [^xyz] negated character class \d Perl character class \D negated Perl character class - [:alpha:] ASCII character class - [:^alpha:] negated ASCII character class + [[:alpha:]] ASCII character class + [[:^alpha:]] negated ASCII character class \pN Unicode character class (one-letter name) \p{Greek} Unicode character class \PN negated Unicode character class (one-letter name) @@ -46,14 +46,14 @@ Repetitions: x{n,}? n or more x, prefer fewer x{n}? exactly n x -Implementation restriction: The counting forms x{n} etc. (but not the other -forms x* etc.) have an upper limit of n=1000. Negative or higher explicit -counts yield the parse error ErrInvalidRepeatSize. +Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n} +reject forms that create a minimum or maximum repetition count above 1000. +Unlimited repetitions are not subject to this restriction. 
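To make the restriction above concrete: the effective count of nested counted forms multiplies, and anything above 1000 is rejected at compile time (the same patterns appear in the parse tests added below). A minimal check using the standard regexp package, written for illustration and not part of the patch:

	package main

	import (
		"fmt"
		"regexp"
	)

	func main() {
		// Ten nested {2}s multiply to 2^10 = 1024 > 1000, so compilation fails.
		_, err := regexp.Compile(`((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})`)
		fmt.Println(err != nil) // true

		// Nine doubling levels around x{1} give 2^9 = 512 <= 1000, so this compiles.
		_, err = regexp.Compile(`((((((((((x{1}){2}){2}){2}){2}){2}){2}){2}){2}){2})`)
		fmt.Println(err) // <nil>
	}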
Grouping: (re) numbered capturing group (submatch) (?P<name>re) named & numbered capturing group (submatch) - (?:re) non-capturing group (submatch) + (?:re) non-capturing group (?flags) set flags within current group; non-capturing (?flags:re) set flags during re; non-capturing @@ -69,7 +69,7 @@ Empty strings: $ at end of text (like \z not \Z) or line (flag m=true) \A at beginning of text \b at ASCII word boundary (\w on one side and \W, \A, or \z on the other) - \B not an ASCII word boundary + \B not at ASCII word boundary \z at end of text Escape sequences: @@ -103,29 +103,29 @@ Named character classes as character class elements: [\p{Name}] named Unicode property inside character class (== \p{Name}) [^\p{Name}] named Unicode property inside negated character class (== \P{Name}) -Perl character classes: +Perl character classes (all ASCII-only): \d digits (== [0-9]) \D not digits (== [^0-9]) \s whitespace (== [\t\n\f\r ]) \S not whitespace (== [^\t\n\f\r ]) - \w ASCII word characters (== [0-9A-Za-z_]) - \W not ASCII word characters (== [^0-9A-Za-z_]) + \w word characters (== [0-9A-Za-z_]) + \W not word characters (== [^0-9A-Za-z_]) ASCII character classes: - [:alnum:] alphanumeric (== [0-9A-Za-z]) - [:alpha:] alphabetic (== [A-Za-z]) - [:ascii:] ASCII (== [\x00-\x7F]) - [:blank:] blank (== [\t ]) - [:cntrl:] control (== [\x00-\x1F\x7F]) - [:digit:] digits (== [0-9]) - [:graph:] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]) - [:lower:] lower case (== [a-z]) - [:print:] printable (== [ -~] == [ [:graph:]]) - [:punct:] punctuation (== [!-/:-@[-`{-~]) - [:space:] whitespace (== [\t\n\v\f\r ]) - [:upper:] upper case (== [A-Z]) - [:word:] word characters (== [0-9A-Za-z_]) - [:xdigit:] hex digit (== [0-9A-Fa-f]) + [[:alnum:]] alphanumeric (== [0-9A-Za-z]) + [[:alpha:]] alphabetic (== [A-Za-z]) + [[:ascii:]] ASCII (== [\x00-\x7F]) + [[:blank:]] blank (== [\t ]) + [[:cntrl:]] control (== [\x00-\x1F\x7F]) + [[:digit:]] digits (== [0-9]) + [[:graph:]] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]) + [[:lower:]] lower case (== [a-z]) + [[:print:]] printable (== [ -~] == [ [:graph:]]) + [[:punct:]] punctuation (== [!-/:-@[-`{-~]) + [[:space:]] whitespace (== [\t\n\v\f\r ]) + [[:upper:]] upper case (== [A-Z]) + [[:word:]] word characters (== [0-9A-Za-z_]) + [[:xdigit:]] hex digit (== [0-9A-Fa-f]) */ package syntax diff --git a/libgo/go/regexp/syntax/parse.go b/libgo/go/regexp/syntax/parse.go index cb25dca3956..d579a4069b1 100644 --- a/libgo/go/regexp/syntax/parse.go +++ b/libgo/go/regexp/syntax/parse.go @@ -244,6 +244,7 @@ func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) ( if sub.Op >= opPseudo { return "", &Error{ErrMissingRepeatArgument, before[:len(before)-len(after)]} } + re := p.newRegexp(op) re.Min = min re.Max = max @@ -251,9 +252,47 @@ func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) ( re.Sub = re.Sub0[:1] re.Sub[0] = sub p.stack[n-1] = re + + if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) { + return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]} + } + return after, nil } +// repeatIsValid reports whether the repetition re is valid. +// Valid means that the combination of the top-level repetition +// and any inner repetitions does not exceed n copies of the +// innermost thing. +// This function rewalks the regexp tree and is called for every repetition, +// so we have to worry about inducing quadratic behavior in the parser. 
+// We avoid this by only calling repeatIsValid when min or max >= 2. +// In that case the depth of any >= 2 nesting can only get to 9 without +// triggering a parse error, so each subtree can only be rewalked 9 times. +func repeatIsValid(re *Regexp, n int) bool { + if re.Op == OpRepeat { + m := re.Max + if m == 0 { + return true + } + if m < 0 { + m = re.Min + } + if m > n { + return false + } + if m > 0 { + n /= m + } + } + for _, sub := range re.Sub { + if !repeatIsValid(sub, n) { + return false + } + } + return true +} + // concat replaces the top of the stack (above the topmost '|' or '(') with its concatenation. func (p *parser) concat() *Regexp { p.maybeConcat(-1, 0) @@ -1639,7 +1678,7 @@ const ( // minimum and maximum runes involved in folding. // checked during test. minFold = 0x0041 - maxFold = 0x1044f + maxFold = 0x118df ) // appendFoldedRange returns the result of appending the range lo-hi diff --git a/libgo/go/regexp/syntax/parse_test.go b/libgo/go/regexp/syntax/parse_test.go index f3089294c6a..c4a1117ff86 100644 --- a/libgo/go/regexp/syntax/parse_test.go +++ b/libgo/go/regexp/syntax/parse_test.go @@ -200,6 +200,10 @@ var parseTests = []parseTest{ `cat{rep{2,2 lit{x}}alt{emp{}cc{0x30-0x39}}}`}, {`x{2}y|x{2}[0-9]y`, `cat{rep{2,2 lit{x}}alt{lit{y}cat{cc{0x30-0x39}lit{y}}}}`}, + + // Valid repetitions. + {`((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}))`, ``}, + {`((((((((((x{1}){2}){2}){2}){2}){2}){2}){2}){2}){2})`, ``}, } const testFlags = MatchNL | PerlX | UnicodeGroups @@ -262,6 +266,10 @@ func testParseDump(t *testing.T, tests []parseTest, flags Flags) { t.Errorf("Parse(%#q): %v", tt.Regexp, err) continue } + if tt.Dump == "" { + // It parsed. That's all we care about. + continue + } d := dump(re) if d != tt.Dump { t.Errorf("Parse(%#q).Dump() = %#q want %#q", tt.Regexp, d, tt.Dump) @@ -470,6 +478,7 @@ var invalidRegexps = []string{ `(?i)[a-Z]`, `a{100000}`, `a{100000,}`, + "((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})", } var onlyPerl = []string{ @@ -527,6 +536,10 @@ func TestToStringEquivalentParse(t *testing.T) { t.Errorf("Parse(%#q): %v", tt.Regexp, err) continue } + if tt.Dump == "" { + // It parsed. That's all we care about. + continue + } d := dump(re) if d != tt.Dump { t.Errorf("Parse(%#q).Dump() = %#q want %#q", tt.Regexp, d, tt.Dump) diff --git a/libgo/go/regexp/syntax/regexp.go b/libgo/go/regexp/syntax/regexp.go index 329a90e0129..cea7d9e04fe 100644 --- a/libgo/go/regexp/syntax/regexp.go +++ b/libgo/go/regexp/syntax/regexp.go @@ -39,7 +39,7 @@ const ( OpEmptyMatch // matches empty string OpLiteral // matches Runes sequence OpCharClass // matches Runes interpreted as range pair list - OpAnyCharNotNL // matches any character + OpAnyCharNotNL // matches any character except newline OpAnyChar // matches any character OpBeginLine // matches empty string at beginning of line OpEndLine // matches empty string at end of line diff --git a/libgo/go/runtime/arch_386.go b/libgo/go/runtime/arch_386.go new file mode 100644 index 00000000000..79d38c7ab1b --- /dev/null +++ b/libgo/go/runtime/arch_386.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +type uintreg uint32 +type intptr int32 // TODO(rsc): remove diff --git a/libgo/go/runtime/arch_amd64.go b/libgo/go/runtime/arch_amd64.go new file mode 100644 index 00000000000..270cd7b9573 --- /dev/null +++ b/libgo/go/runtime/arch_amd64.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +type uintreg uint64 +type intptr int64 // TODO(rsc): remove diff --git a/libgo/go/runtime/arch_amd64p32.go b/libgo/go/runtime/arch_amd64p32.go new file mode 100644 index 00000000000..5c636aeab2f --- /dev/null +++ b/libgo/go/runtime/arch_amd64p32.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +type uintreg uint64 +type intptr int32 // TODO(rsc): remove diff --git a/libgo/go/runtime/arch_arm.go b/libgo/go/runtime/arch_arm.go new file mode 100644 index 00000000000..79d38c7ab1b --- /dev/null +++ b/libgo/go/runtime/arch_arm.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +type uintreg uint32 +type intptr int32 // TODO(rsc): remove diff --git a/libgo/go/runtime/atomic.go b/libgo/go/runtime/atomic.go new file mode 100644 index 00000000000..7e9d9b3aadc --- /dev/null +++ b/libgo/go/runtime/atomic.go @@ -0,0 +1,51 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !arm + +package runtime + +import "unsafe" + +//go:noescape +func xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer + +//go:noescape +func xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func atomicload(ptr *uint32) uint32 + +//go:noescape +func atomicload64(ptr *uint64) uint64 + +//go:noescape +func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func atomicor8(ptr *uint8, val uint8) + +//go:noescape +func cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func atomicstore(ptr *uint32, val uint32) + +//go:noescape +func atomicstore64(ptr *uint64, val uint64) + +//go:noescape +func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/libgo/go/runtime/cgocall.go b/libgo/go/runtime/cgocall.go new file mode 100644 index 00000000000..7fd91469eb1 --- /dev/null +++ b/libgo/go/runtime/cgocall.go @@ -0,0 +1,279 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Cgo call and callback support. +// +// To call into the C function f from Go, the cgo-generated code calls +// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a +// gcc-compiled function written by cgo. +// +// runtime.cgocall (below) locks g to m, calls entersyscall +// so as not to block other goroutines or the garbage collector, +// and then calls runtime.asmcgocall(_cgo_Cfunc_f, frame). 
+// +// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack +// (assumed to be an operating system-allocated stack, so safe to run +// gcc-compiled code on) and calls _cgo_Cfunc_f(frame). +// +// _cgo_Cfunc_f invokes the actual C function f with arguments +// taken from the frame structure, records the results in the frame, +// and returns to runtime.asmcgocall. +// +// After it regains control, runtime.asmcgocall switches back to the +// original g (m->curg)'s stack and returns to runtime.cgocall. +// +// After it regains control, runtime.cgocall calls exitsyscall, which blocks +// until this m can run Go code without violating the $GOMAXPROCS limit, +// and then unlocks g from m. +// +// The above description skipped over the possibility of the gcc-compiled +// function f calling back into Go. If that happens, we continue down +// the rabbit hole during the execution of f. +// +// To make it possible for gcc-compiled C code to call a Go function p.GoF, +// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't +// know about packages). The gcc-compiled C function f calls GoF. +// +// GoF calls crosscall2(_cgoexp_GoF, frame, framesize). Crosscall2 +// (in cgo/gcc_$GOARCH.S, a gcc-compiled assembly file) is a two-argument +// adapter from the gcc function call ABI to the 6c function call ABI. +// It is called from gcc to call 6c functions. In this case it calls +// _cgoexp_GoF(frame, framesize), still running on m->g0's stack +// and outside the $GOMAXPROCS limit. Thus, this code cannot yet +// call arbitrary Go code directly and must be careful not to allocate +// memory or use up m->g0's stack. +// +// _cgoexp_GoF calls runtime.cgocallback(p.GoF, frame, framesize). +// (The reason for having _cgoexp_GoF instead of writing a crosscall3 +// to make this call directly is that _cgoexp_GoF, because it is compiled +// with 6c instead of gcc, can refer to dotted names like +// runtime.cgocallback and p.GoF.) +// +// runtime.cgocallback (in asm_$GOARCH.s) switches from m->g0's +// stack to the original g (m->curg)'s stack, on which it calls +// runtime.cgocallbackg(p.GoF, frame, framesize). +// As part of the stack switch, runtime.cgocallback saves the current +// SP as m->g0->sched.sp, so that any use of m->g0's stack during the +// execution of the callback will be done below the existing stack frames. +// Before overwriting m->g0->sched.sp, it pushes the old value on the +// m->g0 stack, so that it can be restored later. +// +// runtime.cgocallbackg (below) is now running on a real goroutine +// stack (not an m->g0 stack). First it calls runtime.exitsyscall, which will +// block until the $GOMAXPROCS limit allows running this goroutine. +// Once exitsyscall has returned, it is safe to do things like call the memory +// allocator or invoke the Go callback function p.GoF. runtime.cgocallbackg +// first defers a function to unwind m->g0.sched.sp, so that if p.GoF +// panics, m->g0.sched.sp will be restored to its old value: the m->g0 stack +// and the m->curg stack will be unwound in lock step. +// Then it calls p.GoF. Finally it pops but does not execute the deferred +// function, calls runtime.entersyscall, and returns to runtime.cgocallback. +// +// After it regains control, runtime.cgocallback switches back to +// m->g0's stack (the pointer is still in m->g0.sched.sp), restores the old +// m->g0.sched.sp value from the stack, and returns to _cgoexp_GoF. 
+// +// _cgoexp_GoF immediately returns to crosscall2, which restores the +// callee-save registers for gcc and returns to GoF, which returns to f. + +package runtime + +import "unsafe" + +// Call from Go to C. +//go:nosplit +func cgocall(fn, arg unsafe.Pointer) { + cgocall_errno(fn, arg) +} + +//go:nosplit +func cgocall_errno(fn, arg unsafe.Pointer) int32 { + if !iscgo && GOOS != "solaris" && GOOS != "windows" { + gothrow("cgocall unavailable") + } + + if fn == nil { + gothrow("cgocall nil") + } + + if raceenabled { + racereleasemerge(unsafe.Pointer(&racecgosync)) + } + + // Create an extra M for callbacks on threads not created by Go on first cgo call. + if needextram == 1 && cas(&needextram, 1, 0) { + onM(newextram) + } + + /* + * Lock g to m to ensure we stay on the same stack if we do a + * cgo callback. Add entry to defer stack in case of panic. + */ + lockOSThread() + mp := getg().m + mp.ncgocall++ + mp.ncgo++ + defer endcgo(mp) + + /* + * Announce we are entering a system call + * so that the scheduler knows to create another + * M to run goroutines while we are in the + * foreign code. + * + * The call to asmcgocall is guaranteed not to + * split the stack and does not allocate memory, + * so it is safe to call while "in a system call", outside + * the $GOMAXPROCS accounting. + */ + entersyscall() + errno := asmcgocall_errno(fn, arg) + exitsyscall() + + return errno +} + +//go:nosplit +func endcgo(mp *m) { + mp.ncgo-- + if mp.ncgo == 0 { + // We are going back to Go and are not in a recursive + // call. Let the GC collect any memory allocated via + // _cgo_allocate that is no longer referenced. + mp.cgomal = nil + } + + if raceenabled { + raceacquire(unsafe.Pointer(&racecgosync)) + } + + unlockOSThread() // invalidates mp +} + +// Helper functions for cgo code. + +// Filled by schedinit from corresponding C variables, +// which are in turn filled in by dynamic linker when Cgo is available. +var cgoMalloc, cgoFree unsafe.Pointer + +func cmalloc(n uintptr) unsafe.Pointer { + var args struct { + n uint64 + ret unsafe.Pointer + } + args.n = uint64(n) + cgocall(cgoMalloc, unsafe.Pointer(&args)) + if args.ret == nil { + gothrow("C malloc failed") + } + return args.ret +} + +func cfree(p unsafe.Pointer) { + cgocall(cgoFree, p) +} + +// Call from C back to Go. +//go:nosplit +func cgocallbackg() { + gp := getg() + if gp != gp.m.curg { + println("runtime: bad g in cgocallback") + exit(2) + } + + // entersyscall saves the caller's SP to allow the GC to trace the Go + // stack. However, since we're returning to an earlier stack frame and + // need to pair with the entersyscall() call made by cgocall, we must + // save syscall* and let reentersyscall restore them. + savedsp := unsafe.Pointer(gp.syscallsp) + savedpc := gp.syscallpc + exitsyscall() // coming out of cgo call + cgocallbackg1() + // going back to cgo call + reentersyscall(savedpc, savedsp) +} + +func cgocallbackg1() { + gp := getg() + if gp.m.needextram { + gp.m.needextram = false + onM(newextram) + } + + // Add entry to defer stack in case of panic. + restore := true + defer unwindm(&restore) + + if raceenabled { + raceacquire(unsafe.Pointer(&racecgosync)) + } + + type args struct { + fn *funcval + arg unsafe.Pointer + argsize uintptr + } + var cb *args + + // Location of callback arguments depends on stack frame layout + // and size of stack frame of cgocallback_gofunc. 
+ sp := gp.m.g0.sched.sp + switch GOARCH { + default: + gothrow("cgocallbackg is unimplemented on arch") + case "arm": + // On arm, stack frame is two words and there's a saved LR between + // SP and the stack frame and between the stack frame and the arguments. + cb = (*args)(unsafe.Pointer(sp + 4*ptrSize)) + case "amd64": + // On amd64, stack frame is one word, plus caller PC. + cb = (*args)(unsafe.Pointer(sp + 2*ptrSize)) + case "386": + // On 386, stack frame is three words, plus caller PC. + cb = (*args)(unsafe.Pointer(sp + 4*ptrSize)) + } + + // Invoke callback. + reflectcall(unsafe.Pointer(cb.fn), unsafe.Pointer(cb.arg), uint32(cb.argsize), 0) + + if raceenabled { + racereleasemerge(unsafe.Pointer(&racecgosync)) + } + + // Do not unwind m->g0->sched.sp. + // Our caller, cgocallback, will do that. + restore = false +} + +func unwindm(restore *bool) { + if !*restore { + return + } + // Restore sp saved by cgocallback during + // unwind of g's stack (see comment at top of file). + mp := acquirem() + sched := &mp.g0.sched + switch GOARCH { + default: + gothrow("unwindm not implemented") + case "386", "amd64": + sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp)) + case "arm": + sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 4)) + } + releasem(mp) +} + +// called from assembly +func badcgocallback() { + gothrow("misaligned stack in cgocallback") +} + +// called from (incomplete) assembly +func cgounimpl() { + gothrow("cgo not implemented") +} + +var racecgosync uint64 // represents possible synchronization in C code diff --git a/libgo/go/runtime/cgocallback.go b/libgo/go/runtime/cgocallback.go new file mode 100644 index 00000000000..2c89143208a --- /dev/null +++ b/libgo/go/runtime/cgocallback.go @@ -0,0 +1,40 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// These functions are called from C code via cgo/callbacks.c. + +// Allocate memory. This allocates the requested number of bytes in +// memory controlled by the Go runtime. The allocated memory will be +// zeroed. You are responsible for ensuring that the Go garbage +// collector can see a pointer to the allocated memory for as long as +// it is valid, e.g., by storing a pointer in a local variable in your +// C function, or in memory allocated by the Go runtime. If the only +// pointers are in a C global variable or in memory allocated via +// malloc, then the Go garbage collector may collect the memory. +// +// TODO(rsc,iant): This memory is untyped. +// Either we need to add types or we need to stop using it. + +func _cgo_allocate_internal(len uintptr) unsafe.Pointer { + if len == 0 { + len = 1 + } + ret := unsafe.Pointer(&make([]unsafe.Pointer, (len+ptrSize-1)/ptrSize)[0]) + c := new(cgomal) + c.alloc = ret + gp := getg() + c.next = gp.m.cgomal + gp.m.cgomal = c + return ret +} + +// Panic. + +func _cgo_panic_internal(p *byte) { + panic(gostringnocopy(p)) +} diff --git a/libgo/go/runtime/chan.go b/libgo/go/runtime/chan.go new file mode 100644 index 00000000000..0eb87df74f7 --- /dev/null +++ b/libgo/go/runtime/chan.go @@ -0,0 +1,655 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// This file contains the implementation of Go channels. 
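The rest of this file defines the entry points that ordinary channel syntax compiles down to (makechan, the chansend/chanrecv family, closechan, and the select helpers). A small, self-contained illustration of the operations they implement, assuming nothing beyond the language itself:

package main

import "fmt"

func main() {
	c := make(chan int, 2) // lowered to a makechan call with a buffer of 2
	c <- 1                 // lowered to the chansend entry point
	c <- 2
	close(c) // lowered to closechan
	for v := range c { // receives are lowered to the chanrecv entry points
		fmt.Println(v) // 1, then 2
	}
}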
+ +import "unsafe" + +const ( + maxAlign = 8 + hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1)) + debugChan = false +) + +// TODO(khr): make hchan.buf an unsafe.Pointer, not a *uint8 + +func makechan(t *chantype, size int64) *hchan { + elem := t.elem + + // compiler checks this but be safe. + if elem.size >= 1<<16 { + gothrow("makechan: invalid channel element type") + } + if hchanSize%maxAlign != 0 || elem.align > maxAlign { + gothrow("makechan: bad alignment") + } + if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (maxmem-hchanSize)/uintptr(elem.size)) { + panic("makechan: size out of range") + } + + var c *hchan + if elem.kind&kindNoPointers != 0 || size == 0 { + // Allocate memory in one call. + // Hchan does not contain pointers interesting for GC in this case: + // buf points into the same allocation, elemtype is persistent. + // SudoG's are referenced from their owning thread so they can't be collected. + // TODO(dvyukov,rlh): Rethink when collector can move allocated objects. + c = (*hchan)(mallocgc(hchanSize+uintptr(size)*uintptr(elem.size), nil, flagNoScan)) + if size > 0 && elem.size != 0 { + c.buf = (*uint8)(add(unsafe.Pointer(c), hchanSize)) + } else { + c.buf = (*uint8)(unsafe.Pointer(c)) // race detector uses this location for synchronization + } + } else { + c = new(hchan) + c.buf = (*uint8)(newarray(elem, uintptr(size))) + } + c.elemsize = uint16(elem.size) + c.elemtype = elem + c.dataqsiz = uint(size) + + if debugChan { + print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n") + } + return c +} + +// chanbuf(c, i) is pointer to the i'th slot in the buffer. +func chanbuf(c *hchan, i uint) unsafe.Pointer { + return add(unsafe.Pointer(c.buf), uintptr(i)*uintptr(c.elemsize)) +} + +// entry point for c <- x from compiled code +//go:nosplit +func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) { + chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t))) +} + +/* + * generic single channel send/recv + * If block is not nil, + * then the protocol will not + * sleep but return if it could + * not complete. + * + * sleep can wake up with g.param == nil + * when a channel involved in the sleep has + * been closed. it is easiest to loop and re-run + * the operation; we'll see that it's now closed. + */ +func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { + if raceenabled { + raceReadObjectPC(t.elem, ep, callerpc, funcPC(chansend)) + } + + if c == nil { + if !block { + return false + } + gopark(nil, nil, "chan send (nil chan)") + gothrow("unreachable") + } + + if debugChan { + print("chansend: chan=", c, "\n") + } + + if raceenabled { + racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend)) + } + + // Fast path: check for failed non-blocking operation without acquiring the lock. + // + // After observing that the channel is not closed, we observe that the channel is + // not ready for sending. Each of these observations is a single word-sized read + // (first c.closed and second c.recvq.first or c.qcount depending on kind of channel). + // Because a closed channel cannot transition from 'ready for sending' to + // 'not ready for sending', even if the channel is closed between the two observations, + // they imply a moment between the two when the channel was both not yet closed + // and not ready for sending. We behave as if we observed the channel at that moment, + // and report that the send cannot proceed. 
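The block == false path above is what a send inside a select with a default case turns into; the unlocked fast check lets it bail out without taking c.lock. A sketch of the two "not ready for sending" cases that check covers (the trySend helper is hypothetical, not part of the patch):

package main

import "fmt"

// trySend performs a non-blocking send: chansend with block == false.
func trySend(c chan int, v int) bool {
	select {
	case c <- v:
		return true
	default: // channel not ready for sending
		return false
	}
}

func main() {
	unbuffered := make(chan int)
	fmt.Println(trySend(unbuffered, 1)) // false: dataqsiz == 0 and no receiver is waiting

	full := make(chan int, 1)
	full <- 1
	fmt.Println(trySend(full, 2)) // false: buffered and qcount == dataqsiz
}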
+ // + // It is okay if the reads are reordered here: if we observe that the channel is not + // ready for sending and then observe that it is not closed, that implies that the + // channel wasn't closed during the first observation. + if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) || + (c.dataqsiz > 0 && c.qcount == c.dataqsiz)) { + return false + } + + var t0 int64 + if blockprofilerate > 0 { + t0 = cputicks() + } + + lock(&c.lock) + if c.closed != 0 { + unlock(&c.lock) + panic("send on closed channel") + } + + if c.dataqsiz == 0 { // synchronous channel + sg := c.recvq.dequeue() + if sg != nil { // found a waiting receiver + if raceenabled { + racesync(c, sg) + } + unlock(&c.lock) + + recvg := sg.g + if sg.elem != nil { + memmove(unsafe.Pointer(sg.elem), ep, uintptr(c.elemsize)) + sg.elem = nil + } + recvg.param = unsafe.Pointer(sg) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(recvg) + return true + } + + if !block { + unlock(&c.lock) + return false + } + + // no receiver available: block on this channel. + gp := getg() + mysg := acquireSudog() + mysg.releasetime = 0 + if t0 != 0 { + mysg.releasetime = -1 + } + mysg.elem = ep + mysg.waitlink = nil + gp.waiting = mysg + mysg.g = gp + mysg.selectdone = nil + gp.param = nil + c.sendq.enqueue(mysg) + goparkunlock(&c.lock, "chan send") + + // someone woke us up. + if mysg != gp.waiting { + gothrow("G waiting list is corrupted!") + } + gp.waiting = nil + if gp.param == nil { + if c.closed == 0 { + gothrow("chansend: spurious wakeup") + } + panic("send on closed channel") + } + gp.param = nil + if mysg.releasetime > 0 { + blockevent(int64(mysg.releasetime)-t0, 2) + } + releaseSudog(mysg) + return true + } + + // asynchronous channel + // wait for some space to write our data + var t1 int64 + for c.qcount >= c.dataqsiz { + if !block { + unlock(&c.lock) + return false + } + gp := getg() + mysg := acquireSudog() + mysg.releasetime = 0 + if t0 != 0 { + mysg.releasetime = -1 + } + mysg.g = gp + mysg.elem = nil + mysg.selectdone = nil + c.sendq.enqueue(mysg) + goparkunlock(&c.lock, "chan send") + + // someone woke us up - try again + if mysg.releasetime > 0 { + t1 = mysg.releasetime + } + releaseSudog(mysg) + lock(&c.lock) + if c.closed != 0 { + unlock(&c.lock) + panic("send on closed channel") + } + } + + // write our data into the channel buffer + if raceenabled { + raceacquire(chanbuf(c, c.sendx)) + racerelease(chanbuf(c, c.sendx)) + } + memmove(chanbuf(c, c.sendx), ep, uintptr(c.elemsize)) + c.sendx++ + if c.sendx == c.dataqsiz { + c.sendx = 0 + } + c.qcount++ + + // wake up a waiting receiver + sg := c.recvq.dequeue() + if sg != nil { + recvg := sg.g + unlock(&c.lock) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(recvg) + } else { + unlock(&c.lock) + } + if t1 > 0 { + blockevent(t1-t0, 2) + } + return true +} + +func closechan(c *hchan) { + if c == nil { + panic("close of nil channel") + } + + lock(&c.lock) + if c.closed != 0 { + unlock(&c.lock) + panic("close of closed channel") + } + + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&c)) + racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan)) + racerelease(unsafe.Pointer(c)) + } + + c.closed = 1 + + // release all readers + for { + sg := c.recvq.dequeue() + if sg == nil { + break + } + gp := sg.g + sg.elem = nil + gp.param = nil + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + } + + // release all writers + for { + sg := c.sendq.dequeue() + if sg == nil { + break + } + gp 
:= sg.g + sg.elem = nil + gp.param = nil + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + } + unlock(&c.lock) +} + +// entry points for <- c from compiled code +//go:nosplit +func chanrecv1(t *chantype, c *hchan, elem unsafe.Pointer) { + chanrecv(t, c, elem, true) +} + +//go:nosplit +func chanrecv2(t *chantype, c *hchan, elem unsafe.Pointer) (received bool) { + _, received = chanrecv(t, c, elem, true) + return +} + +// chanrecv receives on channel c and writes the received data to ep. +// ep may be nil, in which case received data is ignored. +// If block == false and no elements are available, returns (false, false). +// Otherwise, if c is closed, zeros *ep and returns (true, false). +// Otherwise, fills in *ep with an element and returns (true, true). +func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) { + // raceenabled: don't need to check ep, as it is always on the stack. + + if debugChan { + print("chanrecv: chan=", c, "\n") + } + + if c == nil { + if !block { + return + } + gopark(nil, nil, "chan receive (nil chan)") + gothrow("unreachable") + } + + // Fast path: check for failed non-blocking operation without acquiring the lock. + // + // After observing that the channel is not ready for receiving, we observe that the + // channel is not closed. Each of these observations is a single word-sized read + // (first c.sendq.first or c.qcount, and second c.closed). + // Because a channel cannot be reopened, the later observation of the channel + // being not closed implies that it was also not closed at the moment of the + // first observation. We behave as if we observed the channel at that moment + // and report that the receive cannot proceed. + // + // The order of operations is important here: reversing the operations can lead to + // incorrect behavior when racing with a close. + if !block && (c.dataqsiz == 0 && c.sendq.first == nil || + c.dataqsiz > 0 && atomicloaduint(&c.qcount) == 0) && + atomicload(&c.closed) == 0 { + return + } + + var t0 int64 + if blockprofilerate > 0 { + t0 = cputicks() + } + + lock(&c.lock) + if c.dataqsiz == 0 { // synchronous channel + if c.closed != 0 { + return recvclosed(c, ep) + } + + sg := c.sendq.dequeue() + if sg != nil { + if raceenabled { + racesync(c, sg) + } + unlock(&c.lock) + + if ep != nil { + memmove(ep, sg.elem, uintptr(c.elemsize)) + } + sg.elem = nil + gp := sg.g + gp.param = unsafe.Pointer(sg) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + selected = true + received = true + return + } + + if !block { + unlock(&c.lock) + return + } + + // no sender available: block on this channel. + gp := getg() + mysg := acquireSudog() + mysg.releasetime = 0 + if t0 != 0 { + mysg.releasetime = -1 + } + mysg.elem = ep + mysg.waitlink = nil + gp.waiting = mysg + mysg.g = gp + mysg.selectdone = nil + gp.param = nil + c.recvq.enqueue(mysg) + goparkunlock(&c.lock, "chan receive") + + // someone woke us up + if mysg != gp.waiting { + gothrow("G waiting list is corrupted!") + } + gp.waiting = nil + if mysg.releasetime > 0 { + blockevent(mysg.releasetime-t0, 2) + } + haveData := gp.param != nil + gp.param = nil + releaseSudog(mysg) + + if haveData { + // a sender sent us some data. It already wrote to ep. 
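The (selected, received) pair documented for chanrecv above surfaces in Go as the two-value receive form, and the block == false case as a receive in a select with a default. A short illustration of the closed-channel behavior (zeroing *ep and reporting received == false):

package main

import "fmt"

func main() {
	c := make(chan int, 1)
	c <- 42
	close(c)

	v, ok := <-c
	fmt.Println(v, ok) // 42 true: the buffered element is still delivered

	v, ok = <-c
	fmt.Println(v, ok) // 0 false: closed and drained, *ep is zeroed

	select {
	case v, ok = <-c: // a closed channel is always ready to receive
		fmt.Println(v, ok) // 0 false
	default:
		fmt.Println("not ready")
	}
}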
+ selected = true + received = true + return + } + + lock(&c.lock) + if c.closed == 0 { + gothrow("chanrecv: spurious wakeup") + } + return recvclosed(c, ep) + } + + // asynchronous channel + // wait for some data to appear + var t1 int64 + for c.qcount <= 0 { + if c.closed != 0 { + selected, received = recvclosed(c, ep) + if t1 > 0 { + blockevent(t1-t0, 2) + } + return + } + + if !block { + unlock(&c.lock) + return + } + + // wait for someone to send an element + gp := getg() + mysg := acquireSudog() + mysg.releasetime = 0 + if t0 != 0 { + mysg.releasetime = -1 + } + mysg.elem = nil + mysg.g = gp + mysg.selectdone = nil + + c.recvq.enqueue(mysg) + goparkunlock(&c.lock, "chan receive") + + // someone woke us up - try again + if mysg.releasetime > 0 { + t1 = mysg.releasetime + } + releaseSudog(mysg) + lock(&c.lock) + } + + if raceenabled { + raceacquire(chanbuf(c, c.recvx)) + racerelease(chanbuf(c, c.recvx)) + } + if ep != nil { + memmove(ep, chanbuf(c, c.recvx), uintptr(c.elemsize)) + } + memclr(chanbuf(c, c.recvx), uintptr(c.elemsize)) + + c.recvx++ + if c.recvx == c.dataqsiz { + c.recvx = 0 + } + c.qcount-- + + // ping a sender now that there is space + sg := c.sendq.dequeue() + if sg != nil { + gp := sg.g + unlock(&c.lock) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + } else { + unlock(&c.lock) + } + + if t1 > 0 { + blockevent(t1-t0, 2) + } + selected = true + received = true + return +} + +// recvclosed is a helper function for chanrecv. Handles cleanup +// when the receiver encounters a closed channel. +// Caller must hold c.lock, recvclosed will release the lock. +func recvclosed(c *hchan, ep unsafe.Pointer) (selected, recevied bool) { + if raceenabled { + raceacquire(unsafe.Pointer(c)) + } + unlock(&c.lock) + if ep != nil { + memclr(ep, uintptr(c.elemsize)) + } + return true, false +} + +// compiler implements +// +// select { +// case c <- v: +// ... foo +// default: +// ... bar +// } +// +// as +// +// if selectnbsend(c, v) { +// ... foo +// } else { +// ... bar +// } +// +func selectnbsend(t *chantype, c *hchan, elem unsafe.Pointer) (selected bool) { + return chansend(t, c, elem, false, getcallerpc(unsafe.Pointer(&t))) +} + +// compiler implements +// +// select { +// case v = <-c: +// ... foo +// default: +// ... bar +// } +// +// as +// +// if selectnbrecv(&v, c) { +// ... foo +// } else { +// ... bar +// } +// +func selectnbrecv(t *chantype, elem unsafe.Pointer, c *hchan) (selected bool) { + selected, _ = chanrecv(t, c, elem, false) + return +} + +// compiler implements +// +// select { +// case v, ok = <-c: +// ... foo +// default: +// ... bar +// } +// +// as +// +// if c != nil && selectnbrecv2(&v, &ok, c) { +// ... foo +// } else { +// ... bar +// } +// +func selectnbrecv2(t *chantype, elem unsafe.Pointer, received *bool, c *hchan) (selected bool) { + // TODO(khr): just return 2 values from this function, now that it is in Go. 
+ selected, *received = chanrecv(t, c, elem, false) + return +} + +func reflect_chansend(t *chantype, c *hchan, elem unsafe.Pointer, nb bool) (selected bool) { + return chansend(t, c, elem, !nb, getcallerpc(unsafe.Pointer(&t))) +} + +func reflect_chanrecv(t *chantype, c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) { + return chanrecv(t, c, elem, !nb) +} + +func reflect_chanlen(c *hchan) int { + if c == nil { + return 0 + } + return int(c.qcount) +} + +func reflect_chancap(c *hchan) int { + if c == nil { + return 0 + } + return int(c.dataqsiz) +} + +func (q *waitq) enqueue(sgp *sudog) { + sgp.next = nil + if q.first == nil { + q.first = sgp + q.last = sgp + return + } + q.last.next = sgp + q.last = sgp +} + +func (q *waitq) dequeue() *sudog { + for { + sgp := q.first + if sgp == nil { + return nil + } + q.first = sgp.next + sgp.next = nil + if q.last == sgp { + q.last = nil + } + + // if sgp participates in a select and is already signaled, ignore it + if sgp.selectdone != nil { + // claim the right to signal + if *sgp.selectdone != 0 || !cas(sgp.selectdone, 0, 1) { + continue + } + } + + return sgp + } +} + +func racesync(c *hchan, sg *sudog) { + racerelease(chanbuf(c, 0)) + raceacquireg(sg.g, chanbuf(c, 0)) + racereleaseg(sg.g, chanbuf(c, 0)) + raceacquire(chanbuf(c, 0)) +} diff --git a/libgo/go/runtime/chan_test.go b/libgo/go/runtime/chan_test.go index ce4b3962717..e689ceaed1e 100644 --- a/libgo/go/runtime/chan_test.go +++ b/libgo/go/runtime/chan_test.go @@ -198,6 +198,26 @@ func TestChan(t *testing.T) { } } +func TestNonblockRecvRace(t *testing.T) { + n := 10000 + if testing.Short() { + n = 100 + } + for i := 0; i < n; i++ { + c := make(chan int, 1) + c <- 1 + go func() { + select { + case <-c: + default: + t.Fatal("chan is not ready") + } + }() + close(c) + <-c + } +} + func TestSelfSelect(t *testing.T) { // Ensure that send/recv on the same chan in select // does not crash nor deadlock. @@ -430,6 +450,67 @@ func TestMultiConsumer(t *testing.T) { } } +func TestShrinkStackDuringBlockedSend(t *testing.T) { + // make sure that channel operations still work when we are + // blocked on a channel send and we shrink the stack. + // NOTE: this test probably won't fail unless stack.c:StackDebug + // is set to >= 1. + const n = 10 + c := make(chan int) + done := make(chan struct{}) + + go func() { + for i := 0; i < n; i++ { + c <- i + // use lots of stack, briefly. + stackGrowthRecursive(20) + } + done <- struct{}{} + }() + + for i := 0; i < n; i++ { + x := <-c + if x != i { + t.Errorf("bad channel read: want %d, got %d", i, x) + } + // Waste some time so sender can finish using lots of stack + // and block in channel send. + time.Sleep(1 * time.Millisecond) + // trigger GC which will shrink the stack of the sender. + runtime.GC() + } + <-done +} + +func TestSelectDuplicateChannel(t *testing.T) { + // This test makes sure we can queue a G on + // the same channel multiple times. + c := make(chan int) + d := make(chan int) + e := make(chan int) + + // goroutine A + go func() { + select { + case <-c: + case <-c: + case <-d: + } + e <- 9 + }() + time.Sleep(time.Millisecond) // make sure goroutine A gets qeueued first on c + + // goroutine B + go func() { + <-c + }() + time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing + + d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq. + <-e // A tells us it's done + c <- 8 // wake up B. 
This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B) +} + func BenchmarkChanNonblocking(b *testing.B) { myc := make(chan int) b.RunParallel(func(pb *testing.PB) { @@ -458,7 +539,35 @@ func BenchmarkSelectUncontended(b *testing.B) { }) } -func BenchmarkSelectContended(b *testing.B) { +func BenchmarkSelectSyncContended(b *testing.B) { + myc1 := make(chan int) + myc2 := make(chan int) + myc3 := make(chan int) + done := make(chan int) + b.RunParallel(func(pb *testing.PB) { + go func() { + for { + select { + case myc1 <- 0: + case myc2 <- 0: + case myc3 <- 0: + case <-done: + return + } + } + }() + for pb.Next() { + select { + case <-myc1: + case <-myc2: + case <-myc3: + } + } + }) + close(done) +} + +func BenchmarkSelectAsyncContended(b *testing.B) { procs := runtime.GOMAXPROCS(0) myc1 := make(chan int, procs) myc2 := make(chan int, procs) @@ -476,11 +585,11 @@ func BenchmarkSelectContended(b *testing.B) { } func BenchmarkSelectNonblock(b *testing.B) { + myc1 := make(chan int) + myc2 := make(chan int) + myc3 := make(chan int, 1) + myc4 := make(chan int, 1) b.RunParallel(func(pb *testing.PB) { - myc1 := make(chan int) - myc2 := make(chan int) - myc3 := make(chan int, 1) - myc4 := make(chan int, 1) for pb.Next() { select { case <-myc1: diff --git a/libgo/go/runtime/complex.go b/libgo/go/runtime/complex.go new file mode 100644 index 00000000000..ec50f894709 --- /dev/null +++ b/libgo/go/runtime/complex.go @@ -0,0 +1,52 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +func complex128div(n complex128, d complex128) complex128 { + // Special cases as in C99. + ninf := real(n) == posinf || real(n) == neginf || + imag(n) == posinf || imag(n) == neginf + dinf := real(d) == posinf || real(d) == neginf || + imag(d) == posinf || imag(d) == neginf + + nnan := !ninf && (real(n) != real(n) || imag(n) != imag(n)) + dnan := !dinf && (real(d) != real(d) || imag(d) != imag(d)) + + switch { + case nnan || dnan: + return complex(nan, nan) + case ninf && !dinf: + return complex(posinf, posinf) + case !ninf && dinf: + return complex(0, 0) + case real(d) == 0 && imag(d) == 0: + if real(n) == 0 && imag(n) == 0 { + return complex(nan, nan) + } else { + return complex(posinf, posinf) + } + default: + // Standard complex arithmetic, factored to avoid unnecessary overflow. + a := real(d) + if a < 0 { + a = -a + } + b := imag(d) + if b < 0 { + b = -b + } + if a <= b { + ratio := real(d) / imag(d) + denom := real(d)*ratio + imag(d) + return complex((real(n)*ratio+imag(n))/denom, + (imag(n)*ratio-real(n))/denom) + } else { + ratio := imag(d) / real(d) + denom := imag(d)*ratio + real(d) + return complex((imag(n)*ratio+real(n))/denom, + (imag(n)-real(n)*ratio)/denom) + } + } +} diff --git a/libgo/go/runtime/cpuprof.go b/libgo/go/runtime/cpuprof.go new file mode 100644 index 00000000000..8b1c1c63275 --- /dev/null +++ b/libgo/go/runtime/cpuprof.go @@ -0,0 +1,425 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// CPU profiling. +// Based on algorithms and data structures used in +// http://code.google.com/p/google-perftools/. 
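The complex128div routine above applies the C99-style special cases before falling back to the scaled formula that avoids unnecessary overflow. A few concrete evaluations of those branches as they appear at the language level (illustrative only):

package main

import (
	"fmt"
	"math"
	"math/cmplx"
)

func main() {
	// Ordinary operands: the scaled formula.
	fmt.Println(complex(1, 2) / complex(3, 4)) // (0.44+0.08i)

	// Nonzero dividend over a zero divisor: both parts become +Inf.
	zero := complex(0, 0)
	fmt.Println(complex(1, 0) / zero) // (+Inf+Infi)

	// A NaN operand makes the whole result NaN.
	nan := complex(math.NaN(), 0)
	fmt.Println(cmplx.IsNaN(nan / complex(1, 1))) // true
}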
+// +// The main difference between this code and the google-perftools +// code is that this code is written to allow copying the profile data +// to an arbitrary io.Writer, while the google-perftools code always +// writes to an operating system file. +// +// The signal handler for the profiling clock tick adds a new stack trace +// to a hash table tracking counts for recent traces. Most clock ticks +// hit in the cache. In the event of a cache miss, an entry must be +// evicted from the hash table, copied to a log that will eventually be +// written as profile data. The google-perftools code flushed the +// log itself during the signal handler. This code cannot do that, because +// the io.Writer might block or need system calls or locks that are not +// safe to use from within the signal handler. Instead, we split the log +// into two halves and let the signal handler fill one half while a goroutine +// is writing out the other half. When the signal handler fills its half, it +// offers to swap with the goroutine. If the writer is not done with its half, +// we lose the stack trace for this clock tick (and record that loss). +// The goroutine interacts with the signal handler by calling getprofile() to +// get the next log piece to write, implicitly handing back the last log +// piece it obtained. +// +// The state of this dance between the signal handler and the goroutine +// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine +// is not using either log half and is waiting (or will soon be waiting) for +// a new piece by calling notesleep(&p->wait). If the signal handler +// changes handoff from 0 to non-zero, it must call notewakeup(&p->wait) +// to wake the goroutine. The value indicates the number of entries in the +// log half being handed off. The goroutine leaves the non-zero value in +// place until it has finished processing the log half and then flips the number +// back to zero. Setting the high bit in handoff means that the profiling is over, +// and the goroutine is now in charge of flushing the data left in the hash table +// to the log and returning that data. +// +// The handoff field is manipulated using atomic operations. +// For the most part, the manipulation of handoff is orderly: if handoff == 0 +// then the signal handler owns it and can change it to non-zero. +// If handoff != 0 then the goroutine owns it and can change it to zero. +// If that were the end of the story then we would not need to manipulate +// handoff using atomic operations. The operations are needed, however, +// in order to let the log closer set the high bit to indicate "EOF" safely +// in the situation when normally the goroutine "owns" handoff. + +package runtime + +import "unsafe" + +const ( + numBuckets = 1 << 10 + logSize = 1 << 17 + assoc = 4 + maxCPUProfStack = 64 +) + +type cpuprofEntry struct { + count uintptr + depth uintptr + stack [maxCPUProfStack]uintptr +} + +type cpuProfile struct { + on bool // profiling is on + wait note // goroutine waits here + count uintptr // tick count + evicts uintptr // eviction count + lost uintptr // lost ticks that need to be logged + + // Active recent stack traces. + hash [numBuckets]struct { + entry [assoc]cpuprofEntry + } + + // Log of traces evicted from hash. + // Signal handler has filled log[toggle][:nlog]. + // Goroutine is writing log[1-toggle][:handoff]. + log [2][logSize / 2]uintptr + nlog uintptr + toggle int32 + handoff uint32 + + // Writer state. 
+ // Writer maintains its own toggle to avoid races + // looking at signal handler's toggle. + wtoggle uint32 + wholding bool // holding & need to release a log half + flushing bool // flushing hash table - profile is over + eodSent bool // special end-of-data record sent; => flushing +} + +var ( + cpuprofLock mutex + cpuprof *cpuProfile + + eod = [3]uintptr{0, 1, 0} +) + +func setcpuprofilerate_m() // proc.c + +func setcpuprofilerate(hz int32) { + g := getg() + g.m.scalararg[0] = uintptr(hz) + onM(setcpuprofilerate_m) +} + +// lostProfileData is a no-op function used in profiles +// to mark the number of profiling stack traces that were +// discarded due to slow data writers. +func lostProfileData() {} + +// SetCPUProfileRate sets the CPU profiling rate to hz samples per second. +// If hz <= 0, SetCPUProfileRate turns off profiling. +// If the profiler is on, the rate cannot be changed without first turning it off. +// +// Most clients should use the runtime/pprof package or +// the testing package's -test.cpuprofile flag instead of calling +// SetCPUProfileRate directly. +func SetCPUProfileRate(hz int) { + // Clamp hz to something reasonable. + if hz < 0 { + hz = 0 + } + if hz > 1000000 { + hz = 1000000 + } + + lock(&cpuprofLock) + if hz > 0 { + if cpuprof == nil { + cpuprof = (*cpuProfile)(sysAlloc(unsafe.Sizeof(cpuProfile{}), &memstats.other_sys)) + if cpuprof == nil { + print("runtime: cpu profiling cannot allocate memory\n") + unlock(&cpuprofLock) + return + } + } + if cpuprof.on || cpuprof.handoff != 0 { + print("runtime: cannot set cpu profile rate until previous profile has finished.\n") + unlock(&cpuprofLock) + return + } + + cpuprof.on = true + // pprof binary header format. + // http://code.google.com/p/google-perftools/source/browse/trunk/src/profiledata.cc#117 + p := &cpuprof.log[0] + p[0] = 0 // count for header + p[1] = 3 // depth for header + p[2] = 0 // version number + p[3] = uintptr(1e6 / hz) // period (microseconds) + p[4] = 0 + cpuprof.nlog = 5 + cpuprof.toggle = 0 + cpuprof.wholding = false + cpuprof.wtoggle = 0 + cpuprof.flushing = false + cpuprof.eodSent = false + noteclear(&cpuprof.wait) + + setcpuprofilerate(int32(hz)) + } else if cpuprof != nil && cpuprof.on { + setcpuprofilerate(0) + cpuprof.on = false + + // Now add is not running anymore, and getprofile owns the entire log. + // Set the high bit in prof->handoff to tell getprofile. + for { + n := cpuprof.handoff + if n&0x80000000 != 0 { + print("runtime: setcpuprofile(off) twice\n") + } + if cas(&cpuprof.handoff, n, n|0x80000000) { + if n == 0 { + // we did the transition from 0 -> nonzero so we wake getprofile + notewakeup(&cpuprof.wait) + } + break + } + } + } + unlock(&cpuprofLock) +} + +func cpuproftick(pc *uintptr, n int32) { + if n > maxCPUProfStack { + n = maxCPUProfStack + } + s := (*[maxCPUProfStack]uintptr)(unsafe.Pointer(pc))[:n] + cpuprof.add(s) +} + +// add adds the stack trace to the profile. +// It is called from signal handlers and other limited environments +// and cannot allocate memory or acquire locks that might be +// held at the time of the signal, nor can it use substantial amounts +// of stack. It is allowed to call evict. +func (p *cpuProfile) add(pc []uintptr) { + // Compute hash. + h := uintptr(0) + for _, x := range pc { + h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1))) + h += x*31 + x*7 + x*3 + } + p.count++ + + // Add to entry count if already present in table. 
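As the SetCPUProfileRate documentation above says, most programs should drive this machinery through the runtime/pprof package rather than calling the low-level entry points directly. A minimal sketch of that usage (the output file name cpu.prof and the busy loop are made up for illustration):

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func busy() {
	x := 0
	for i := 0; i < 1e8; i++ {
		x += i
	}
	_ = x
}

func main() {
	f, err := os.Create("cpu.prof")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// StartCPUProfile turns the profiler on (via SetCPUProfileRate) and a
	// background goroutine drains the log halves through the handoff
	// protocol described at the top of this file; StopCPUProfile flushes
	// the remaining data and turns profiling off.
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	busy()
}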
+ b := &p.hash[h%numBuckets] +Assoc: + for i := range b.entry { + e := &b.entry[i] + if e.depth != uintptr(len(pc)) { + continue + } + for j := range pc { + if e.stack[j] != pc[j] { + continue Assoc + } + } + e.count++ + return + } + + // Evict entry with smallest count. + var e *cpuprofEntry + for i := range b.entry { + if e == nil || b.entry[i].count < e.count { + e = &b.entry[i] + } + } + if e.count > 0 { + if !p.evict(e) { + // Could not evict entry. Record lost stack. + p.lost++ + return + } + p.evicts++ + } + + // Reuse the newly evicted entry. + e.depth = uintptr(len(pc)) + e.count = 1 + copy(e.stack[:], pc) +} + +// evict copies the given entry's data into the log, so that +// the entry can be reused. evict is called from add, which +// is called from the profiling signal handler, so it must not +// allocate memory or block. It is safe to call flushlog. +// evict returns true if the entry was copied to the log, +// false if there was no room available. +func (p *cpuProfile) evict(e *cpuprofEntry) bool { + d := e.depth + nslot := d + 2 + log := &p.log[p.toggle] + if p.nlog+nslot > uintptr(len(p.log[0])) { + if !p.flushlog() { + return false + } + log = &p.log[p.toggle] + } + + q := p.nlog + log[q] = e.count + q++ + log[q] = d + q++ + copy(log[q:], e.stack[:d]) + q += d + p.nlog = q + e.count = 0 + return true +} + +// flushlog tries to flush the current log and switch to the other one. +// flushlog is called from evict, called from add, called from the signal handler, +// so it cannot allocate memory or block. It can try to swap logs with +// the writing goroutine, as explained in the comment at the top of this file. +func (p *cpuProfile) flushlog() bool { + if !cas(&p.handoff, 0, uint32(p.nlog)) { + return false + } + notewakeup(&p.wait) + + p.toggle = 1 - p.toggle + log := &p.log[p.toggle] + q := uintptr(0) + if p.lost > 0 { + lostPC := funcPC(lostProfileData) + log[0] = p.lost + log[1] = 1 + log[2] = lostPC + q = 3 + p.lost = 0 + } + p.nlog = q + return true +} + +// getprofile blocks until the next block of profiling data is available +// and returns it as a []byte. It is called from the writing goroutine. +func (p *cpuProfile) getprofile() []byte { + if p == nil { + return nil + } + + if p.wholding { + // Release previous log to signal handling side. + // Loop because we are racing against SetCPUProfileRate(0). + for { + n := p.handoff + if n == 0 { + print("runtime: phase error during cpu profile handoff\n") + return nil + } + if n&0x80000000 != 0 { + p.wtoggle = 1 - p.wtoggle + p.wholding = false + p.flushing = true + goto Flush + } + if cas(&p.handoff, n, 0) { + break + } + } + p.wtoggle = 1 - p.wtoggle + p.wholding = false + } + + if p.flushing { + goto Flush + } + + if !p.on && p.handoff == 0 { + return nil + } + + // Wait for new log. + notetsleepg(&p.wait, -1) + noteclear(&p.wait) + + switch n := p.handoff; { + case n == 0: + print("runtime: phase error during cpu profile wait\n") + return nil + case n == 0x80000000: + p.flushing = true + goto Flush + default: + n &^= 0x80000000 + + // Return new log to caller. + p.wholding = true + + return uintptrBytes(p.log[p.wtoggle][:n]) + } + + // In flush mode. + // Add is no longer being called. We own the log. + // Also, p->handoff is non-zero, so flushlog will return false. + // Evict the hash table into the log and return it. +Flush: + for i := range p.hash { + b := &p.hash[i] + for j := range b.entry { + e := &b.entry[j] + if e.count > 0 && !p.evict(e) { + // Filled the log. Stop the loop and return what we've got. 
+ break Flush + } + } + } + + // Return pending log data. + if p.nlog > 0 { + // Note that we're using toggle now, not wtoggle, + // because we're working on the log directly. + n := p.nlog + p.nlog = 0 + return uintptrBytes(p.log[p.toggle][:n]) + } + + // Made it through the table without finding anything to log. + if !p.eodSent { + // We may not have space to append this to the partial log buf, + // so we always return a new slice for the end-of-data marker. + p.eodSent = true + return uintptrBytes(eod[:]) + } + + // Finally done. Clean up and return nil. + p.flushing = false + if !cas(&p.handoff, p.handoff, 0) { + print("runtime: profile flush racing with something\n") + } + return nil +} + +func uintptrBytes(p []uintptr) (ret []byte) { + pp := (*sliceStruct)(unsafe.Pointer(&p)) + rp := (*sliceStruct)(unsafe.Pointer(&ret)) + + rp.array = pp.array + rp.len = pp.len * int(unsafe.Sizeof(p[0])) + rp.cap = rp.len + + return +} + +// CPUProfile returns the next chunk of binary CPU profiling stack trace data, +// blocking until data is available. If profiling is turned off and all the profile +// data accumulated while it was on has been returned, CPUProfile returns nil. +// The caller must save the returned data before calling CPUProfile again. +// +// Most clients should use the runtime/pprof package or +// the testing package's -test.cpuprofile flag instead of calling +// CPUProfile directly. +func CPUProfile() []byte { + return cpuprof.getprofile() +} diff --git a/libgo/go/runtime/crash_cgo_test.go b/libgo/go/runtime/crash_cgo_test.go index b534b89e559..972eedc624e 100644 --- a/libgo/go/runtime/crash_cgo_test.go +++ b/libgo/go/runtime/crash_cgo_test.go @@ -8,6 +8,7 @@ package runtime_test import ( "runtime" + "strings" "testing" ) @@ -19,7 +20,6 @@ func TestCgoSignalDeadlock(t *testing.T) { if testing.Short() && runtime.GOOS == "windows" { t.Skip("Skipping in short mode") // takes up to 64 seconds } - t.Skip("gccgo does not have a go command") got := executeTest(t, cgoSignalDeadlockSource, nil) want := "OK\n" if got != want { @@ -35,6 +35,21 @@ func TestCgoTraceback(t *testing.T) { } } +func TestCgoExternalThreadPanic(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skipf("no pthreads on %s", runtime.GOOS) + } + csrc := cgoExternalThreadPanicC + if runtime.GOOS == "windows" { + csrc = cgoExternalThreadPanicC_windows + } + got := executeTest(t, cgoExternalThreadPanicSource, nil, "main.c", csrc) + want := "panic: BOOM" + if !strings.Contains(got, want) { + t.Fatalf("want failure containing %q. 
output:\n%s\n", want, got) + } +} + const cgoSignalDeadlockSource = ` package main @@ -118,3 +133,64 @@ func main() { fmt.Printf("OK\n") } ` + +const cgoExternalThreadPanicSource = ` +package main + +// void start(void); +import "C" + +func main() { + C.start() + select {} +} + +//export gopanic +func gopanic() { + panic("BOOM") +} +` + +const cgoExternalThreadPanicC = ` +#include <stdlib.h> +#include <stdio.h> +#include <pthread.h> + +void gopanic(void); + +static void* +die(void* x) +{ + gopanic(); + return 0; +} + +void +start(void) +{ + pthread_t t; + if(pthread_create(&t, 0, die, 0) != 0) + printf("pthread_create failed\n"); +} +` + +const cgoExternalThreadPanicC_windows = ` +#include <stdlib.h> +#include <stdio.h> + +void gopanic(void); + +static void* +die(void* x) +{ + gopanic(); + return 0; +} + +void +start(void) +{ + if(_beginthreadex(0, 0, die, 0, 0, 0) != 0) + printf("_beginthreadex failed\n"); +} +` diff --git a/libgo/go/runtime/crash_test.go b/libgo/go/runtime/crash_test.go index 39e04345a10..7e8a2e45f0e 100644 --- a/libgo/go/runtime/crash_test.go +++ b/libgo/go/runtime/crash_test.go @@ -31,10 +31,11 @@ func testEnv(cmd *exec.Cmd) *exec.Cmd { return cmd } -func executeTest(t *testing.T, templ string, data interface{}) string { +func executeTest(t *testing.T, templ string, data interface{}, extra ...string) string { t.Skip("gccgo does not have a go command") - if runtime.GOOS == "nacl" { - t.Skip("skipping on nacl") + switch runtime.GOOS { + case "android", "nacl": + t.Skipf("skipping on %s", runtime.GOOS) } checkStaleRuntime(t) @@ -61,7 +62,20 @@ func executeTest(t *testing.T, templ string, data interface{}) string { t.Fatalf("failed to close file: %v", err) } - got, _ := testEnv(exec.Command("go", "run", src)).CombinedOutput() + for i := 0; i < len(extra); i += 2 { + if err := ioutil.WriteFile(filepath.Join(dir, extra[i]), []byte(extra[i+1]), 0666); err != nil { + t.Fatal(err) + } + } + + cmd := exec.Command("go", "build", "-o", "a.exe") + cmd.Dir = dir + out, err := testEnv(cmd).CombinedOutput() + if err != nil { + t.Fatalf("building source: %v\n%s", err, out) + } + + got, _ := testEnv(exec.Command(filepath.Join(dir, "a.exe"))).CombinedOutput() return string(got) } @@ -159,6 +173,22 @@ func TestGoexitCrash(t *testing.T) { } } +func TestGoexitDefer(t *testing.T) { + c := make(chan struct{}) + go func() { + defer func() { + r := recover() + if r != nil { + t.Errorf("non-nil recover during Goexit") + } + c <- struct{}{} + }() + runtime.Goexit() + }() + // Note: if the defer fails to run, we will get a deadlock here + <-c +} + func TestGoNil(t *testing.T) { output := executeTest(t, goNilSource, nil) want := "go of nil func value" @@ -167,6 +197,22 @@ func TestGoNil(t *testing.T) { } } +func TestMainGoroutineId(t *testing.T) { + output := executeTest(t, mainGoroutineIdSource, nil) + want := "panic: test\n\ngoroutine 1 [running]:\n" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +func TestBreakpoint(t *testing.T) { + output := executeTest(t, breakpointSource, nil) + want := "runtime.Breakpoint()" + if !strings.Contains(output, want) { + t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want) + } +} + const crashSource = ` package main @@ -365,3 +411,106 @@ func main() { select{} } ` + +const mainGoroutineIdSource = ` +package main +func main() { + panic("test") +} +` + +const breakpointSource = ` +package main +import "runtime" +func main() { + runtime.Breakpoint() +} +` + +func TestGoexitInPanic(t 
*testing.T) { + // see issue 8774: this code used to trigger an infinite recursion + output := executeTest(t, goexitInPanicSource, nil) + want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +const goexitInPanicSource = ` +package main +import "runtime" +func main() { + go func() { + defer func() { + runtime.Goexit() + }() + panic("hello") + }() + runtime.Goexit() +} +` + +func TestPanicAfterGoexit(t *testing.T) { + // an uncaught panic should still work after goexit + output := executeTest(t, panicAfterGoexitSource, nil) + want := "panic: hello" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +const panicAfterGoexitSource = ` +package main +import "runtime" +func main() { + defer func() { + panic("hello") + }() + runtime.Goexit() +} +` + +func TestRecoveredPanicAfterGoexit(t *testing.T) { + output := executeTest(t, recoveredPanicAfterGoexitSource, nil) + want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +const recoveredPanicAfterGoexitSource = ` +package main +import "runtime" +func main() { + defer func() { + defer func() { + r := recover() + if r == nil { + panic("bad recover") + } + }() + panic("hello") + }() + runtime.Goexit() +} +` + +func TestRecoverBeforePanicAfterGoexit(t *testing.T) { + // 1. defer a function that recovers + // 2. defer a function that panics + // 3. call goexit + // Goexit should run the #2 defer. Its panic + // should be caught by the #1 defer, and execution + // should resume in the caller. Like the Goexit + // never happened! + defer func() { + r := recover() + if r == nil { + panic("bad recover") + } + }() + defer func() { + panic("hello") + }() + runtime.Goexit() +} diff --git a/libgo/go/runtime/env_posix.go b/libgo/go/runtime/env_posix.go new file mode 100644 index 00000000000..8b1dbb7994d --- /dev/null +++ b/libgo/go/runtime/env_posix.go @@ -0,0 +1,58 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows + +package runtime + +import "unsafe" + +func environ() []string + +func getenv(s *byte) *byte { + val := gogetenv(gostringnocopy(s)) + if val == "" { + return nil + } + // Strings found in environment are NUL-terminated. + return &bytes(val)[0] +} + +func gogetenv(key string) string { + env := environ() + if env == nil { + gothrow("getenv before env init") + } + for _, s := range environ() { + if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key { + return s[len(key)+1:] + } + } + return "" +} + +//extern setenv +func _cgo_setenv(unsafe.Pointer, unsafe.Pointer, int32) + +//extern unsetenv +func _cgo_unsetenv(unsafe.Pointer) + +// Update the C environment if cgo is loaded. +// Called from syscall.Setenv. +func syscall_setenv_c(k string, v string) { + _cgo_setenv(cstring(k), cstring(v), 1) +} + +// Update the C environment if cgo is loaded. +// Called from syscall.unsetenv. 
+func syscall_unsetenv_c(k string) { + _cgo_unsetenv(cstring(k)) +} + +func cstring(s string) unsafe.Pointer { + p := make([]byte, len(s)+1) + sp := (*_string)(unsafe.Pointer(&s)) + memmove(unsafe.Pointer(&p[0]), unsafe.Pointer(sp.str), uintptr(len(s))) + return unsafe.Pointer(&p[0]) +} diff --git a/libgo/go/runtime/gc_test.go b/libgo/go/runtime/gc_test.go index 5a1e9b89c42..fe9e8390200 100644 --- a/libgo/go/runtime/gc_test.go +++ b/libgo/go/runtime/gc_test.go @@ -10,6 +10,7 @@ import ( "runtime/debug" "testing" "time" + "unsafe" ) func TestGcSys(t *testing.T) { @@ -165,6 +166,37 @@ func TestGcLastTime(t *testing.T) { if t0 > last || last > t1 { t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1) } + pause := ms.PauseNs[(ms.NumGC+255)%256] + // Due to timer granularity, pause can actually be 0 on windows + // or on virtualized environments. + if pause == 0 { + t.Logf("last GC pause was 0") + } else if pause > 10e9 { + t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause) + } +} + +var hugeSink interface{} + +func TestHugeGCInfo(t *testing.T) { + // The test ensures that compiler can chew these huge types even on weakest machines. + // The types are not allocated at runtime. + if hugeSink != nil { + // 400MB on 32 bots, 4TB on 64-bits. + const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40 + hugeSink = new([n]*byte) + hugeSink = new([n]uintptr) + hugeSink = new(struct { + x float64 + y [n]*byte + z []string + }) + hugeSink = new(struct { + x float64 + y [n]uintptr + z []string + }) + } } func BenchmarkSetTypeNoPtr1(b *testing.B) { @@ -236,3 +268,27 @@ func BenchmarkAllocation(b *testing.B) { <-result } } + +func TestPrintGC(t *testing.T) { + if testing.Short() { + t.Skip("Skipping in short mode") + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) + done := make(chan bool) + go func() { + for { + select { + case <-done: + return + default: + runtime.GC() + } + } + }() + for i := 0; i < 1e4; i++ { + func() { + defer print("") + }() + } + close(done) +} diff --git a/libgo/go/runtime/gcinfo_test.go b/libgo/go/runtime/gcinfo_test.go new file mode 100644 index 00000000000..00449929c95 --- /dev/null +++ b/libgo/go/runtime/gcinfo_test.go @@ -0,0 +1,194 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "bytes" + "runtime" + "testing" +) + +// TestGCInfo tests that various objects in heap, data and bss receive correct GC pointer type info. 
+func TestGCInfo(t *testing.T) { + t.Skip("skipping on gccgo for now") + verifyGCInfo(t, "bss ScalarPtr", &bssScalarPtr, nonStackInfo(infoScalarPtr)) + verifyGCInfo(t, "bss PtrScalar", &bssPtrScalar, nonStackInfo(infoPtrScalar)) + verifyGCInfo(t, "bss BigStruct", &bssBigStruct, nonStackInfo(infoBigStruct())) + verifyGCInfo(t, "bss string", &bssString, nonStackInfo(infoString)) + verifyGCInfo(t, "bss slice", &bssSlice, nonStackInfo(infoSlice)) + verifyGCInfo(t, "bss eface", &bssEface, nonStackInfo(infoEface)) + verifyGCInfo(t, "bss iface", &bssIface, nonStackInfo(infoIface)) + + verifyGCInfo(t, "data ScalarPtr", &dataScalarPtr, nonStackInfo(infoScalarPtr)) + verifyGCInfo(t, "data PtrScalar", &dataPtrScalar, nonStackInfo(infoPtrScalar)) + verifyGCInfo(t, "data BigStruct", &dataBigStruct, nonStackInfo(infoBigStruct())) + verifyGCInfo(t, "data string", &dataString, nonStackInfo(infoString)) + verifyGCInfo(t, "data slice", &dataSlice, nonStackInfo(infoSlice)) + verifyGCInfo(t, "data eface", &dataEface, nonStackInfo(infoEface)) + verifyGCInfo(t, "data iface", &dataIface, nonStackInfo(infoIface)) + + verifyGCInfo(t, "stack ScalarPtr", new(ScalarPtr), infoScalarPtr) + verifyGCInfo(t, "stack PtrScalar", new(PtrScalar), infoPtrScalar) + verifyGCInfo(t, "stack BigStruct", new(BigStruct), infoBigStruct()) + verifyGCInfo(t, "stack string", new(string), infoString) + verifyGCInfo(t, "stack slice", new([]string), infoSlice) + verifyGCInfo(t, "stack eface", new(interface{}), infoEface) + verifyGCInfo(t, "stack iface", new(Iface), infoIface) + + for i := 0; i < 10; i++ { + verifyGCInfo(t, "heap ScalarPtr", escape(new(ScalarPtr)), nonStackInfo(infoScalarPtr)) + verifyGCInfo(t, "heap PtrScalar", escape(new(PtrScalar)), nonStackInfo(infoPtrScalar)) + verifyGCInfo(t, "heap BigStruct", escape(new(BigStruct)), nonStackInfo(infoBigStruct())) + verifyGCInfo(t, "heap string", escape(new(string)), nonStackInfo(infoString)) + verifyGCInfo(t, "heap eface", escape(new(interface{})), nonStackInfo(infoEface)) + verifyGCInfo(t, "heap iface", escape(new(Iface)), nonStackInfo(infoIface)) + } + +} + +func verifyGCInfo(t *testing.T, name string, p interface{}, mask0 []byte) { + mask := /* runtime.GCMask(p) */ []byte(nil) + if len(mask) > len(mask0) { + mask0 = append(mask0, BitsDead) + mask = mask[:len(mask0)] + } + if bytes.Compare(mask, mask0) != 0 { + t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask) + return + } +} + +func nonStackInfo(mask []byte) []byte { + // BitsDead is replaced with BitsScalar everywhere except stacks. 
+ mask1 := make([]byte, len(mask)) + mw := false + for i, v := range mask { + if !mw && v == BitsDead { + v = BitsScalar + } + mw = !mw && v == BitsMultiWord + mask1[i] = v + } + return mask1 +} + +var gcinfoSink interface{} + +func escape(p interface{}) interface{} { + gcinfoSink = p + return p +} + +const ( + BitsDead = iota + BitsScalar + BitsPointer + BitsMultiWord +) + +const ( + BitsString = iota // unused + BitsSlice // unused + BitsIface + BitsEface +) + +type ScalarPtr struct { + q int + w *int + e int + r *int + t int + y *int +} + +var infoScalarPtr = []byte{BitsScalar, BitsPointer, BitsScalar, BitsPointer, BitsScalar, BitsPointer} + +type PtrScalar struct { + q *int + w int + e *int + r int + t *int + y int +} + +var infoPtrScalar = []byte{BitsPointer, BitsScalar, BitsPointer, BitsScalar, BitsPointer, BitsScalar} + +type BigStruct struct { + q *int + w byte + e [17]byte + r []byte + t int + y uint16 + u uint64 + i string +} + +func infoBigStruct() []byte { + switch runtime.GOARCH { + case "386", "arm": + return []byte{ + BitsPointer, // q *int + BitsScalar, BitsScalar, BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte + BitsPointer, BitsDead, BitsDead, // r []byte + BitsScalar, BitsScalar, BitsScalar, BitsScalar, // t int; y uint16; u uint64 + BitsPointer, BitsDead, // i string + } + case "amd64": + return []byte{ + BitsPointer, // q *int + BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte + BitsPointer, BitsDead, BitsDead, // r []byte + BitsScalar, BitsScalar, BitsScalar, // t int; y uint16; u uint64 + BitsPointer, BitsDead, // i string + } + case "amd64p32": + return []byte{ + BitsPointer, // q *int + BitsScalar, BitsScalar, BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte + BitsPointer, BitsDead, BitsDead, // r []byte + BitsScalar, BitsScalar, BitsDead, BitsScalar, BitsScalar, // t int; y uint16; u uint64 + BitsPointer, BitsDead, // i string + } + default: + panic("unknown arch") + } +} + +type Iface interface { + f() +} + +type IfaceImpl int + +func (IfaceImpl) f() { +} + +var ( + // BSS + bssScalarPtr ScalarPtr + bssPtrScalar PtrScalar + bssBigStruct BigStruct + bssString string + bssSlice []string + bssEface interface{} + bssIface Iface + + // DATA + dataScalarPtr = ScalarPtr{q: 1} + dataPtrScalar = PtrScalar{w: 1} + dataBigStruct = BigStruct{w: 1} + dataString = "foo" + dataSlice = []string{"foo"} + dataEface interface{} = 42 + dataIface Iface = IfaceImpl(42) + + infoString = []byte{BitsPointer, BitsDead} + infoSlice = []byte{BitsPointer, BitsDead, BitsDead} + infoEface = []byte{BitsMultiWord, BitsEface} + infoIface = []byte{BitsMultiWord, BitsIface} +) diff --git a/libgo/go/runtime/hashmap.go b/libgo/go/runtime/hashmap.go new file mode 100644 index 00000000000..b4e624423f6 --- /dev/null +++ b/libgo/go/runtime/hashmap.go @@ -0,0 +1,953 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// This file contains the implementation of Go's map type. +// +// A map is just a hash table. The data is arranged +// into an array of buckets. Each bucket contains up to +// 8 key/value pairs. The low-order bits of the hash are +// used to select a bucket. Each bucket contains a few +// high-order bits of each hash to distinguish the entries +// within a single bucket. +// +// If more than 8 keys hash to a bucket, we chain on +// extra buckets. 
+// +// When the hashtable grows, we allocate a new array +// of buckets twice as big. Buckets are incrementally +// copied from the old bucket array to the new bucket array. +// +// Map iterators walk through the array of buckets and +// return the keys in walk order (bucket #, then overflow +// chain order, then bucket index). To maintain iteration +// semantics, we never move keys within their bucket (if +// we did, keys might be returned 0 or 2 times). When +// growing the table, iterators remain iterating through the +// old table and must check the new table if the bucket +// they are iterating through has been moved ("evacuated") +// to the new table. + +// Picking loadFactor: too large and we have lots of overflow +// buckets, too small and we waste a lot of space. I wrote +// a simple program to check some stats for different loads: +// (64-bit, 8 byte keys and values) +// loadFactor %overflow bytes/entry hitprobe missprobe +// 4.00 2.13 20.77 3.00 4.00 +// 4.50 4.05 17.30 3.25 4.50 +// 5.00 6.85 14.77 3.50 5.00 +// 5.50 10.55 12.94 3.75 5.50 +// 6.00 15.27 11.67 4.00 6.00 +// 6.50 20.90 10.79 4.25 6.50 +// 7.00 27.14 10.15 4.50 7.00 +// 7.50 34.03 9.73 4.75 7.50 +// 8.00 41.10 9.40 5.00 8.00 +// +// %overflow = percentage of buckets which have an overflow bucket +// bytes/entry = overhead bytes used per key/value pair +// hitprobe = # of entries to check when looking up a present key +// missprobe = # of entries to check when looking up an absent key +// +// Keep in mind this data is for maximally loaded tables, i.e. just +// before the table grows. Typical tables will be somewhat less loaded. + +import ( + "unsafe" +) + +const ( + // Maximum number of key/value pairs a bucket can hold. + bucketCntBits = 3 + bucketCnt = 1 << bucketCntBits + + // Maximum average load of a bucket that triggers growth. + loadFactor = 6.5 + + // Maximum key or value size to keep inline (instead of mallocing per element). + // Must fit in a uint8. + // Fast versions cannot handle big values - the cutoff size for + // fast versions in ../../cmd/gc/walk.c must be at most this value. + maxKeySize = 128 + maxValueSize = 128 + + // data offset should be the size of the bmap struct, but needs to be + // aligned correctly. For amd64p32 this means 64-bit alignment + // even though pointers are 32 bit. + dataOffset = unsafe.Offsetof(struct { + b bmap + v int64 + }{}.v) + + // Possible tophash values. We reserve a few possibilities for special marks. + // Each bucket (including its overflow buckets, if any) will have either all or none of its + // entries in the evacuated* states (except during the evacuate() method, which only happens + // during map writes and thus no one else can observe the map during that time). + empty = 0 // cell is empty + evacuatedEmpty = 1 // cell is empty, bucket is evacuated. + evacuatedX = 2 // key/value is valid. Entry has been evacuated to first half of larger table. + evacuatedY = 3 // same as above, but evacuated to second half of larger table. + minTopHash = 4 // minimum tophash for a normal filled cell. + + // flags + iterator = 1 // there may be an iterator using buckets + oldIterator = 2 // there may be an iterator using oldbuckets + + // sentinel bucket ID for iterator checks + noCheck = 1<<(8*ptrSize) - 1 + + // trigger a garbage collection at every alloc called from this code + checkgc = false +) + +// A header for a Go map. +type hmap struct { + // Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and + // ../reflect/type.go. 
Don't change this structure without also changing that code! + count int // # live cells == size of map. Must be first (used by len() builtin) + flags uint32 + hash0 uint32 // hash seed + B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items) + + buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0. + oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing + nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated) +} + +// A bucket for a Go map. +type bmap struct { + tophash [bucketCnt]uint8 + overflow *bmap + // Followed by bucketCnt keys and then bucketCnt values. + // NOTE: packing all the keys together and then all the values together makes the + // code a bit more complicated than alternating key/value/key/value/... but it allows + // us to eliminate padding which would be needed for, e.g., map[int64]int8. +} + +// A hash iteration structure. +// If you modify hiter, also change cmd/gc/reflect.c to indicate +// the layout of this structure. +type hiter struct { + key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/gc/range.c). + value unsafe.Pointer // Must be in second position (see cmd/gc/range.c). + t *maptype + h *hmap + buckets unsafe.Pointer // bucket ptr at hash_iter initialization time + bptr *bmap // current bucket + startBucket uintptr // bucket iteration started at + offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1) + wrapped bool // already wrapped around from end of bucket array to beginning + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr +} + +func evacuated(b *bmap) bool { + h := b.tophash[0] + return h > empty && h < minTopHash +} + +func makemap(t *maptype, hint int64) *hmap { + if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) { + gothrow("bad hmap size") + } + + if hint < 0 || int64(int32(hint)) != hint { + panic("makemap: size out of range") + // TODO: make hint an int, then none of this nonsense + } + + if !ismapkey(t.key) { + gothrow("runtime.makemap: unsupported map key type") + } + + // check compiler's and reflect's math + if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) || + t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) { + gothrow("key size wrong") + } + if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) || + t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) { + gothrow("value size wrong") + } + + // invariants we depend on. We should probably check these at compile time + // somewhere, but for now we'll do it here. 
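Right after these invariant checks, makemap picks the smallest B for which the requested element count stays at or below loadFactor * 2^B. A standalone restatement of that sizing rule, not part of the patch (pickB is an illustrative helper):

package main

import "fmt"

const (
	bucketCnt  = 8
	loadFactor = 6.5
)

// pickB returns the smallest B such that hint items keep the average
// bucket load at or below loadFactor, matching the loop in makemap.
func pickB(hint int64) uint8 {
	B := uint8(0)
	for hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B) {
		B++
	}
	return B
}

func main() {
	for _, hint := range []int64{0, 8, 9, 100, 1000} {
		fmt.Println(hint, "->", pickB(hint))
	}
}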
+ if t.key.align > bucketCnt { + gothrow("key align too big") + } + if t.elem.align > bucketCnt { + gothrow("value align too big") + } + if uintptr(t.key.size)%uintptr(t.key.align) != 0 { + gothrow("key size not a multiple of key align") + } + if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 { + gothrow("value size not a multiple of value align") + } + if bucketCnt < 8 { + gothrow("bucketsize too small for proper alignment") + } + if dataOffset%uintptr(t.key.align) != 0 { + gothrow("need padding in bucket (key)") + } + if dataOffset%uintptr(t.elem.align) != 0 { + gothrow("need padding in bucket (value)") + } + + // find size parameter which will hold the requested # of elements + B := uint8(0) + for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ { + } + + // allocate initial hash table + // if B == 0, the buckets field is allocated lazily later (in mapassign) + // If hint is large zeroing this memory could take a while. + var buckets unsafe.Pointer + if B != 0 { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + buckets = newarray(t.bucket, uintptr(1)<<B) + } + + // initialize Hmap + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + h := (*hmap)(newobject(t.hmap)) + h.count = 0 + h.B = B + h.flags = 0 + h.hash0 = fastrand1() + h.buckets = buckets + h.oldbuckets = nil + h.nevacuate = 0 + + return h +} + +// mapaccess1 returns a pointer to h[key]. Never returns nil, instead +// it will return a reference to the zero object for the value type if +// the key is not in the map. +// NOTE: The returned pointer may keep the whole map live, so don't +// hold onto it for very long. +func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + pc := funcPC(mapaccess1) + racereadpc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.key, key, callerpc, pc) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero) + } + alg := goalg(t.key.alg) + hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + if t.indirectkey { + k = *((*unsafe.Pointer)(k)) + } + if alg.equal(key, k, uintptr(t.key.size)) { + v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) + if t.indirectvalue { + v = *((*unsafe.Pointer)(v)) + } + return v + } + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero) + } + } +} + +func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + pc := funcPC(mapaccess2) + racereadpc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.key, key, callerpc, pc) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero), false + } + alg := goalg(t.key.alg) + hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := 
(*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + if t.indirectkey { + k = *((*unsafe.Pointer)(k)) + } + if alg.equal(key, k, uintptr(t.key.size)) { + v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) + if t.indirectvalue { + v = *((*unsafe.Pointer)(v)) + } + return v, true + } + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero), false + } + } +} + +// returns both key and value. Used by map iterator +func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) { + if h == nil || h.count == 0 { + return nil, nil + } + alg := goalg(t.key.alg) + hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + if t.indirectkey { + k = *((*unsafe.Pointer)(k)) + } + if alg.equal(key, k, uintptr(t.key.size)) { + v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) + if t.indirectvalue { + v = *((*unsafe.Pointer)(v)) + } + return k, v + } + } + b = b.overflow + if b == nil { + return nil, nil + } + } +} + +func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) { + if h == nil { + panic("assignment to entry in nil map") + } + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&t)) + pc := funcPC(mapassign1) + racewritepc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.key, key, callerpc, pc) + raceReadObjectPC(t.elem, val, callerpc, pc) + } + + alg := goalg(t.key.alg) + hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0)) + + if h.buckets == nil { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + h.buckets = newarray(t.bucket, 1) + } + +again: + bucket := hash & (uintptr(1)<<h.B - 1) + if h.oldbuckets != nil { + growWork(t, h, bucket) + } + b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize))) + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + + var inserti *uint8 + var insertk unsafe.Pointer + var insertv unsafe.Pointer + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + if b.tophash[i] == empty && inserti == nil { + inserti = &b.tophash[i] + insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) + } + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + k2 := k + if t.indirectkey { + k2 = *((*unsafe.Pointer)(k2)) + } + if !alg.equal(key, k2, uintptr(t.key.size)) { + continue + } + // already have a mapping for key. Update it. 
+ memmove(k2, key, uintptr(t.key.size)) + v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) + v2 := v + if t.indirectvalue { + v2 = *((*unsafe.Pointer)(v2)) + } + memmove(v2, val, uintptr(t.elem.size)) + return + } + if b.overflow == nil { + break + } + b = b.overflow + } + + // did not find mapping for key. Allocate new cell & add entry. + if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt { + hashGrow(t, h) + goto again // Growing the table invalidates everything, so try again + } + + if inserti == nil { + // all current buckets are full, allocate a new one. + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + newb := (*bmap)(newobject(t.bucket)) + b.overflow = newb + inserti = &newb.tophash[0] + insertk = add(unsafe.Pointer(newb), dataOffset) + insertv = add(insertk, bucketCnt*uintptr(t.keysize)) + } + + // store new key/value at insert position + if t.indirectkey { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + kmem := newobject(t.key) + *(*unsafe.Pointer)(insertk) = kmem + insertk = kmem + } + if t.indirectvalue { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + vmem := newobject(t.elem) + *(*unsafe.Pointer)(insertv) = vmem + insertv = vmem + } + memmove(insertk, key, uintptr(t.key.size)) + memmove(insertv, val, uintptr(t.elem.size)) + *inserti = top + h.count++ +} + +func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + pc := funcPC(mapdelete) + racewritepc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.key, key, callerpc, pc) + } + if h == nil || h.count == 0 { + return + } + alg := goalg(t.key.alg) + hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0)) + bucket := hash & (uintptr(1)<<h.B - 1) + if h.oldbuckets != nil { + growWork(t, h, bucket) + } + b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize))) + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + k2 := k + if t.indirectkey { + k2 = *((*unsafe.Pointer)(k2)) + } + if !alg.equal(key, k2, uintptr(t.key.size)) { + continue + } + memclr(k, uintptr(t.keysize)) + v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize)) + memclr(v, uintptr(t.valuesize)) + b.tophash[i] = empty + h.count-- + return + } + b = b.overflow + if b == nil { + return + } + } +} + +func mapiterinit(t *maptype, h *hmap, it *hiter) { + // Clear pointer fields so garbage collector does not complain. 
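The access, assign and delete paths above, together with the iterator setup that follows, are the entry points ordinary map syntax compiles down to in the upstream gc toolchain; gccgo's lowering may differ, so the correspondence in the comments below is illustrative only:

package main

import "fmt"

func main() {
	m := make(map[string]int, 100) // makemap with a size hint

	m["a"] = 1      // mapassign1
	v := m["a"]     // mapaccess1: a missing key yields the zero value
	w, ok := m["b"] // mapaccess2: the extra result reports presence
	delete(m, "a")  // mapdelete

	for k, x := range m { // mapiterinit followed by repeated mapiternext
		fmt.Println(k, x)
	}
	fmt.Println(v, w, ok) // 1 0 false
}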
+ it.key = nil + it.value = nil + it.t = nil + it.h = nil + it.buckets = nil + it.bptr = nil + + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit)) + } + + if h == nil || h.count == 0 { + it.key = nil + it.value = nil + return + } + + if unsafe.Sizeof(hiter{})/ptrSize != 10 { + gothrow("hash_iter size incorrect") // see ../../cmd/gc/reflect.c + } + it.t = t + it.h = h + + // grab snapshot of bucket state + it.B = h.B + it.buckets = h.buckets + + // decide where to start + r := uintptr(fastrand1()) + if h.B > 31-bucketCntBits { + r += uintptr(fastrand1()) << 31 + } + it.startBucket = r & (uintptr(1)<<h.B - 1) + it.offset = uint8(r >> h.B & (bucketCnt - 1)) + + // iterator state + it.bucket = it.startBucket + it.wrapped = false + it.bptr = nil + + // Remember we have an iterator. + // Can run concurrently with another hash_iter_init(). + for { + old := h.flags + if old == old|iterator|oldIterator { + break + } + if cas(&h.flags, old, old|iterator|oldIterator) { + break + } + } + + mapiternext(it) +} + +func mapiternext(it *hiter) { + h := it.h + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&it)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext)) + } + t := it.t + bucket := it.bucket + b := it.bptr + i := it.i + checkBucket := it.checkBucket + alg := goalg(t.key.alg) + +next: + if b == nil { + if bucket == it.startBucket && it.wrapped { + // end of iteration + it.key = nil + it.value = nil + return + } + if h.oldbuckets != nil && it.B == h.B { + // Iterator was started in the middle of a grow, and the grow isn't done yet. + // If the bucket we're looking at hasn't been filled in yet (i.e. the old + // bucket hasn't been evacuated) then we need to iterate through the old + // bucket and only return the ones that will be migrated to this bucket. + oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1) + b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))) + if !evacuated(b) { + checkBucket = bucket + } else { + b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize))) + checkBucket = noCheck + } + } else { + b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize))) + checkBucket = noCheck + } + bucket++ + if bucket == uintptr(1)<<it.B { + bucket = 0 + it.wrapped = true + } + i = 0 + } + for ; i < bucketCnt; i++ { + offi := (i + it.offset) & (bucketCnt - 1) + k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize)) + v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize)) + if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty { + if checkBucket != noCheck { + // Special case: iterator was started during a grow and the + // grow is not done yet. We're working on a bucket whose + // oldbucket has not been evacuated yet. Or at least, it wasn't + // evacuated when we started the bucket. So we're iterating + // through the oldbucket, skipping any keys that will go + // to the other new bucket (each oldbucket expands to two + // buckets during a grow). + k2 := k + if t.indirectkey { + k2 = *((*unsafe.Pointer)(k2)) + } + if alg.equal(k2, k2, uintptr(t.key.size)) { + // If the item in the oldbucket is not destined for + // the current new bucket in the iteration, skip it. + hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0)) + if hash&(uintptr(1)<<it.B-1) != checkBucket { + continue + } + } else { + // Hash isn't repeatable if k != k (NaNs). 
We need a + // repeatable and randomish choice of which direction + // to send NaNs during evacuation. We'll use the low + // bit of tophash to decide which way NaNs go. + // NOTE: this case is why we need two evacuate tophash + // values, evacuatedX and evacuatedY, that differ in + // their low bit. + if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) { + continue + } + } + } + if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY { + // this is the golden data, we can return it. + if t.indirectkey { + k = *((*unsafe.Pointer)(k)) + } + it.key = k + if t.indirectvalue { + v = *((*unsafe.Pointer)(v)) + } + it.value = v + } else { + // The hash table has grown since the iterator was started. + // The golden data for this key is now somewhere else. + k2 := k + if t.indirectkey { + k2 = *((*unsafe.Pointer)(k2)) + } + if alg.equal(k2, k2, uintptr(t.key.size)) { + // Check the current hash table for the data. + // This code handles the case where the key + // has been deleted, updated, or deleted and reinserted. + // NOTE: we need to regrab the key as it has potentially been + // updated to an equal() but not identical key (e.g. +0.0 vs -0.0). + rk, rv := mapaccessK(t, h, k2) + if rk == nil { + continue // key has been deleted + } + it.key = rk + it.value = rv + } else { + // if key!=key then the entry can't be deleted or + // updated, so we can just return it. That's lucky for + // us because when key!=key we can't look it up + // successfully in the current table. + it.key = k2 + if t.indirectvalue { + v = *((*unsafe.Pointer)(v)) + } + it.value = v + } + } + it.bucket = bucket + it.bptr = b + it.i = i + 1 + it.checkBucket = checkBucket + return + } + } + b = b.overflow + i = 0 + goto next +} + +func hashGrow(t *maptype, h *hmap) { + if h.oldbuckets != nil { + gothrow("evacuation not done in time") + } + oldbuckets := h.buckets + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1)) + flags := h.flags &^ (iterator | oldIterator) + if h.flags&iterator != 0 { + flags |= oldIterator + } + // commit the grow (atomic wrt gc) + h.B++ + h.flags = flags + h.oldbuckets = oldbuckets + h.buckets = newbuckets + h.nevacuate = 0 + + // the actual copying of the hash table data is done incrementally + // by growWork() and evacuate(). +} + +func growWork(t *maptype, h *hmap, bucket uintptr) { + noldbuckets := uintptr(1) << (h.B - 1) + + // make sure we evacuate the oldbucket corresponding + // to the bucket we're about to use + evacuate(t, h, bucket&(noldbuckets-1)) + + // evacuate one more oldbucket to make progress on growing + if h.oldbuckets != nil { + evacuate(t, h, h.nevacuate) + } +} + +func evacuate(t *maptype, h *hmap, oldbucket uintptr) { + b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))) + newbit := uintptr(1) << (h.B - 1) + alg := goalg(t.key.alg) + if !evacuated(b) { + // TODO: reuse overflow buckets instead of using new ones, if there + // is no iterator using the old buckets. (If !oldIterator.) 
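When the table doubles, every old bucket's entries are redistributed between the same index (the "X" half) and that index plus newbit (the "Y" half), according to the hash bit that has just become significant. A standalone sketch of that decision, not part of the patch (splitTarget and the sample values are illustrative):

package main

import "fmt"

// splitTarget reports where an entry with the given hash lands when the
// table grows from oldB to oldB+1 bits: either its old bucket index (X)
// or the old index plus newbit (Y).
func splitTarget(hash uintptr, oldB uint8) (oldBucket, newBucket uintptr) {
	newbit := uintptr(1) << oldB
	oldBucket = hash & (newbit - 1)
	newBucket = oldBucket
	if hash&newbit != 0 {
		newBucket += newbit // the "Y" half of the doubled array
	}
	return
}

func main() {
	for _, h := range []uintptr{0x05, 0x15, 0x2a} {
		ob, nb := splitTarget(h, 4) // growing from 16 to 32 buckets
		fmt.Printf("hash %#x: old bucket %d -> new bucket %d\n", h, ob, nb)
	}
}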
+ + x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize))) + y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize))) + xi := 0 + yi := 0 + xk := add(unsafe.Pointer(x), dataOffset) + yk := add(unsafe.Pointer(y), dataOffset) + xv := add(xk, bucketCnt*uintptr(t.keysize)) + yv := add(yk, bucketCnt*uintptr(t.keysize)) + for ; b != nil; b = b.overflow { + k := add(unsafe.Pointer(b), dataOffset) + v := add(k, bucketCnt*uintptr(t.keysize)) + for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) { + top := b.tophash[i] + if top == empty { + b.tophash[i] = evacuatedEmpty + continue + } + if top < minTopHash { + gothrow("bad map state") + } + k2 := k + if t.indirectkey { + k2 = *((*unsafe.Pointer)(k2)) + } + // Compute hash to make our evacuation decision (whether we need + // to send this key/value to bucket x or bucket y). + hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0)) + if h.flags&iterator != 0 { + if !alg.equal(k2, k2, uintptr(t.key.size)) { + // If key != key (NaNs), then the hash could be (and probably + // will be) entirely different from the old hash. Moreover, + // it isn't reproducible. Reproducibility is required in the + // presence of iterators, as our evacuation decision must + // match whatever decision the iterator made. + // Fortunately, we have the freedom to send these keys either + // way. Also, tophash is meaningless for these kinds of keys. + // We let the low bit of tophash drive the evacuation decision. + // We recompute a new random tophash for the next level so + // these keys will get evenly distributed across all buckets + // after multiple grows. + if (top & 1) != 0 { + hash |= newbit + } else { + hash &^= newbit + } + top = uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + } + } + if (hash & newbit) == 0 { + b.tophash[i] = evacuatedX + if xi == bucketCnt { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + newx := (*bmap)(newobject(t.bucket)) + x.overflow = newx + x = newx + xi = 0 + xk = add(unsafe.Pointer(x), dataOffset) + xv = add(xk, bucketCnt*uintptr(t.keysize)) + } + x.tophash[xi] = top + if t.indirectkey { + *(*unsafe.Pointer)(xk) = k2 // copy pointer + } else { + memmove(xk, k, uintptr(t.key.size)) // copy value + } + if t.indirectvalue { + *(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v) + } else { + memmove(xv, v, uintptr(t.elem.size)) + } + xi++ + xk = add(xk, uintptr(t.keysize)) + xv = add(xv, uintptr(t.valuesize)) + } else { + b.tophash[i] = evacuatedY + if yi == bucketCnt { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + newy := (*bmap)(newobject(t.bucket)) + y.overflow = newy + y = newy + yi = 0 + yk = add(unsafe.Pointer(y), dataOffset) + yv = add(yk, bucketCnt*uintptr(t.keysize)) + } + y.tophash[yi] = top + if t.indirectkey { + *(*unsafe.Pointer)(yk) = k2 + } else { + memmove(yk, k, uintptr(t.key.size)) + } + if t.indirectvalue { + *(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v) + } else { + memmove(yv, v, uintptr(t.elem.size)) + } + yi++ + yk = add(yk, uintptr(t.keysize)) + yv = add(yv, uintptr(t.valuesize)) + } + } + } + // Unlink the overflow buckets & clear key/value to help GC. 
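The NaN special cases above have a visible consequence at the language level, shown by this standalone program (not part of the patch): keys that are not equal to themselves can be stored repeatedly but never retrieved or deleted.

package main

import (
	"fmt"
	"math"
)

func main() {
	m := map[float64]int{}
	nan := math.NaN()

	m[nan] = 1 // each assignment adds a fresh entry, since nan != nan
	m[nan] = 2
	m[nan] = 3

	fmt.Println(len(m)) // 3
	fmt.Println(m[nan]) // 0: lookups can never find a NaN key
	delete(m, nan)      // no effect for the same reason
	fmt.Println(len(m)) // 3
}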
+ if h.flags&oldIterator == 0 { + b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))) + b.overflow = nil + memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset) + } + } + + // Advance evacuation mark + if oldbucket == h.nevacuate { + h.nevacuate = oldbucket + 1 + if oldbucket+1 == newbit { // newbit == # of oldbuckets + // Growing is all done. Free old main bucket array. + h.oldbuckets = nil + } + } +} + +func ismapkey(t *_type) bool { + return goalg(t.alg).hash != nil +} + +// Reflect stubs. Called from ../reflect/asm_*.s + +func reflect_makemap(t *maptype) *hmap { + return makemap(t, 0) +} + +func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { + val, ok := mapaccess2(t, h, key) + if !ok { + // reflect wants nil for a missing element + val = nil + } + return val +} + +func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) { + mapassign1(t, h, key, val) +} + +func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { + mapdelete(t, h, key) +} + +func reflect_mapiterinit(t *maptype, h *hmap) *hiter { + it := new(hiter) + mapiterinit(t, h, it) + return it +} + +func reflect_mapiternext(it *hiter) { + mapiternext(it) +} + +func reflect_mapiterkey(it *hiter) unsafe.Pointer { + return it.key +} + +func reflect_maplen(h *hmap) int { + if h == nil { + return 0 + } + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&h)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen)) + } + return h.count +} + +func reflect_ismapkey(t *_type) bool { + return ismapkey(t) +} diff --git a/libgo/go/runtime/hashmap_fast.go b/libgo/go/runtime/hashmap_fast.go new file mode 100644 index 00000000000..8e21e02d64e --- /dev/null +++ b/libgo/go/runtime/hashmap_fast.go @@ -0,0 +1,379 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero) + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. + b = (*bmap)(h.buckets) + } else { + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))) + if k != key { + continue + } + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)) + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero) + } + } +} + +func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero), false + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. 
+ b = (*bmap)(h.buckets) + } else { + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))) + if k != key { + continue + } + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero), false + } + } +} + +func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero) + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. + b = (*bmap)(h.buckets) + } else { + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))) + if k != key { + continue + } + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)) + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero) + } + } +} + +func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero), false + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. + b = (*bmap)(h.buckets) + } else { + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))) + if k != key { + continue + } + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero), false + } + } +} + +func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero) + } + key := (*stringStruct)(unsafe.Pointer(&ky)) + if h.B == 0 { + // One-bucket table. 
+ b := (*bmap)(h.buckets) + if key.len < 32 { + // short key, doing lots of comparisons is ok + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)) + } + } + return unsafe.Pointer(t.elem.zero) + } + // long key, try not to do more comparisons than necessary + keymaybe := uintptr(bucketCnt) + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)) + } + // check first 4 bytes + // TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of + // four 1-byte comparisons. + if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { + continue + } + // check last 4 bytes + if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { + continue + } + if keymaybe != bucketCnt { + // Two keys are potential matches. Use hash to distinguish them. + goto dohash + } + keymaybe = i + } + if keymaybe != bucketCnt { + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize)) + if memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)) + } + } + return unsafe.Pointer(t.elem.zero) + } +dohash: + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x != top { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)) + } + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero) + } + } +} + +func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero), false + } + key := (*stringStruct)(unsafe.Pointer(&ky)) + if h.B == 0 { + // One-bucket table. 
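For long string keys the fast path above avoids full memeq calls with cheap pre-checks: length, pointer identity, then the first and last four bytes, falling back to hashing only when two candidates survive. A standalone sketch of the filtering idea on plain strings rather than stringStruct pointers (probablyEqual is an illustrative helper; it can only reject, and a true answer still needs a full comparison):

package main

import "fmt"

// probablyEqual is a cheap pre-filter in the spirit of mapaccess*_faststr.
func probablyEqual(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	if len(a) < 4 {
		return true // too short for the 4-byte probes; treat as a possible match
	}
	return a[:4] == b[:4] && a[len(a)-4:] == b[len(b)-4:]
}

func main() {
	key := "observability-pipeline-aggregator-0042"
	fmt.Println(probablyEqual(key, "observability-pipeline-aggregator-0041")) // false: last bytes differ
	fmt.Println(probablyEqual(key, key))                                      // true: full compare still required
}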
+ b := (*bmap)(h.buckets) + if key.len < 32 { + // short key, doing lots of comparisons is ok + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true + } + } + return unsafe.Pointer(t.elem.zero), false + } + // long key, try not to do more comparisons than necessary + keymaybe := uintptr(bucketCnt) + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true + } + // check first 4 bytes + if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { + continue + } + // check last 4 bytes + if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { + continue + } + if keymaybe != bucketCnt { + // Two keys are potential matches. Use hash to distinguish them. + goto dohash + } + keymaybe = i + } + if keymaybe != bucketCnt { + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize)) + if memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true + } + } + return unsafe.Pointer(t.elem.zero), false + } +dohash: + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x != top { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true + } + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero), false + } + } +} diff --git a/libgo/go/runtime/lock_futex.go b/libgo/go/runtime/lock_futex.go new file mode 100644 index 00000000000..725962341d4 --- /dev/null +++ b/libgo/go/runtime/lock_futex.go @@ -0,0 +1,205 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd linux + +package runtime + +import "unsafe" + +// This implementation depends on OS-specific implementations of +// +// runtime·futexsleep(uint32 *addr, uint32 val, int64 ns) +// Atomically, +// if(*addr == val) sleep +// Might be woken up spuriously; that's allowed. +// Don't sleep longer than ns; ns < 0 means forever. +// +// runtime·futexwakeup(uint32 *addr, uint32 cnt) +// If any procs are sleeping on addr, wake up at most cnt. 
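The contract above, plus the three key states defined next, can be made concrete with a deliberately simplified, self-contained sketch. It is an illustration rather than the runtime's code: the spinning phase is omitted, and futexsleep/futexwakeup are replaced by a yield so the sketch runs anywhere.

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

const (
	unlocked = 0
	locked   = 1
	sleeping = 2 // some thread may be parked and will need a wakeup
)

type futexLock struct{ key uint32 }

func (l *futexLock) lock() {
	// Speculative grab, as in the runtime's lock().
	if atomic.SwapUint32(&l.key, locked) == unlocked {
		return
	}
	// Contended path: advertise a (would-be) sleeper and keep trying.
	for atomic.SwapUint32(&l.key, sleeping) != unlocked {
		runtime.Gosched() // real code: futexsleep(&l.key, sleeping, -1)
	}
}

func (l *futexLock) unlock() {
	if atomic.SwapUint32(&l.key, unlocked) == sleeping {
		// real code: futexwakeup(&l.key, 1)
	}
}

func main() {
	var l futexLock
	var wg sync.WaitGroup
	n := 0
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				l.lock()
				n++
				l.unlock()
			}
		}()
	}
	wg.Wait()
	fmt.Println(n) // 4000
}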
+ +const ( + mutex_unlocked = 0 + mutex_locked = 1 + mutex_sleeping = 2 + + active_spin = 4 + active_spin_cnt = 30 + passive_spin = 1 +) + +// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping. +// mutex_sleeping means that there is presumably at least one sleeping thread. +// Note that there can be spinning threads during all states - they do not +// affect mutex's state. + +func futexsleep(addr *uint32, val uint32, ns int64) +func futexwakeup(addr *uint32, cnt uint32) + +// We use the uintptr mutex.key and note.key as a uint32. +func key32(p *uintptr) *uint32 { + return (*uint32)(unsafe.Pointer(p)) +} + +func lock(l *mutex) { + gp := getg() + + if gp.m.locks < 0 { + gothrow("runtime·lock: lock count") + } + gp.m.locks++ + + // Speculative grab for lock. + v := xchg(key32(&l.key), mutex_locked) + if v == mutex_unlocked { + return + } + + // wait is either MUTEX_LOCKED or MUTEX_SLEEPING + // depending on whether there is a thread sleeping + // on this mutex. If we ever change l->key from + // MUTEX_SLEEPING to some other value, we must be + // careful to change it back to MUTEX_SLEEPING before + // returning, to ensure that the sleeping thread gets + // its wakeup call. + wait := v + + // On uniprocessors, no point spinning. + // On multiprocessors, spin for ACTIVE_SPIN attempts. + spin := 0 + if ncpu > 1 { + spin = active_spin + } + for { + // Try for lock, spinning. + for i := 0; i < spin; i++ { + for l.key == mutex_unlocked { + if cas(key32(&l.key), mutex_unlocked, wait) { + return + } + } + procyield(active_spin_cnt) + } + + // Try for lock, rescheduling. + for i := 0; i < passive_spin; i++ { + for l.key == mutex_unlocked { + if cas(key32(&l.key), mutex_unlocked, wait) { + return + } + } + osyield() + } + + // Sleep. + v = xchg(key32(&l.key), mutex_sleeping) + if v == mutex_unlocked { + return + } + wait = mutex_sleeping + futexsleep(key32(&l.key), mutex_sleeping, -1) + } +} + +func unlock(l *mutex) { + v := xchg(key32(&l.key), mutex_unlocked) + if v == mutex_unlocked { + gothrow("unlock of unlocked lock") + } + if v == mutex_sleeping { + futexwakeup(key32(&l.key), 1) + } + + gp := getg() + gp.m.locks-- + if gp.m.locks < 0 { + gothrow("runtime·unlock: lock count") + } + if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack + gp.stackguard0 = stackPreempt + } +} + +// One-time notifications. 
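The one-time notification functions that follow behave like a one-shot event. As a rough, standalone analogy only (not the runtime API, which parks the M directly): a cleared note acts like a fresh channel, notewakeup like close, and notesleep like a receive; waking the same note twice is an error, just as the runtime throws on a double wakeup.

package main

import "fmt"

type note chan struct{}

func noteclear() note   { return make(note) } // reset to "not signaled"
func notewakeup(n note) { close(n) }          // signal once; a second close would panic
func notesleep(n note)  { <-n }               // block until signaled

func main() {
	n := noteclear()
	go notewakeup(n)
	notesleep(n)
	fmt.Println("woken")
}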
+func noteclear(n *note) { + n.key = 0 +} + +func notewakeup(n *note) { + old := xchg(key32(&n.key), 1) + if old != 0 { + print("notewakeup - double wakeup (", old, ")\n") + gothrow("notewakeup - double wakeup") + } + futexwakeup(key32(&n.key), 1) +} + +func notesleep(n *note) { + gp := getg() + if gp != gp.m.g0 { + gothrow("notesleep not on g0") + } + for atomicload(key32(&n.key)) == 0 { + gp.m.blocked = true + futexsleep(key32(&n.key), 0, -1) + gp.m.blocked = false + } +} + +//go:nosplit +func notetsleep_internal(n *note, ns int64) bool { + gp := getg() + + if ns < 0 { + for atomicload(key32(&n.key)) == 0 { + gp.m.blocked = true + futexsleep(key32(&n.key), 0, -1) + gp.m.blocked = false + } + return true + } + + if atomicload(key32(&n.key)) != 0 { + return true + } + + deadline := nanotime() + ns + for { + gp.m.blocked = true + futexsleep(key32(&n.key), 0, ns) + gp.m.blocked = false + if atomicload(key32(&n.key)) != 0 { + break + } + now := nanotime() + if now >= deadline { + break + } + ns = deadline - now + } + return atomicload(key32(&n.key)) != 0 +} + +func notetsleep(n *note, ns int64) bool { + gp := getg() + if gp != gp.m.g0 && gp.m.gcing == 0 { + gothrow("notetsleep not on g0") + } + + return notetsleep_internal(n, ns) +} + +// same as runtime·notetsleep, but called on user g (not g0) +// calls only nosplit functions between entersyscallblock/exitsyscall +func notetsleepg(n *note, ns int64) bool { + gp := getg() + if gp == gp.m.g0 { + gothrow("notetsleepg on g0") + } + + entersyscallblock() + ok := notetsleep_internal(n, ns) + exitsyscall() + return ok +} diff --git a/libgo/go/runtime/lock_sema.go b/libgo/go/runtime/lock_sema.go new file mode 100644 index 00000000000..d136b828061 --- /dev/null +++ b/libgo/go/runtime/lock_sema.go @@ -0,0 +1,270 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin nacl netbsd openbsd plan9 solaris windows + +package runtime + +import "unsafe" + +// This implementation depends on OS-specific implementations of +// +// uintptr runtime·semacreate(void) +// Create a semaphore, which will be assigned to m->waitsema. +// The zero value is treated as absence of any semaphore, +// so be sure to return a non-zero value. +// +// int32 runtime·semasleep(int64 ns) +// If ns < 0, acquire m->waitsema and return 0. +// If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds. +// Return 0 if the semaphore was acquired, -1 if interrupted or timed out. +// +// int32 runtime·semawakeup(M *mp) +// Wake up mp, which is or will soon be sleeping on mp->waitsema. +// +const ( + locked uintptr = 1 + + active_spin = 4 + active_spin_cnt = 30 + passive_spin = 1 +) + +func semacreate() uintptr +func semasleep(int64) int32 +func semawakeup(mp *m) + +func lock(l *mutex) { + gp := getg() + if gp.m.locks < 0 { + gothrow("runtime·lock: lock count") + } + gp.m.locks++ + + // Speculative grab for lock. + if casuintptr(&l.key, 0, locked) { + return + } + if gp.m.waitsema == 0 { + gp.m.waitsema = semacreate() + } + + // On uniprocessor's, no point spinning. + // On multiprocessors, spin for ACTIVE_SPIN attempts. + spin := 0 + if ncpu > 1 { + spin = active_spin + } +Loop: + for i := 0; ; i++ { + v := atomicloaduintptr(&l.key) + if v&locked == 0 { + // Unlocked. Try to lock. 
+ if casuintptr(&l.key, v, v|locked) { + return + } + i = 0 + } + if i < spin { + procyield(active_spin_cnt) + } else if i < spin+passive_spin { + osyield() + } else { + // Someone else has it. + // l->waitm points to a linked list of M's waiting + // for this lock, chained through m->nextwaitm. + // Queue this M. + for { + gp.m.nextwaitm = (*m)((unsafe.Pointer)(v &^ locked)) + if casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) { + break + } + v = atomicloaduintptr(&l.key) + if v&locked == 0 { + continue Loop + } + } + if v&locked != 0 { + // Queued. Wait. + semasleep(-1) + i = 0 + } + } + } +} + +func unlock(l *mutex) { + gp := getg() + var mp *m + for { + v := atomicloaduintptr(&l.key) + if v == locked { + if casuintptr(&l.key, locked, 0) { + break + } + } else { + // Other M's are waiting for the lock. + // Dequeue an M. + mp = (*m)((unsafe.Pointer)(v &^ locked)) + if casuintptr(&l.key, v, uintptr(unsafe.Pointer(mp.nextwaitm))) { + // Dequeued an M. Wake it. + semawakeup(mp) + break + } + } + } + gp.m.locks-- + if gp.m.locks < 0 { + gothrow("runtime·unlock: lock count") + } + if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack + gp.stackguard0 = stackPreempt + } +} + +// One-time notifications. +func noteclear(n *note) { + n.key = 0 +} + +func notewakeup(n *note) { + var v uintptr + for { + v = atomicloaduintptr(&n.key) + if casuintptr(&n.key, v, locked) { + break + } + } + + // Successfully set waitm to locked. + // What was it before? + switch { + case v == 0: + // Nothing was waiting. Done. + case v == locked: + // Two notewakeups! Not allowed. + gothrow("notewakeup - double wakeup") + default: + // Must be the waiting m. Wake it up. + semawakeup((*m)(unsafe.Pointer(v))) + } +} + +func notesleep(n *note) { + gp := getg() + if gp != gp.m.g0 { + gothrow("notesleep not on g0") + } + if gp.m.waitsema == 0 { + gp.m.waitsema = semacreate() + } + if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) { + // Must be locked (got wakeup). + if n.key != locked { + gothrow("notesleep - waitm out of sync") + } + return + } + // Queued. Sleep. + gp.m.blocked = true + semasleep(-1) + gp.m.blocked = false +} + +//go:nosplit +func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool { + // gp and deadline are logically local variables, but they are written + // as parameters so that the stack space they require is charged + // to the caller. + // This reduces the nosplit footprint of notetsleep_internal. + gp = getg() + + // Register for wakeup on n->waitm. + if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) { + // Must be locked (got wakeup). + if n.key != locked { + gothrow("notetsleep - waitm out of sync") + } + return true + } + if ns < 0 { + // Queued. Sleep. + gp.m.blocked = true + semasleep(-1) + gp.m.blocked = false + return true + } + + deadline = nanotime() + ns + for { + // Registered. Sleep. + gp.m.blocked = true + if semasleep(ns) >= 0 { + gp.m.blocked = false + // Acquired semaphore, semawakeup unregistered us. + // Done. + return true + } + gp.m.blocked = false + // Interrupted or timed out. Still registered. Semaphore not acquired. + ns = deadline - nanotime() + if ns <= 0 { + break + } + // Deadline hasn't arrived. Keep sleeping. + } + + // Deadline arrived. Still registered. Semaphore not acquired. + // Want to give up and return, but have to unregister first, + // so that any notewakeup racing with the return does not + // try to grant us the semaphore when we don't expect it. 
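The wait-list handling above packs an M pointer and the locked flag into the single key word, relying on pointer alignment to keep the low bit free. A standalone sketch of that tagging trick, using a hypothetical waiter type instead of the runtime's m; the uintptr round-trip is for illustration only and is safe here only because the object stays reachable through a local variable:

package main

import (
	"fmt"
	"unsafe"
)

const lockedBit uintptr = 1

type waiter struct {
	next *waiter
}

// pack stores a word-aligned pointer together with the locked flag.
func pack(w *waiter, locked bool) uintptr {
	key := uintptr(unsafe.Pointer(w))
	if locked {
		key |= lockedBit
	}
	return key
}

// unpack recovers the pointer and the flag.
func unpack(key uintptr) (*waiter, bool) {
	return (*waiter)(unsafe.Pointer(key &^ lockedBit)), key&lockedBit != 0
}

func main() {
	w := &waiter{}
	key := pack(w, true)
	p, locked := unpack(key)
	fmt.Println(p == w, locked) // true true
}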
+ for { + v := atomicloaduintptr(&n.key) + switch v { + case uintptr(unsafe.Pointer(gp.m)): + // No wakeup yet; unregister if possible. + if casuintptr(&n.key, v, 0) { + return false + } + case locked: + // Wakeup happened so semaphore is available. + // Grab it to avoid getting out of sync. + gp.m.blocked = true + if semasleep(-1) < 0 { + gothrow("runtime: unable to acquire - semaphore out of sync") + } + gp.m.blocked = false + return true + default: + gothrow("runtime: unexpected waitm - semaphore out of sync") + } + } +} + +func notetsleep(n *note, ns int64) bool { + gp := getg() + if gp != gp.m.g0 && gp.m.gcing == 0 { + gothrow("notetsleep not on g0") + } + if gp.m.waitsema == 0 { + gp.m.waitsema = semacreate() + } + return notetsleep_internal(n, ns, nil, 0) +} + +// same as runtime·notetsleep, but called on user g (not g0) +// calls only nosplit functions between entersyscallblock/exitsyscall +func notetsleepg(n *note, ns int64) bool { + gp := getg() + if gp == gp.m.g0 { + gothrow("notetsleepg on g0") + } + if gp.m.waitsema == 0 { + gp.m.waitsema = semacreate() + } + entersyscallblock() + ok := notetsleep_internal(n, ns, nil, 0) + exitsyscall() + return ok +} diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go new file mode 100644 index 00000000000..11704494404 --- /dev/null +++ b/libgo/go/runtime/malloc.go @@ -0,0 +1,837 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +const ( + debugMalloc = false + + flagNoScan = _FlagNoScan + flagNoZero = _FlagNoZero + + maxTinySize = _TinySize + tinySizeClass = _TinySizeClass + maxSmallSize = _MaxSmallSize + + pageShift = _PageShift + pageSize = _PageSize + pageMask = _PageMask + + bitsPerPointer = _BitsPerPointer + bitsMask = _BitsMask + pointersPerByte = _PointersPerByte + maxGCMask = _MaxGCMask + bitsDead = _BitsDead + bitsPointer = _BitsPointer + + mSpanInUse = _MSpanInUse + + concurrentSweep = _ConcurrentSweep != 0 +) + +// Page number (address>>pageShift) +type pageID uintptr + +// base address for all 0-byte allocations +var zerobase uintptr + +// Allocate an object of size bytes. +// Small objects are allocated from the per-P cache's free lists. +// Large objects (> 32 kB) are allocated straight from the heap. +func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { + if size == 0 { + return unsafe.Pointer(&zerobase) + } + size0 := size + + if flags&flagNoScan == 0 && typ == nil { + gothrow("malloc missing type") + } + + // This function must be atomic wrt GC, but for performance reasons + // we don't acquirem/releasem on fast path. The code below does not have + // split stack checks, so it can't be preempted by GC. + // Functions like roundup/add are inlined. And onM/racemalloc are nosplit. + // If debugMalloc = true, these assumptions are checked below. + if debugMalloc { + mp := acquirem() + if mp.mallocing != 0 { + gothrow("malloc deadlock") + } + mp.mallocing = 1 + if mp.curg != nil { + mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad + } + } + + c := gomcache() + var s *mspan + var x unsafe.Pointer + if size <= maxSmallSize { + if flags&flagNoScan != 0 && size < maxTinySize { + // Tiny allocator. + // + // Tiny allocator combines several tiny allocation requests + // into a single memory block. The resulting memory block + // is freed when all subobjects are unreachable. 
The subobjects + // must be FlagNoScan (don't have pointers), this ensures that + // the amount of potentially wasted memory is bounded. + // + // Size of the memory block used for combining (maxTinySize) is tunable. + // Current setting is 16 bytes, which relates to 2x worst case memory + // wastage (when all but one subobjects are unreachable). + // 8 bytes would result in no wastage at all, but provides less + // opportunities for combining. + // 32 bytes provides more opportunities for combining, + // but can lead to 4x worst case wastage. + // The best case winning is 8x regardless of block size. + // + // Objects obtained from tiny allocator must not be freed explicitly. + // So when an object will be freed explicitly, we ensure that + // its size >= maxTinySize. + // + // SetFinalizer has a special case for objects potentially coming + // from tiny allocator, it such case it allows to set finalizers + // for an inner byte of a memory block. + // + // The main targets of tiny allocator are small strings and + // standalone escaping variables. On a json benchmark + // the allocator reduces number of allocations by ~12% and + // reduces heap size by ~20%. + tinysize := uintptr(c.tinysize) + if size <= tinysize { + tiny := unsafe.Pointer(c.tiny) + // Align tiny pointer for required (conservative) alignment. + if size&7 == 0 { + tiny = roundup(tiny, 8) + } else if size&3 == 0 { + tiny = roundup(tiny, 4) + } else if size&1 == 0 { + tiny = roundup(tiny, 2) + } + size1 := size + (uintptr(tiny) - uintptr(unsafe.Pointer(c.tiny))) + if size1 <= tinysize { + // The object fits into existing tiny block. + x = tiny + c.tiny = (*byte)(add(x, size)) + c.tinysize -= uintptr(size1) + c.local_tinyallocs++ + if debugMalloc { + mp := acquirem() + if mp.mallocing == 0 { + gothrow("bad malloc") + } + mp.mallocing = 0 + if mp.curg != nil { + mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard + } + // Note: one releasem for the acquirem just above. + // The other for the acquirem at start of malloc. + releasem(mp) + releasem(mp) + } + return x + } + } + // Allocate a new maxTinySize block. + s = c.alloc[tinySizeClass] + v := s.freelist + if v == nil { + mp := acquirem() + mp.scalararg[0] = tinySizeClass + onM(mcacheRefill_m) + releasem(mp) + s = c.alloc[tinySizeClass] + v = s.freelist + } + s.freelist = v.next + s.ref++ + //TODO: prefetch v.next + x = unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + // See if we need to replace the existing tiny block with the new one + // based on amount of remaining free space. 
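The replacement check that follows is the last step of the tiny path; the packing rule itself can be isolated in a few lines. A simplified, standalone sketch (tinyFit and roundup are illustrative helpers; the real code also decides when to start a new block):

package main

import "fmt"

const maxTinySize = 16 // same combining-block size as above

func roundup(off, align uintptr) uintptr { return (off + align - 1) &^ (align - 1) }

// tinyFit mimics the bump-pointer logic above: round the current offset to
// the request's conservative alignment, then see whether the request still
// fits in the 16-byte block.
func tinyFit(off, size uintptr) (newOff uintptr, ok bool) {
	align := uintptr(1)
	switch {
	case size&7 == 0:
		align = 8
	case size&3 == 0:
		align = 4
	case size&1 == 0:
		align = 2
	}
	off = roundup(off, align)
	if off+size <= maxTinySize {
		return off + size, true
	}
	return off, false
}

func main() {
	off := uintptr(0)
	for _, size := range []uintptr{1, 2, 8, 4} {
		var ok bool
		off, ok = tinyFit(off, size)
		fmt.Println(size, off, ok)
	}
}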
+ if maxTinySize-size > tinysize { + c.tiny = (*byte)(add(x, size)) + c.tinysize = uintptr(maxTinySize - size) + } + size = maxTinySize + } else { + var sizeclass int8 + if size <= 1024-8 { + sizeclass = size_to_class8[(size+7)>>3] + } else { + sizeclass = size_to_class128[(size-1024+127)>>7] + } + size = uintptr(class_to_size[sizeclass]) + s = c.alloc[sizeclass] + v := s.freelist + if v == nil { + mp := acquirem() + mp.scalararg[0] = uintptr(sizeclass) + onM(mcacheRefill_m) + releasem(mp) + s = c.alloc[sizeclass] + v = s.freelist + } + s.freelist = v.next + s.ref++ + //TODO: prefetch + x = unsafe.Pointer(v) + if flags&flagNoZero == 0 { + v.next = nil + if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 { + memclr(unsafe.Pointer(v), size) + } + } + } + c.local_cachealloc += intptr(size) + } else { + mp := acquirem() + mp.scalararg[0] = uintptr(size) + mp.scalararg[1] = uintptr(flags) + onM(largeAlloc_m) + s = (*mspan)(mp.ptrarg[0]) + mp.ptrarg[0] = nil + releasem(mp) + x = unsafe.Pointer(uintptr(s.start << pageShift)) + size = uintptr(s.elemsize) + } + + if flags&flagNoScan != 0 { + // All objects are pre-marked as noscan. + goto marked + } + + // If allocating a defer+arg block, now that we've picked a malloc size + // large enough to hold everything, cut the "asked for" size down to + // just the defer header, so that the GC bitmap will record the arg block + // as containing nothing at all (as if it were unused space at the end of + // a malloc block caused by size rounding). + // The defer arg areas are scanned as part of scanstack. + if typ == deferType { + size0 = unsafe.Sizeof(_defer{}) + } + + // From here till marked label marking the object as allocated + // and storing type info in the GC bitmap. + { + arena_start := uintptr(unsafe.Pointer(mheap_.arena_start)) + off := (uintptr(x) - arena_start) / ptrSize + xbits := (*uint8)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1)) + shift := (off % wordsPerBitmapByte) * gcBits + if debugMalloc && ((*xbits>>shift)&(bitMask|bitPtrMask)) != bitBoundary { + println("runtime: bits =", (*xbits>>shift)&(bitMask|bitPtrMask)) + gothrow("bad bits in markallocated") + } + + var ti, te uintptr + var ptrmask *uint8 + if size == ptrSize { + // It's one word and it has pointers, it must be a pointer. + *xbits |= (bitsPointer << 2) << shift + goto marked + } + if typ.kind&kindGCProg != 0 { + nptr := (uintptr(typ.size) + ptrSize - 1) / ptrSize + masksize := nptr + if masksize%2 != 0 { + masksize *= 2 // repeated + } + masksize = masksize * pointersPerByte / 8 // 4 bits per word + masksize++ // unroll flag in the beginning + if masksize > maxGCMask && typ.gc[1] != 0 { + // If the mask is too large, unroll the program directly + // into the GC bitmap. It's 7 times slower than copying + // from the pre-unrolled mask, but saves 1/16 of type size + // memory for the mask. + mp := acquirem() + mp.ptrarg[0] = x + mp.ptrarg[1] = unsafe.Pointer(typ) + mp.scalararg[0] = uintptr(size) + mp.scalararg[1] = uintptr(size0) + onM(unrollgcproginplace_m) + releasem(mp) + goto marked + } + ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0]))) + // Check whether the program is already unrolled. 
+ if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 { + mp := acquirem() + mp.ptrarg[0] = unsafe.Pointer(typ) + onM(unrollgcprog_m) + releasem(mp) + } + ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte + } else { + ptrmask = (*uint8)(unsafe.Pointer(typ.gc[0])) // pointer to unrolled mask + } + if size == 2*ptrSize { + *xbits = *ptrmask | bitBoundary + goto marked + } + te = uintptr(typ.size) / ptrSize + // If the type occupies odd number of words, its mask is repeated. + if te%2 == 0 { + te /= 2 + } + // Copy pointer bitmask into the bitmap. + for i := uintptr(0); i < size0; i += 2 * ptrSize { + v := *(*uint8)(add(unsafe.Pointer(ptrmask), ti)) + ti++ + if ti == te { + ti = 0 + } + if i == 0 { + v |= bitBoundary + } + if i+ptrSize == size0 { + v &^= uint8(bitPtrMask << 4) + } + + *xbits = v + xbits = (*byte)(add(unsafe.Pointer(xbits), ^uintptr(0))) + } + if size0%(2*ptrSize) == 0 && size0 < size { + // Mark the word after last object's word as bitsDead. + *xbits = bitsDead << 2 + } + } +marked: + if raceenabled { + racemalloc(x, size) + } + + if debugMalloc { + mp := acquirem() + if mp.mallocing == 0 { + gothrow("bad malloc") + } + mp.mallocing = 0 + if mp.curg != nil { + mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard + } + // Note: one releasem for the acquirem just above. + // The other for the acquirem at start of malloc. + releasem(mp) + releasem(mp) + } + + if debug.allocfreetrace != 0 { + tracealloc(x, size, typ) + } + + if rate := MemProfileRate; rate > 0 { + if size < uintptr(rate) && int32(size) < c.next_sample { + c.next_sample -= int32(size) + } else { + mp := acquirem() + profilealloc(mp, x, size) + releasem(mp) + } + } + + if memstats.heap_alloc >= memstats.next_gc { + gogc(0) + } + + return x +} + +// implementation of new builtin +func newobject(typ *_type) unsafe.Pointer { + flags := uint32(0) + if typ.kind&kindNoPointers != 0 { + flags |= flagNoScan + } + return mallocgc(uintptr(typ.size), typ, flags) +} + +// implementation of make builtin for slices +func newarray(typ *_type, n uintptr) unsafe.Pointer { + flags := uint32(0) + if typ.kind&kindNoPointers != 0 { + flags |= flagNoScan + } + if int(n) < 0 || (typ.size > 0 && n > maxmem/uintptr(typ.size)) { + panic("runtime: allocation size out of range") + } + return mallocgc(uintptr(typ.size)*n, typ, flags) +} + +// rawmem returns a chunk of pointerless memory. It is +// not zeroed. +func rawmem(size uintptr) unsafe.Pointer { + return mallocgc(size, nil, flagNoScan|flagNoZero) +} + +// round size up to next size class +func goroundupsize(size uintptr) uintptr { + if size < maxSmallSize { + if size <= 1024-8 { + return uintptr(class_to_size[size_to_class8[(size+7)>>3]]) + } + return uintptr(class_to_size[size_to_class128[(size-1024+127)>>7]]) + } + if size+pageSize < size { + return size + } + return (size + pageSize - 1) &^ pageMask +} + +func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { + c := mp.mcache + rate := MemProfileRate + if size < uintptr(rate) { + // pick next profile time + // If you change this, also change allocmcache. + if rate > 0x3fffffff { // make 2*rate not overflow + rate = 0x3fffffff + } + next := int32(fastrand1()) % (2 * int32(rate)) + // Subtract the "remainder" of the current allocation. + // Otherwise objects that are close in size to sampling rate + // will be under-sampled, because we consistently discard this remainder. 
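goroundupsize above has two regimes: small requests snap to the size-class tables, large ones to whole pages. The page case is simple enough to restate standalone, assuming the usual 8 KiB runtime page (the size-class tables are omitted; roundToPages is an illustrative helper):

package main

import "fmt"

const (
	pageSize uintptr = 8192 // assumption: _PageSize with _PageShift == 13
	pageMask         = pageSize - 1
)

// roundToPages mirrors the large-object branch of goroundupsize.
func roundToPages(size uintptr) uintptr {
	if size+pageSize < size { // overflow guard, as in the original
		return size
	}
	return (size + pageSize - 1) &^ pageMask
}

func main() {
	fmt.Println(roundToPages(40000)) // 40960, i.e. 5 pages
	fmt.Println(roundToPages(8192))  // already page-sized
}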
+ next -= (int32(size) - c.next_sample) + if next < 0 { + next = 0 + } + c.next_sample = next + } + + mProf_Malloc(x, size) +} + +// force = 1 - do GC regardless of current heap usage +// force = 2 - go GC and eager sweep +func gogc(force int32) { + // The gc is turned off (via enablegc) until the bootstrap has completed. + // Also, malloc gets called in the guts of a number of libraries that might be + // holding locks. To avoid deadlocks during stoptheworld, don't bother + // trying to run gc while holding a lock. The next mallocgc without a lock + // will do the gc instead. + mp := acquirem() + if gp := getg(); gp == mp.g0 || mp.locks > 1 || !memstats.enablegc || panicking != 0 || gcpercent < 0 { + releasem(mp) + return + } + releasem(mp) + mp = nil + + semacquire(&worldsema, false) + + if force == 0 && memstats.heap_alloc < memstats.next_gc { + // typically threads which lost the race to grab + // worldsema exit here when gc is done. + semrelease(&worldsema) + return + } + + // Ok, we're doing it! Stop everybody else + startTime := nanotime() + mp = acquirem() + mp.gcing = 1 + releasem(mp) + onM(stoptheworld) + if mp != acquirem() { + gothrow("gogc: rescheduled") + } + + clearpools() + + // Run gc on the g0 stack. We do this so that the g stack + // we're currently running on will no longer change. Cuts + // the root set down a bit (g0 stacks are not scanned, and + // we don't need to scan gc's internal state). We also + // need to switch to g0 so we can shrink the stack. + n := 1 + if debug.gctrace > 1 { + n = 2 + } + for i := 0; i < n; i++ { + if i > 0 { + startTime = nanotime() + } + // switch to g0, call gc, then switch back + mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits + mp.scalararg[1] = uintptr(startTime >> 32) // high 32 bits + if force >= 2 { + mp.scalararg[2] = 1 // eagersweep + } else { + mp.scalararg[2] = 0 + } + onM(gc_m) + } + + // all done + mp.gcing = 0 + semrelease(&worldsema) + onM(starttheworld) + releasem(mp) + mp = nil + + // now that gc is done, kick off finalizer thread if needed + if !concurrentSweep { + // give the queued finalizers, if any, a chance to run + Gosched() + } +} + +// GC runs a garbage collection. +func GC() { + gogc(2) +} + +// linker-provided +var noptrdata struct{} +var enoptrdata struct{} +var noptrbss struct{} +var enoptrbss struct{} + +// SetFinalizer sets the finalizer associated with x to f. +// When the garbage collector finds an unreachable block +// with an associated finalizer, it clears the association and runs +// f(x) in a separate goroutine. This makes x reachable again, but +// now without an associated finalizer. Assuming that SetFinalizer +// is not called again, the next time the garbage collector sees +// that x is unreachable, it will free x. +// +// SetFinalizer(x, nil) clears any finalizer associated with x. +// +// The argument x must be a pointer to an object allocated by +// calling new or by taking the address of a composite literal. +// The argument f must be a function that takes a single argument +// to which x's type can be assigned, and can have arbitrary ignored return +// values. If either of these is not true, SetFinalizer aborts the +// program. +// +// Finalizers are run in dependency order: if A points at B, both have +// finalizers, and they are otherwise unreachable, only the finalizer +// for A runs; once A is freed, the finalizer for B can run. 
+// If a cyclic structure includes a block with a finalizer, that +// cycle is not guaranteed to be garbage collected and the finalizer +// is not guaranteed to run, because there is no ordering that +// respects the dependencies. +// +// The finalizer for x is scheduled to run at some arbitrary time after +// x becomes unreachable. +// There is no guarantee that finalizers will run before a program exits, +// so typically they are useful only for releasing non-memory resources +// associated with an object during a long-running program. +// For example, an os.File object could use a finalizer to close the +// associated operating system file descriptor when a program discards +// an os.File without calling Close, but it would be a mistake +// to depend on a finalizer to flush an in-memory I/O buffer such as a +// bufio.Writer, because the buffer would not be flushed at program exit. +// +// It is not guaranteed that a finalizer will run if the size of *x is +// zero bytes. +// +// It is not guaranteed that a finalizer will run for objects allocated +// in initializers for package-level variables. Such objects may be +// linker-allocated, not heap-allocated. +// +// A single goroutine runs all finalizers for a program, sequentially. +// If a finalizer must run for a long time, it should do so by starting +// a new goroutine. +func SetFinalizer(obj interface{}, finalizer interface{}) { + e := (*eface)(unsafe.Pointer(&obj)) + etyp := e._type + if etyp == nil { + gothrow("runtime.SetFinalizer: first argument is nil") + } + if etyp.kind&kindMask != kindPtr { + gothrow("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer") + } + ot := (*ptrtype)(unsafe.Pointer(etyp)) + if ot.elem == nil { + gothrow("nil elem type!") + } + + // find the containing object + _, base, _ := findObject(e.data) + + if base == nil { + // 0-length objects are okay. + if e.data == unsafe.Pointer(&zerobase) { + return + } + + // Global initializers might be linker-allocated. + // var Foo = &Object{} + // func main() { + // runtime.SetFinalizer(Foo, nil) + // } + // The relevant segments are: noptrdata, data, bss, noptrbss. + // We cannot assume they are in any order or even contiguous, + // due to external linking. + if uintptr(unsafe.Pointer(&noptrdata)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrdata)) || + uintptr(unsafe.Pointer(&data)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&edata)) || + uintptr(unsafe.Pointer(&bss)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&ebss)) || + uintptr(unsafe.Pointer(&noptrbss)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrbss)) { + return + } + gothrow("runtime.SetFinalizer: pointer not in allocated block") + } + + if e.data != base { + // As an implementation detail we allow to set finalizers for an inner byte + // of an object if it could come from tiny alloc (see mallocgc for details). 
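(Editorial aside, not part of the patch.) The tiny-alloc case referred to in the comment just above comes from mallocgc's small-object path at the start of this hunk: pointer-free objects of up to 16 bytes are bump-allocated out of a shared block tracked by c.tiny/c.tinysize, so a perfectly valid pointer can land in the middle of a malloc block. A standalone sketch of that packing, with invented names; the real code also aligns the offset and only takes this path for noscan objects:

package main

import "fmt"

const maxTinySize = 16 // the same limit the check below compares against

// tinyCache is a userspace stand-in for the mcache fields c.tiny / c.tinysize:
// tiny allocations are bump-allocated out of a shared 16-byte block, so several
// distinct objects share one malloc block.
type tinyCache struct {
	block []byte
	off   int
}

// alloc assumes 0 < size <= maxTinySize.
func (c *tinyCache) alloc(size int) []byte {
	if c.block != nil && len(c.block)-c.off >= size {
		p := c.block[c.off : c.off+size]
		c.off += size
		return p
	}
	c.block = make([]byte, maxTinySize)
	c.off = size
	return c.block[:size]
}

func main() {
	var c tinyCache
	a := c.alloc(5)
	b := c.alloc(7)
	// a and b live in the same block; b starts at an interior offset,
	// which is why SetFinalizer tolerates such pointers.
	fmt.Println(len(a), len(b), c.off) // 5 7 12
}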
+ if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize { + gothrow("runtime.SetFinalizer: pointer not at beginning of allocated block") + } + } + + f := (*eface)(unsafe.Pointer(&finalizer)) + ftyp := f._type + if ftyp == nil { + // switch to M stack and remove finalizer + mp := acquirem() + mp.ptrarg[0] = e.data + onM(removeFinalizer_m) + releasem(mp) + return + } + + if ftyp.kind&kindMask != kindFunc { + gothrow("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function") + } + ft := (*functype)(unsafe.Pointer(ftyp)) + ins := *(*[]*_type)(unsafe.Pointer(&ft.in)) + if ft.dotdotdot || len(ins) != 1 { + gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string) + } + fint := ins[0] + switch { + case fint == etyp: + // ok - same type + goto okarg + case fint.kind&kindMask == kindPtr: + if (fint.x == nil || fint.x.name == nil || etyp.x == nil || etyp.x.name == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem { + // ok - not same type, but both pointers, + // one or the other is unnamed, and same element type, so assignable. + goto okarg + } + case fint.kind&kindMask == kindInterface: + ityp := (*interfacetype)(unsafe.Pointer(fint)) + if len(ityp.mhdr) == 0 { + // ok - satisfies empty interface + goto okarg + } + if _, ok := assertE2I2(ityp, obj); ok { + goto okarg + } + } + gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string) +okarg: + // compute size needed for return parameters + nret := uintptr(0) + for _, t := range *(*[]*_type)(unsafe.Pointer(&ft.out)) { + nret = round(nret, uintptr(t.align)) + uintptr(t.size) + } + nret = round(nret, ptrSize) + + // make sure we have a finalizer goroutine + createfing() + + // switch to M stack to add finalizer record + mp := acquirem() + mp.ptrarg[0] = f.data + mp.ptrarg[1] = e.data + mp.scalararg[0] = nret + mp.ptrarg[2] = unsafe.Pointer(fint) + mp.ptrarg[3] = unsafe.Pointer(ot) + onM(setFinalizer_m) + if mp.scalararg[0] != 1 { + gothrow("runtime.SetFinalizer: finalizer already set") + } + releasem(mp) +} + +// round n up to a multiple of a. a must be a power of 2. +func round(n, a uintptr) uintptr { + return (n + a - 1) &^ (a - 1) +} + +// Look up pointer v in heap. Return the span containing the object, +// the start of the object, and the size of the object. If the object +// does not exist, return nil, nil, 0. 
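(Editorial aside, not part of the patch.) The checks above enforce the contract spelled out in the doc comment: the first argument must point to the start of an allocated block (modulo the tiny-alloc exception), and the second must be nil or a one-argument function the pointer is assignable to. A minimal usage sketch; the wrapper type and the file path are invented for illustration:

package main

import (
	"fmt"
	"os"
	"runtime"
	"time"
)

// fileWrapper owns an *os.File; the finalizer is only a safety net for callers
// that forget Close, as the doc comment above recommends.
type fileWrapper struct {
	f *os.File
}

func open(name string) (*fileWrapper, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	w := &fileWrapper{f: f}
	runtime.SetFinalizer(w, func(w *fileWrapper) {
		fmt.Println("finalizer: closing file that was never closed")
		w.f.Close()
	})
	return w, nil
}

func (w *fileWrapper) Close() error {
	runtime.SetFinalizer(w, nil) // clear the finalizer once closed explicitly
	return w.f.Close()
}

func main() {
	if w, err := open("/etc/hosts"); err == nil {
		_ = w // dropped without Close: the finalizer may run after a collection
	}
	runtime.GC()
	time.Sleep(100 * time.Millisecond) // give the finalizer goroutine a chance
}

Clearing the finalizer inside Close matches the SetFinalizer(x, nil) case documented above and keeps the safety net from firing after an explicit close.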
+func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) { + c := gomcache() + c.local_nlookup++ + if ptrSize == 4 && c.local_nlookup >= 1<<30 { + // purge cache stats to prevent overflow + lock(&mheap_.lock) + purgecachedstats(c) + unlock(&mheap_.lock) + } + + // find span + arena_start := uintptr(unsafe.Pointer(mheap_.arena_start)) + arena_used := uintptr(unsafe.Pointer(mheap_.arena_used)) + if uintptr(v) < arena_start || uintptr(v) >= arena_used { + return + } + p := uintptr(v) >> pageShift + q := p - arena_start>>pageShift + s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*ptrSize)) + if s == nil { + return + } + x = unsafe.Pointer(uintptr(s.start) << pageShift) + + if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse { + s = nil + x = nil + return + } + + n = uintptr(s.elemsize) + if s.sizeclass != 0 { + x = add(x, (uintptr(v)-uintptr(x))/n*n) + } + return +} + +var fingCreate uint32 + +func createfing() { + // start the finalizer goroutine exactly once + if fingCreate == 0 && cas(&fingCreate, 0, 1) { + go runfinq() + } +} + +// This is the goroutine that runs all of the finalizers +func runfinq() { + var ( + frame unsafe.Pointer + framecap uintptr + ) + + for { + lock(&finlock) + fb := finq + finq = nil + if fb == nil { + gp := getg() + fing = gp + fingwait = true + gp.issystem = true + goparkunlock(&finlock, "finalizer wait") + gp.issystem = false + continue + } + unlock(&finlock) + if raceenabled { + racefingo() + } + for fb != nil { + for i := int32(0); i < fb.cnt; i++ { + f := (*finalizer)(add(unsafe.Pointer(&fb.fin), uintptr(i)*unsafe.Sizeof(finalizer{}))) + + framesz := unsafe.Sizeof((interface{})(nil)) + uintptr(f.nret) + if framecap < framesz { + // The frame does not contain pointers interesting for GC, + // all not yet finalized objects are stored in finq. + // If we do not mark it as FlagNoScan, + // the last finalized object is not collected. + frame = mallocgc(framesz, nil, flagNoScan) + framecap = framesz + } + + if f.fint == nil { + gothrow("missing type in runfinq") + } + switch f.fint.kind & kindMask { + case kindPtr: + // direct use of pointer + *(*unsafe.Pointer)(frame) = f.arg + case kindInterface: + ityp := (*interfacetype)(unsafe.Pointer(f.fint)) + // set up with empty interface + (*eface)(frame)._type = &f.ot.typ + (*eface)(frame).data = f.arg + if len(ityp.mhdr) != 0 { + // convert to interface with methods + // this conversion is guaranteed to succeed - we checked in SetFinalizer + *(*fInterface)(frame) = assertE2I(ityp, *(*interface{})(frame)) + } + default: + gothrow("bad kind in runfinq") + } + reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz)) + + // drop finalizer queue references to finalized object + f.fn = nil + f.arg = nil + f.ot = nil + } + fb.cnt = 0 + next := fb.next + lock(&finlock) + fb.next = finc + finc = fb + unlock(&finlock) + fb = next + } + } +} + +var persistent struct { + lock mutex + pos unsafe.Pointer + end unsafe.Pointer +} + +// Wrapper around sysAlloc that can allocate small chunks. +// There is no associated free operation. +// Intended for things like function/type/debug-related persistent data. +// If align is 0, uses default align (currently 8). 
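(Editorial aside, not part of the patch.) findObject above resolves an arbitrary heap pointer by indexing mheap_.spans with the pointer's page number and then, for sized spans, rounding interior pointers down to the base of the enclosing object. That rounding step is plain integer arithmetic; an illustrative, runtime-free sketch with made-up addresses:

package main

import "fmt"

// objectBase mirrors the final step of findObject above: within a span whose
// objects are all elemSize bytes, an interior pointer is rounded down to the
// base of the object that contains it.
func objectBase(spanStart, p, elemSize uintptr) uintptr {
	return spanStart + (p-spanStart)/elemSize*elemSize
}

func main() {
	const spanStart, elemSize = 0x1000, 48
	for _, p := range []uintptr{0x1000, 0x1010, 0x1030, 0x1061} {
		fmt.Printf("ptr %#x -> object base %#x\n", p, objectBase(spanStart, p, elemSize))
	}
}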
+func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer { + const ( + chunk = 256 << 10 + maxBlock = 64 << 10 // VM reservation granularity is 64K on windows + ) + + if align != 0 { + if align&(align-1) != 0 { + gothrow("persistentalloc: align is not a power of 2") + } + if align > _PageSize { + gothrow("persistentalloc: align is too large") + } + } else { + align = 8 + } + + if size >= maxBlock { + return sysAlloc(size, stat) + } + + lock(&persistent.lock) + persistent.pos = roundup(persistent.pos, align) + if uintptr(persistent.pos)+size > uintptr(persistent.end) { + persistent.pos = sysAlloc(chunk, &memstats.other_sys) + if persistent.pos == nil { + unlock(&persistent.lock) + gothrow("runtime: cannot allocate memory") + } + persistent.end = add(persistent.pos, chunk) + } + p := persistent.pos + persistent.pos = add(persistent.pos, size) + unlock(&persistent.lock) + + if stat != &memstats.other_sys { + xadd64(stat, int64(size)) + xadd64(&memstats.other_sys, -int64(size)) + } + return p +} diff --git a/libgo/go/runtime/malloc1.go b/libgo/go/runtime/malloc1.go deleted file mode 100644 index da92f4c2fbf..00000000000 --- a/libgo/go/runtime/malloc1.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// trivial malloc test - -package main - -import ( - "flag" - "fmt" - "runtime" -) - -var chatty = flag.Bool("v", false, "chatty") - -func main() { - memstats := new(runtime.MemStats) - runtime.Free(runtime.Alloc(1)) - runtime.ReadMemStats(memstats) - if *chatty { - fmt.Printf("%+v %v\n", memstats, uint64(0)) - } -} diff --git a/libgo/go/runtime/mallocrand.go b/libgo/go/runtime/mallocrand.go deleted file mode 100644 index f1bcb89cfa4..00000000000 --- a/libgo/go/runtime/mallocrand.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Random malloc test. - -package main - -import ( - "flag" - "math/rand" - "runtime" - "unsafe" -) - -var chatty = flag.Bool("v", false, "chatty") - -var footprint uint64 -var allocated uint64 - -func bigger() { - memstats := new(runtime.MemStats) - runtime.ReadMemStats(memstats) - if f := memstats.Sys; footprint < f { - footprint = f - if *chatty { - println("Footprint", footprint, " for ", allocated) - } - if footprint > 1e9 { - println("too big") - panic("fail") - } - } -} - -// Prime the data structures by allocating one of -// each block in order. After this, there should be -// little reason to ask for more memory from the OS. 
-func prime() { - for i := 0; i < 16; i++ { - b := runtime.Alloc(1 << uint(i)) - runtime.Free(b) - } - for i := uintptr(0); i < 256; i++ { - b := runtime.Alloc(i << 12) - runtime.Free(b) - } -} - -func memset(b *byte, c byte, n uintptr) { - np := uintptr(n) - for i := uintptr(0); i < np; i++ { - *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(b)) + i)) = c - } -} - -func main() { - flag.Parse() - // prime() - var blocks [1]struct { - base *byte - siz uintptr - } - for i := 0; i < 1<<10; i++ { - if i%(1<<10) == 0 && *chatty { - println(i) - } - b := rand.Int() % len(blocks) - if blocks[b].base != nil { - // println("Free", blocks[b].siz, blocks[b].base) - runtime.Free(blocks[b].base) - blocks[b].base = nil - allocated -= uint64(blocks[b].siz) - continue - } - siz := uintptr(rand.Int() >> (11 + rand.Uint32()%20)) - base := runtime.Alloc(siz) - // ptr := uintptr(syscall.BytePtr(base))+uintptr(siz/2) - // obj, size, ref, ok := allocator.find(ptr) - // if obj != base || *ref != 0 || !ok { - // println("find", siz, obj, ref, ok) - // panic("fail") - // } - blocks[b].base = base - blocks[b].siz = siz - allocated += uint64(siz) - // println("Alloc", siz, base) - memset(base, 0xbb, siz) - bigger() - } -} diff --git a/libgo/go/runtime/mallocrep.go b/libgo/go/runtime/mallocrep.go deleted file mode 100644 index 03ee71edb42..00000000000 --- a/libgo/go/runtime/mallocrep.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Repeated malloc test. - -// +build ignore - -package main - -import ( - "flag" - "runtime" -) - -var chatty = flag.Bool("v", false, "chatty") - -var oldsys uint64 -var memstats runtime.MemStats - -func bigger() { - st := &memstats - runtime.ReadMemStats(st) - if oldsys < st.Sys { - oldsys = st.Sys - if *chatty { - println(st.Sys, " system bytes for ", st.Alloc, " Go bytes") - } - if st.Sys > 1e9 { - println("too big") - panic("fail") - } - } -} - -func main() { - runtime.GC() // clean up garbage from init - runtime.ReadMemStats(&memstats) // first call can do some allocations - runtime.MemProfileRate = 0 // disable profiler - stacks := memstats.Alloc // ignore stacks - flag.Parse() - for i := 0; i < 1<<7; i++ { - for j := 1; j <= 1<<22; j <<= 1 { - if i == 0 && *chatty { - println("First alloc:", j) - } - if a := memstats.Alloc - stacks; a != 0 { - println("no allocations but stats report", a, "bytes allocated") - panic("fail") - } - b := runtime.Alloc(uintptr(j)) - runtime.ReadMemStats(&memstats) - during := memstats.Alloc - stacks - runtime.Free(b) - runtime.ReadMemStats(&memstats) - if a := memstats.Alloc - stacks; a != 0 { - println("allocated ", j, ": wrong stats: during=", during, " after=", a, " (want 0)") - panic("fail") - } - bigger() - } - if i%(1<<10) == 0 && *chatty { - println(i) - } - if i == 0 { - if *chatty { - println("Primed", i) - } - // runtime.frozen = true - } - } -} diff --git a/libgo/go/runtime/mallocrep1.go b/libgo/go/runtime/mallocrep1.go deleted file mode 100644 index bc33e3a6b4b..00000000000 --- a/libgo/go/runtime/mallocrep1.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Repeated malloc test. 
- -package main - -import ( - "flag" - "fmt" - "runtime" - "strconv" -) - -var chatty = flag.Bool("v", false, "chatty") -var reverse = flag.Bool("r", false, "reverse") -var longtest = flag.Bool("l", false, "long test") - -var b []*byte -var stats = new(runtime.MemStats) - -func OkAmount(size, n uintptr) bool { - if n < size { - return false - } - if size < 16*8 { - if n > size+16 { - return false - } - } else { - if n > size*9/8 { - return false - } - } - return true -} - -func AllocAndFree(size, count int) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) - if *chatty { - fmt.Printf("size=%d count=%d ...\n", size, count) - } - runtime.ReadMemStats(stats) - n1 := stats.Alloc - for i := 0; i < count; i++ { - b[i] = runtime.Alloc(uintptr(size)) - base, n := runtime.Lookup(b[i]) - if base != b[i] || !OkAmount(uintptr(size), n) { - println("lookup failed: got", base, n, "for", b[i]) - panic("fail") - } - runtime.ReadMemStats(stats) - if stats.Sys > 1e9 { - println("too much memory allocated") - panic("fail") - } - } - runtime.ReadMemStats(stats) - n2 := stats.Alloc - if *chatty { - fmt.Printf("size=%d count=%d stats=%+v\n", size, count, *stats) - } - n3 := stats.Alloc - for j := 0; j < count; j++ { - i := j - if *reverse { - i = count - 1 - j - } - alloc := uintptr(stats.Alloc) - base, n := runtime.Lookup(b[i]) - if base != b[i] || !OkAmount(uintptr(size), n) { - println("lookup failed: got", base, n, "for", b[i]) - panic("fail") - } - runtime.Free(b[i]) - runtime.ReadMemStats(stats) - if stats.Alloc != uint64(alloc-n) { - println("free alloc got", stats.Alloc, "expected", alloc-n, "after free of", n) - panic("fail") - } - if stats.Sys > 1e9 { - println("too much memory allocated") - panic("fail") - } - } - runtime.ReadMemStats(stats) - n4 := stats.Alloc - - if *chatty { - fmt.Printf("size=%d count=%d stats=%+v\n", size, count, *stats) - } - if n2-n1 != n3-n4 { - println("wrong alloc count: ", n2-n1, n3-n4) - panic("fail") - } -} - -func atoi(s string) int { - i, _ := strconv.Atoi(s) - return i -} - -func main() { - runtime.MemProfileRate = 0 // disable profiler - flag.Parse() - b = make([]*byte, 10000) - if flag.NArg() > 0 { - AllocAndFree(atoi(flag.Arg(0)), atoi(flag.Arg(1))) - return - } - maxb := 1 << 22 - if !*longtest { - maxb = 1 << 19 - } - for j := 1; j <= maxb; j <<= 1 { - n := len(b) - max := uintptr(1 << 28) - if !*longtest { - max = uintptr(maxb) - } - if uintptr(j)*uintptr(n) > max { - n = int(max / uintptr(j)) - } - if n < 10 { - n = 10 - } - for m := 1; m <= n; { - AllocAndFree(j, m) - if m == n { - break - } - m = 5 * m / 4 - if m < 4 { - m++ - } - if m > n { - m = n - } - } - } -} diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go index d9690253582..7e4da902e7f 100644 --- a/libgo/go/runtime/map_test.go +++ b/libgo/go/runtime/map_test.go @@ -260,7 +260,7 @@ func testConcurrentReadsAfterGrowth(t *testing.T, useReflect bool) { for nr := 0; nr < numReader; nr++ { go func() { defer wg.Done() - for _ = range m { + for range m { } }() go func() { @@ -423,30 +423,72 @@ func TestMapIterOrder(t *testing.T) { } for _, n := range [...]int{3, 7, 9, 15} { - // Make m be {0: true, 1: true, ..., n-1: true}. + for i := 0; i < 1000; i++ { + // Make m be {0: true, 1: true, ..., n-1: true}. + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + // Check that iterating over the map produces at least two different orderings. 
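(Editorial aside, not part of the patch.) The check described in the comment just above relies on Go's deliberately randomized map iteration order: iterating the same map repeatedly should eventually yield a different key sequence. A small standalone demonstration of the property the test asserts, illustrative only:

package main

import (
	"fmt"
	"reflect"
)

func order(m map[int]bool) []int {
	var s []int
	for k := range m {
		s = append(s, k)
	}
	return s
}

func main() {
	m := map[int]bool{0: true, 1: true, 2: true, 3: true, 4: true}
	first := order(m)
	for try := 0; try < 100; try++ {
		if !reflect.DeepEqual(first, order(m)) {
			fmt.Println("iteration order changed, as the test above expects")
			return
		}
	}
	fmt.Println("order never changed in 100 tries (the test above would fail)")
}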
+ ord := func() []int { + var s []int + for key := range m { + s = append(s, key) + } + return s + } + first := ord() + ok := false + for try := 0; try < 100; try++ { + if !reflect.DeepEqual(first, ord()) { + ok = true + break + } + } + if !ok { + t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) + break + } + } + } +} + +// Issue 8410 +func TestMapSparseIterOrder(t *testing.T) { + // Run several rounds to increase the probability + // of failure. One is not enough. + if runtime.Compiler == "gccgo" { + t.Skip("skipping for gccgo") + } +NextRound: + for round := 0; round < 10; round++ { m := make(map[int]bool) - for i := 0; i < n; i++ { + // Add 1000 items, remove 980. + for i := 0; i < 1000; i++ { m[i] = true } - // Check that iterating over the map produces at least two different orderings. - ord := func() []int { - var s []int - for key := range m { - s = append(s, key) - } - return s + for i := 20; i < 1000; i++ { + delete(m, i) } - first := ord() - ok := false - for try := 0; try < 100; try++ { - if !reflect.DeepEqual(first, ord()) { - ok = true - break - } + + var first []int + for i := range m { + first = append(first, i) } - if !ok { - t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) + + // 800 chances to get a different iteration order. + // See bug 8736 for why we need so many tries. + for n := 0; n < 800; n++ { + idx := 0 + for i := range m { + if i != first[idx] { + // iteration order changed. + continue NextRound + } + idx++ + } } + t.Fatalf("constant iteration order on round %d: %v", round, first) } } @@ -489,3 +531,24 @@ func TestMapStringBytesLookup(t *testing.T) { t.Errorf("AllocsPerRun for x,ok = m[string(buf)] = %v, want 0", n) } } + +func benchmarkMapPop(b *testing.B, n int) { + m := map[int]int{} + for i := 0; i < b.N; i++ { + for j := 0; j < n; j++ { + m[j] = j + } + for j := 0; j < n; j++ { + // Use iterator to pop an element. + // We want this to be fast, see issue 8412. 
+ for k := range m { + delete(m, k) + break + } + } + } +} + +func BenchmarkMapPop100(b *testing.B) { benchmarkMapPop(b, 100) } +func BenchmarkMapPop1000(b *testing.B) { benchmarkMapPop(b, 1000) } +func BenchmarkMapPop10000(b *testing.B) { benchmarkMapPop(b, 10000) } diff --git a/libgo/go/runtime/mapspeed_test.go b/libgo/go/runtime/mapspeed_test.go index da45ea11e49..119eb3f39c7 100644 --- a/libgo/go/runtime/mapspeed_test.go +++ b/libgo/go/runtime/mapspeed_test.go @@ -241,7 +241,7 @@ func BenchmarkMapIter(b *testing.B) { } b.ResetTimer() for i := 0; i < b.N; i++ { - for _, _ = range m { + for range m { } } } @@ -250,7 +250,7 @@ func BenchmarkMapIterEmpty(b *testing.B) { m := make(map[int]bool) b.ResetTimer() for i := 0; i < b.N; i++ { - for _, _ = range m { + for range m { } } } diff --git a/libgo/go/runtime/memmove_test.go b/libgo/go/runtime/memmove_test.go index 540f0feb549..ffda4fe6c53 100644 --- a/libgo/go/runtime/memmove_test.go +++ b/libgo/go/runtime/memmove_test.go @@ -162,43 +162,95 @@ func BenchmarkMemclr256(b *testing.B) { bmMemclr(b, 256) } func BenchmarkMemclr4096(b *testing.B) { bmMemclr(b, 4096) } func BenchmarkMemclr65536(b *testing.B) { bmMemclr(b, 65536) } +func BenchmarkClearFat8(b *testing.B) { + for i := 0; i < b.N; i++ { + var x [8 / 4]uint32 + _ = x + } +} +func BenchmarkClearFat12(b *testing.B) { + for i := 0; i < b.N; i++ { + var x [12 / 4]uint32 + _ = x + } +} +func BenchmarkClearFat16(b *testing.B) { + for i := 0; i < b.N; i++ { + var x [16 / 4]uint32 + _ = x + } +} +func BenchmarkClearFat24(b *testing.B) { + for i := 0; i < b.N; i++ { + var x [24 / 4]uint32 + _ = x + } +} func BenchmarkClearFat32(b *testing.B) { for i := 0; i < b.N; i++ { - var x [32]byte + var x [32 / 4]uint32 _ = x } } func BenchmarkClearFat64(b *testing.B) { for i := 0; i < b.N; i++ { - var x [64]byte + var x [64 / 4]uint32 _ = x } } func BenchmarkClearFat128(b *testing.B) { for i := 0; i < b.N; i++ { - var x [128]byte + var x [128 / 4]uint32 _ = x } } func BenchmarkClearFat256(b *testing.B) { for i := 0; i < b.N; i++ { - var x [256]byte + var x [256 / 4]uint32 _ = x } } func BenchmarkClearFat512(b *testing.B) { for i := 0; i < b.N; i++ { - var x [512]byte + var x [512 / 4]uint32 _ = x } } func BenchmarkClearFat1024(b *testing.B) { for i := 0; i < b.N; i++ { - var x [1024]byte + var x [1024 / 4]uint32 _ = x } } +func BenchmarkCopyFat8(b *testing.B) { + var x [8 / 4]uint32 + for i := 0; i < b.N; i++ { + y := x + _ = y + } +} +func BenchmarkCopyFat12(b *testing.B) { + var x [12 / 4]uint32 + for i := 0; i < b.N; i++ { + y := x + _ = y + } +} +func BenchmarkCopyFat16(b *testing.B) { + var x [16 / 4]uint32 + for i := 0; i < b.N; i++ { + y := x + _ = y + } +} +func BenchmarkCopyFat24(b *testing.B) { + var x [24 / 4]uint32 + for i := 0; i < b.N; i++ { + y := x + _ = y + } +} func BenchmarkCopyFat32(b *testing.B) { var x [32 / 4]uint32 for i := 0; i < b.N; i++ { diff --git a/libgo/go/runtime/mfinal_test.go b/libgo/go/runtime/mfinal_test.go index b47f83c3923..c51bfc68819 100644 --- a/libgo/go/runtime/mfinal_test.go +++ b/libgo/go/runtime/mfinal_test.go @@ -44,10 +44,17 @@ func TestFinalizerType(t *testing.T) { {func(x *int) interface{} { return (*Tint)(x) }, func(v Tinter) { finalize((*int)(v.(*Tint))) }}, } - for _, tt := range finalizerTests { + for i, tt := range finalizerTests { done := make(chan bool, 1) go func() { - v := new(int) + // allocate struct with pointer to avoid hitting tinyalloc. + // Otherwise we can't be sure when the allocation will + // be freed. 
+ type T struct { + v int + p unsafe.Pointer + } + v := &new(T).v *v = 97531 runtime.SetFinalizer(tt.convert(v), tt.finalizer) v = nil @@ -58,7 +65,7 @@ func TestFinalizerType(t *testing.T) { select { case <-ch: case <-time.After(time.Second * 4): - t.Errorf("finalizer for type %T didn't run", tt.finalizer) + t.Errorf("#%d: finalizer for type %T didn't run", i, tt.finalizer) } } } diff --git a/libgo/go/runtime/mgc0.go b/libgo/go/runtime/mgc0.go index 624485d18bf..cbf5e9cfdef 100644 --- a/libgo/go/runtime/mgc0.go +++ b/libgo/go/runtime/mgc0.go @@ -4,6 +4,8 @@ package runtime +import "unsafe" + // Called from C. Returns the Go type *m. func gc_m_ptr(ret *interface{}) { *ret = (*m)(nil) @@ -19,9 +21,132 @@ func gc_itab_ptr(ret *interface{}) { *ret = (*itab)(nil) } -func timenow() (sec int64, nsec int32) - func gc_unixnanotime(now *int64) { sec, nsec := timenow() *now = sec*1e9 + int64(nsec) } + +func freeOSMemory() { + gogc(2) // force GC and do eager sweep + onM(scavenge_m) +} + +var poolcleanup func() + +func registerPoolCleanup(f func()) { + poolcleanup = f +} + +func clearpools() { + // clear sync.Pools + if poolcleanup != nil { + poolcleanup() + } + + for _, p := range &allp { + if p == nil { + break + } + // clear tinyalloc pool + if c := p.mcache; c != nil { + c.tiny = nil + c.tinysize = 0 + + // disconnect cached list before dropping it on the floor, + // so that a dangling ref to one entry does not pin all of them. + var sg, sgnext *sudog + for sg = c.sudogcache; sg != nil; sg = sgnext { + sgnext = sg.next + sg.next = nil + } + c.sudogcache = nil + } + + // clear defer pools + for i := range p.deferpool { + // disconnect cached list before dropping it on the floor, + // so that a dangling ref to one entry does not pin all of them. + var d, dlink *_defer + for d = p.deferpool[i]; d != nil; d = dlink { + dlink = d.link + d.link = nil + } + p.deferpool[i] = nil + } + } +} + +func gosweepone() uintptr +func gosweepdone() bool + +func bgsweep() { + getg().issystem = true + for { + for gosweepone() != ^uintptr(0) { + sweep.nbgsweep++ + Gosched() + } + lock(&gclock) + if !gosweepdone() { + // This can happen if a GC runs between + // gosweepone returning ^0 above + // and the lock being acquired. + unlock(&gclock) + continue + } + sweep.parked = true + goparkunlock(&gclock, "GC sweep wait") + } +} + +// NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer, +// but if we do that, Go inserts a write barrier on *dst = src. 
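(Editorial aside, not part of the patch.) A detail worth calling out from clearpools above: each cached free list (the sudog cache, the defer pools) is unlinked node by node before being dropped, so a dangling reference to one entry cannot pin the whole chain. The same pattern in miniature, with invented types:

package main

import "fmt"

type node struct {
	next *node
	buf  [64]byte // stands in for the cached object's payload
}

// drop discards a cached free list the way clearpools does: each node is
// unlinked first, so a stray reference to one node keeps only that node alive
// rather than the entire chain behind it.
func drop(head *node) {
	for n := head; n != nil; {
		next := n.next
		n.next = nil
		n = next
	}
}

func main() {
	c := &node{}
	b := &node{next: c}
	a := &node{next: b}
	leaked := b // imagine a dangling reference that escaped elsewhere
	drop(a)
	fmt.Println(leaked.next == nil) // true: b no longer pins c
}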
+//go:nosplit +func writebarrierptr(dst *uintptr, src uintptr) { + *dst = src +} + +//go:nosplit +func writebarrierstring(dst *[2]uintptr, src [2]uintptr) { + dst[0] = src[0] + dst[1] = src[1] +} + +//go:nosplit +func writebarrierslice(dst *[3]uintptr, src [3]uintptr) { + dst[0] = src[0] + dst[1] = src[1] + dst[2] = src[2] +} + +//go:nosplit +func writebarrieriface(dst *[2]uintptr, src [2]uintptr) { + dst[0] = src[0] + dst[1] = src[1] +} + +//go:nosplit +func writebarrierfat2(dst *[2]uintptr, _ *byte, src [2]uintptr) { + dst[0] = src[0] + dst[1] = src[1] +} + +//go:nosplit +func writebarrierfat3(dst *[3]uintptr, _ *byte, src [3]uintptr) { + dst[0] = src[0] + dst[1] = src[1] + dst[2] = src[2] +} + +//go:nosplit +func writebarrierfat4(dst *[4]uintptr, _ *byte, src [4]uintptr) { + dst[0] = src[0] + dst[1] = src[1] + dst[2] = src[2] + dst[3] = src[3] +} + +//go:nosplit +func writebarrierfat(typ *_type, dst, src unsafe.Pointer) { + memmove(dst, src, typ.size) +} diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go new file mode 100644 index 00000000000..d409c6c306a --- /dev/null +++ b/libgo/go/runtime/mprof.go @@ -0,0 +1,672 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Malloc profiling. +// Patterned after tcmalloc's algorithms; shorter code. + +package runtime + +import ( + "unsafe" +) + +// NOTE(rsc): Everything here could use cas if contention became an issue. +var proflock mutex + +// All memory allocations are local and do not escape outside of the profiler. +// The profiler is forbidden from referring to garbage-collected memory. + +const ( + // profile types + memProfile bucketType = 1 + iota + blockProfile + + // size of bucket hash table + buckHashSize = 179999 + + // max depth of stack to record in bucket + maxStack = 32 +) + +type bucketType int + +// A bucket holds per-call-stack profiling information. +// The representation is a bit sleazy, inherited from C. +// This struct defines the bucket header. It is followed in +// memory by the stack words and then the actual record +// data, either a memRecord or a blockRecord. +// +// Per-call-stack profiling information. +// Lookup by hashing call stack into a linked-list hash table. +type bucket struct { + next *bucket + allnext *bucket + typ bucketType // memBucket or blockBucket + hash uintptr + size uintptr + nstk uintptr +} + +// A memRecord is the bucket data for a bucket of type memProfile, +// part of the memory profile. +type memRecord struct { + // The following complex 3-stage scheme of stats accumulation + // is required to obtain a consistent picture of mallocs and frees + // for some point in time. + // The problem is that mallocs come in real time, while frees + // come only after a GC during concurrent sweeping. So if we would + // naively count them, we would get a skew toward mallocs. + // + // Mallocs are accounted in recent stats. + // Explicit frees are accounted in recent stats. + // GC frees are accounted in prev stats. + // After GC prev stats are added to final stats and + // recent stats are moved into prev stats. 
+ allocs uintptr + frees uintptr + alloc_bytes uintptr + free_bytes uintptr + + // changes between next-to-last GC and last GC + prev_allocs uintptr + prev_frees uintptr + prev_alloc_bytes uintptr + prev_free_bytes uintptr + + // changes since last GC + recent_allocs uintptr + recent_frees uintptr + recent_alloc_bytes uintptr + recent_free_bytes uintptr +} + +// A blockRecord is the bucket data for a bucket of type blockProfile, +// part of the blocking profile. +type blockRecord struct { + count int64 + cycles int64 +} + +var ( + mbuckets *bucket // memory profile buckets + bbuckets *bucket // blocking profile buckets + buckhash *[179999]*bucket + bucketmem uintptr +) + +// newBucket allocates a bucket with the given type and number of stack entries. +func newBucket(typ bucketType, nstk int) *bucket { + size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0)) + switch typ { + default: + gothrow("invalid profile bucket type") + case memProfile: + size += unsafe.Sizeof(memRecord{}) + case blockProfile: + size += unsafe.Sizeof(blockRecord{}) + } + + b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys)) + bucketmem += size + b.typ = typ + b.nstk = uintptr(nstk) + return b +} + +// stk returns the slice in b holding the stack. +func (b *bucket) stk() []uintptr { + stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b))) + return stk[:b.nstk:b.nstk] +} + +// mp returns the memRecord associated with the memProfile bucket b. +func (b *bucket) mp() *memRecord { + if b.typ != memProfile { + gothrow("bad use of bucket.mp") + } + data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0))) + return (*memRecord)(data) +} + +// bp returns the blockRecord associated with the blockProfile bucket b. +func (b *bucket) bp() *blockRecord { + if b.typ != blockProfile { + gothrow("bad use of bucket.bp") + } + data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0))) + return (*blockRecord)(data) +} + +// Return the bucket for stk[0:nstk], allocating new bucket if needed. +func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket { + if buckhash == nil { + buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys)) + if buckhash == nil { + gothrow("runtime: cannot allocate memory") + } + } + + // Hash stack. + var h uintptr + for _, pc := range stk { + h += pc + h += h << 10 + h ^= h >> 6 + } + // hash in size + h += size + h += h << 10 + h ^= h >> 6 + // finalize + h += h << 3 + h ^= h >> 11 + + i := int(h % buckHashSize) + for b := buckhash[i]; b != nil; b = b.next { + if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) { + return b + } + } + + if !alloc { + return nil + } + + // Create new bucket. 
+ b := newBucket(typ, len(stk)) + copy(b.stk(), stk) + b.hash = h + b.size = size + b.next = buckhash[i] + buckhash[i] = b + if typ == memProfile { + b.allnext = mbuckets + mbuckets = b + } else { + b.allnext = bbuckets + bbuckets = b + } + return b +} + +func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer + +func eqslice(x, y []uintptr) bool { + if len(x) != len(y) { + return false + } + for i, xi := range x { + if xi != y[i] { + return false + } + } + return true +} + +func mprof_GC() { + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + mp.allocs += mp.prev_allocs + mp.frees += mp.prev_frees + mp.alloc_bytes += mp.prev_alloc_bytes + mp.free_bytes += mp.prev_free_bytes + + mp.prev_allocs = mp.recent_allocs + mp.prev_frees = mp.recent_frees + mp.prev_alloc_bytes = mp.recent_alloc_bytes + mp.prev_free_bytes = mp.recent_free_bytes + + mp.recent_allocs = 0 + mp.recent_frees = 0 + mp.recent_alloc_bytes = 0 + mp.recent_free_bytes = 0 + } +} + +// Record that a gc just happened: all the 'recent' statistics are now real. +func mProf_GC() { + lock(&proflock) + mprof_GC() + unlock(&proflock) +} + +// Called by malloc to record a profiled block. +func mProf_Malloc(p unsafe.Pointer, size uintptr) { + var stk [maxStack]uintptr + nstk := callers(4, &stk[0], len(stk)) + lock(&proflock) + b := stkbucket(memProfile, size, stk[:nstk], true) + mp := b.mp() + mp.recent_allocs++ + mp.recent_alloc_bytes += size + unlock(&proflock) + + // Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock. + // This reduces potential contention and chances of deadlocks. + // Since the object must be alive during call to mProf_Malloc, + // it's fine to do this non-atomically. + setprofilebucket(p, b) +} + +func setprofilebucket_m() // mheap.c + +func setprofilebucket(p unsafe.Pointer, b *bucket) { + g := getg() + g.m.ptrarg[0] = p + g.m.ptrarg[1] = unsafe.Pointer(b) + onM(setprofilebucket_m) +} + +// Called when freeing a profiled block. +func mProf_Free(b *bucket, size uintptr, freed bool) { + lock(&proflock) + mp := b.mp() + if freed { + mp.recent_frees++ + mp.recent_free_bytes += size + } else { + mp.prev_frees++ + mp.prev_free_bytes += size + } + unlock(&proflock) +} + +var blockprofilerate uint64 // in CPU ticks + +// SetBlockProfileRate controls the fraction of goroutine blocking events +// that are reported in the blocking profile. The profiler aims to sample +// an average of one blocking event per rate nanoseconds spent blocked. +// +// To include every blocking event in the profile, pass rate = 1. +// To turn off profiling entirely, pass rate <= 0. 
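(Editorial aside, not part of the patch.) A typical way to use the knob documented just above is to enable block profiling at startup and dump the profile through runtime/pprof; a hedged sketch, where the output file name and the artificial contention are purely illustrative:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func main() {
	// Record every blocking event, per the doc comment above.
	runtime.SetBlockProfileRate(1)

	// Manufacture a little lock contention so the profile is not empty.
	var mu sync.Mutex
	mu.Lock()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		mu.Lock() // blocks until main unlocks
		mu.Unlock()
	}()
	time.Sleep(50 * time.Millisecond)
	mu.Unlock()
	wg.Wait()

	// Dump the accumulated blocking profile.
	f, err := os.Create("block.prof")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	_ = pprof.Lookup("block").WriteTo(f, 0)
}

Passing rate = 1 records every blocking event, as described above; long-running services would normally choose a coarser rate.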
+func SetBlockProfileRate(rate int) { + var r int64 + if rate <= 0 { + r = 0 // disable profiling + } else if rate == 1 { + r = 1 // profile everything + } else { + // convert ns to cycles, use float64 to prevent overflow during multiplication + r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000)) + if r == 0 { + r = 1 + } + } + + atomicstore64(&blockprofilerate, uint64(r)) +} + +func blockevent(cycles int64, skip int) { + if cycles <= 0 { + cycles = 1 + } + rate := int64(atomicload64(&blockprofilerate)) + if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) { + return + } + gp := getg() + var nstk int + var stk [maxStack]uintptr + if gp.m.curg == nil || gp.m.curg == gp { + nstk = callers(skip, &stk[0], len(stk)) + } else { + nstk = gcallers(gp.m.curg, skip, &stk[0], len(stk)) + } + lock(&proflock) + b := stkbucket(blockProfile, 0, stk[:nstk], true) + b.bp().count++ + b.bp().cycles += cycles + unlock(&proflock) +} + +// Go interface to profile data. + +// A StackRecord describes a single execution stack. +type StackRecord struct { + Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry +} + +// Stack returns the stack trace associated with the record, +// a prefix of r.Stack0. +func (r *StackRecord) Stack() []uintptr { + for i, v := range r.Stack0 { + if v == 0 { + return r.Stack0[0:i] + } + } + return r.Stack0[0:] +} + +// MemProfileRate controls the fraction of memory allocations +// that are recorded and reported in the memory profile. +// The profiler aims to sample an average of +// one allocation per MemProfileRate bytes allocated. +// +// To include every allocated block in the profile, set MemProfileRate to 1. +// To turn off profiling entirely, set MemProfileRate to 0. +// +// The tools that process the memory profiles assume that the +// profile rate is constant across the lifetime of the program +// and equal to the current value. Programs that change the +// memory profiling rate should do so just once, as early as +// possible in the execution of the program (for example, +// at the beginning of main). +var MemProfileRate int = 512 * 1024 + +// A MemProfileRecord describes the live objects allocated +// by a particular call sequence (stack trace). +type MemProfileRecord struct { + AllocBytes, FreeBytes int64 // number of bytes allocated, freed + AllocObjects, FreeObjects int64 // number of objects allocated, freed + Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry +} + +// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes). +func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes } + +// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects). +func (r *MemProfileRecord) InUseObjects() int64 { + return r.AllocObjects - r.FreeObjects +} + +// Stack returns the stack trace associated with the record, +// a prefix of r.Stack0. +func (r *MemProfileRecord) Stack() []uintptr { + for i, v := range r.Stack0 { + if v == 0 { + return r.Stack0[0:i] + } + } + return r.Stack0[0:] +} + +// MemProfile returns n, the number of records in the current memory profile. +// If len(p) >= n, MemProfile copies the profile into p and returns n, true. +// If len(p) < n, MemProfile does not change p and returns n, false. +// +// If inuseZero is true, the profile includes allocation records +// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes. +// These are sites where memory was allocated, but it has all +// been released back to the runtime. 
+// +// Most clients should use the runtime/pprof package or +// the testing package's -test.memprofile flag instead +// of calling MemProfile directly. +func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) { + lock(&proflock) + clear := true + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + if inuseZero || mp.alloc_bytes != mp.free_bytes { + n++ + } + if mp.allocs != 0 || mp.frees != 0 { + clear = false + } + } + if clear { + // Absolutely no data, suggesting that a garbage collection + // has not yet happened. In order to allow profiling when + // garbage collection is disabled from the beginning of execution, + // accumulate stats as if a GC just happened, and recount buckets. + mprof_GC() + mprof_GC() + n = 0 + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + if inuseZero || mp.alloc_bytes != mp.free_bytes { + n++ + } + } + } + if n <= len(p) { + ok = true + idx := 0 + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + if inuseZero || mp.alloc_bytes != mp.free_bytes { + record(&p[idx], b) + idx++ + } + } + } + unlock(&proflock) + return +} + +// Write b's data to r. +func record(r *MemProfileRecord, b *bucket) { + mp := b.mp() + r.AllocBytes = int64(mp.alloc_bytes) + r.FreeBytes = int64(mp.free_bytes) + r.AllocObjects = int64(mp.allocs) + r.FreeObjects = int64(mp.frees) + copy(r.Stack0[:], b.stk()) + for i := int(b.nstk); i < len(r.Stack0); i++ { + r.Stack0[i] = 0 + } +} + +func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) { + lock(&proflock) + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + fn(b, uintptr(b.nstk), &b.stk()[0], b.size, mp.allocs, mp.frees) + } + unlock(&proflock) +} + +// BlockProfileRecord describes blocking events originated +// at a particular call sequence (stack trace). +type BlockProfileRecord struct { + Count int64 + Cycles int64 + StackRecord +} + +// BlockProfile returns n, the number of records in the current blocking profile. +// If len(p) >= n, BlockProfile copies the profile into p and returns n, true. +// If len(p) < n, BlockProfile does not change p and returns n, false. +// +// Most clients should use the runtime/pprof package or +// the testing package's -test.blockprofile flag instead +// of calling BlockProfile directly. +func BlockProfile(p []BlockProfileRecord) (n int, ok bool) { + lock(&proflock) + for b := bbuckets; b != nil; b = b.allnext { + n++ + } + if n <= len(p) { + ok = true + for b := bbuckets; b != nil; b = b.allnext { + bp := b.bp() + r := &p[0] + r.Count = int64(bp.count) + r.Cycles = int64(bp.cycles) + i := copy(r.Stack0[:], b.stk()) + for ; i < len(r.Stack0); i++ { + r.Stack0[i] = 0 + } + p = p[1:] + } + } + unlock(&proflock) + return +} + +// ThreadCreateProfile returns n, the number of records in the thread creation profile. +// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true. +// If len(p) < n, ThreadCreateProfile does not change p and returns n, false. +// +// Most clients should use the runtime/pprof package instead +// of calling ThreadCreateProfile directly. 
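(Editorial aside, not part of the patch.) MemProfile, BlockProfile and the profile accessors that follow all share the same calling convention: they return the record count n and only fill the slice when it is large enough. Callers therefore grow the slice and retry, as in this sketch for MemProfile; the +50 headroom is an arbitrary illustrative choice:

package main

import (
	"fmt"
	"runtime"
)

// liveMemRecords grows the slice until MemProfile reports ok == true, leaving
// some headroom because new records can appear between the two calls.
func liveMemRecords() []runtime.MemProfileRecord {
	n, _ := runtime.MemProfile(nil, true)
	for {
		p := make([]runtime.MemProfileRecord, n+50)
		var ok bool
		n, ok = runtime.MemProfile(p, true)
		if ok {
			return p[:n]
		}
	}
}

func main() {
	records := liveMemRecords()
	fmt.Println("profiled allocation sites:", len(records))
	for _, r := range records {
		_ = r.InUseBytes() // AllocBytes - FreeBytes, as defined above
	}
}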
+func ThreadCreateProfile(p []StackRecord) (n int, ok bool) { + first := (*m)(atomicloadp(unsafe.Pointer(&allm))) + for mp := first; mp != nil; mp = mp.alllink { + n++ + } + if n <= len(p) { + ok = true + i := 0 + for mp := first; mp != nil; mp = mp.alllink { + for s := range mp.createstack { + p[i].Stack0[s] = uintptr(mp.createstack[s]) + } + i++ + } + } + return +} + +var allgs []*g // proc.c + +// GoroutineProfile returns n, the number of records in the active goroutine stack profile. +// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true. +// If len(p) < n, GoroutineProfile does not change p and returns n, false. +// +// Most clients should use the runtime/pprof package instead +// of calling GoroutineProfile directly. +func GoroutineProfile(p []StackRecord) (n int, ok bool) { + + n = NumGoroutine() + if n <= len(p) { + gp := getg() + semacquire(&worldsema, false) + gp.m.gcing = 1 + onM(stoptheworld) + + n = NumGoroutine() + if n <= len(p) { + ok = true + r := p + sp := getcallersp(unsafe.Pointer(&p)) + pc := getcallerpc(unsafe.Pointer(&p)) + onM(func() { + saveg(pc, sp, gp, &r[0]) + }) + r = r[1:] + for _, gp1 := range allgs { + if gp1 == gp || readgstatus(gp1) == _Gdead { + continue + } + saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) + r = r[1:] + } + } + + gp.m.gcing = 0 + semrelease(&worldsema) + onM(starttheworld) + } + + return n, ok +} + +func saveg(pc, sp uintptr, gp *g, r *StackRecord) { + n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0) + if n < len(r.Stack0) { + r.Stack0[n] = 0 + } +} + +// Stack formats a stack trace of the calling goroutine into buf +// and returns the number of bytes written to buf. +// If all is true, Stack formats stack traces of all other goroutines +// into buf after the trace for the current goroutine. +func Stack(buf []byte, all bool) int { + mp := acquirem() + gp := mp.curg + if all { + semacquire(&worldsema, false) + mp.gcing = 1 + releasem(mp) + onM(stoptheworld) + if mp != acquirem() { + gothrow("Stack: rescheduled") + } + } + + n := 0 + if len(buf) > 0 { + sp := getcallersp(unsafe.Pointer(&buf)) + pc := getcallerpc(unsafe.Pointer(&buf)) + onM(func() { + g0 := getg() + g0.writebuf = buf[0:0:len(buf)] + goroutineheader(gp) + traceback(pc, sp, 0, gp) + if all { + tracebackothers(gp) + } + n = len(g0.writebuf) + g0.writebuf = nil + }) + } + + if all { + mp.gcing = 0 + semrelease(&worldsema) + onM(starttheworld) + } + releasem(mp) + return n +} + +// Tracing of alloc/free/gc. 
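(Editorial aside, not part of the patch.) Stack above writes as much of the trace as fits and reports the byte count, so callers that want complete output grow the buffer until it no longer fills up. An illustrative caller:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Stack truncates when the buffer is too small, so grow until the trace fits.
	buf := make([]byte, 1<<12)
	for {
		n := runtime.Stack(buf, true) // true: include every goroutine
		if n < len(buf) {
			buf = buf[:n]
			break
		}
		buf = make([]byte, 2*len(buf))
	}
	fmt.Printf("%s\n", buf)
}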
+ +var tracelock mutex + +func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) { + lock(&tracelock) + gp := getg() + gp.m.traceback = 2 + if typ == nil { + print("tracealloc(", p, ", ", hex(size), ")\n") + } else { + print("tracealloc(", p, ", ", hex(size), ", ", *typ._string, ")\n") + } + if gp.m.curg == nil || gp == gp.m.curg { + goroutineheader(gp) + pc := getcallerpc(unsafe.Pointer(&p)) + sp := getcallersp(unsafe.Pointer(&p)) + onM(func() { + traceback(pc, sp, 0, gp) + }) + } else { + goroutineheader(gp.m.curg) + traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg) + } + print("\n") + gp.m.traceback = 0 + unlock(&tracelock) +} + +func tracefree(p unsafe.Pointer, size uintptr) { + lock(&tracelock) + gp := getg() + gp.m.traceback = 2 + print("tracefree(", p, ", ", hex(size), ")\n") + goroutineheader(gp) + pc := getcallerpc(unsafe.Pointer(&p)) + sp := getcallersp(unsafe.Pointer(&p)) + onM(func() { + traceback(pc, sp, 0, gp) + }) + print("\n") + gp.m.traceback = 0 + unlock(&tracelock) +} + +func tracegc() { + lock(&tracelock) + gp := getg() + gp.m.traceback = 2 + print("tracegc()\n") + // running on m->g0 stack; show all non-g0 goroutines + tracebackothers(gp) + print("end tracegc\n") + print("\n") + gp.m.traceback = 0 + unlock(&tracelock) +} diff --git a/libgo/go/runtime/netpoll.go b/libgo/go/runtime/netpoll.go new file mode 100644 index 00000000000..3456e020810 --- /dev/null +++ b/libgo/go/runtime/netpoll.go @@ -0,0 +1,455 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows + +package runtime + +import "unsafe" + +// Integrated network poller (platform-independent part). +// A particular implementation (epoll/kqueue) must define the following functions: +// func netpollinit() // to initialize the poller +// func netpollopen(fd uintptr, pd *pollDesc) int32 // to arm edge-triggered notifications +// and associate fd with pd. +// An implementation must call the following function to denote that the pd is ready. +// func netpollready(gpp **g, pd *pollDesc, mode int32) + +// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer +// goroutines respectively. The semaphore can be in the following states: +// pdReady - io readiness notification is pending; +// a goroutine consumes the notification by changing the state to nil. +// pdWait - a goroutine prepares to park on the semaphore, but not yet parked; +// the goroutine commits to park by changing the state to G pointer, +// or, alternatively, concurrent io notification changes the state to READY, +// or, alternatively, concurrent timeout/close changes the state to nil. +// G pointer - the goroutine is blocked on the semaphore; +// io notification or timeout/close changes the state to READY or nil respectively +// and unparks the goroutine. +// nil - nothing of the above. +const ( + pdReady uintptr = 1 + pdWait uintptr = 2 +) + +const pollBlockSize = 4 * 1024 + +// Network poller descriptor. +type pollDesc struct { + link *pollDesc // in pollcache, protected by pollcache.lock + + // The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations. + // This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime. + // pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification) + // proceed w/o taking the lock. 
So closing, rg, rd, wg and wd are manipulated + // in a lock-free way by all operations. + // NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg), + // that will blow up when GC starts moving objects. + lock mutex // protectes the following fields + fd uintptr + closing bool + seq uintptr // protects from stale timers and ready notifications + rg uintptr // pdReady, pdWait, G waiting for read or nil + rt timer // read deadline timer (set if rt.f != nil) + rd int64 // read deadline + wg uintptr // pdReady, pdWait, G waiting for write or nil + wt timer // write deadline timer + wd int64 // write deadline + user unsafe.Pointer // user settable cookie +} + +type pollCache struct { + lock mutex + first *pollDesc + // PollDesc objects must be type-stable, + // because we can get ready notification from epoll/kqueue + // after the descriptor is closed/reused. + // Stale notifications are detected using seq variable, + // seq is incremented when deadlines are changed or descriptor is reused. +} + +var pollcache pollCache + +func netpollServerInit() { + onM(netpollinit) +} + +func netpollOpen(fd uintptr) (*pollDesc, int) { + pd := pollcache.alloc() + lock(&pd.lock) + if pd.wg != 0 && pd.wg != pdReady { + gothrow("netpollOpen: blocked write on free descriptor") + } + if pd.rg != 0 && pd.rg != pdReady { + gothrow("netpollOpen: blocked read on free descriptor") + } + pd.fd = fd + pd.closing = false + pd.seq++ + pd.rg = 0 + pd.rd = 0 + pd.wg = 0 + pd.wd = 0 + unlock(&pd.lock) + + var errno int32 + onM(func() { + errno = netpollopen(fd, pd) + }) + return pd, int(errno) +} + +func netpollClose(pd *pollDesc) { + if !pd.closing { + gothrow("netpollClose: close w/o unblock") + } + if pd.wg != 0 && pd.wg != pdReady { + gothrow("netpollClose: blocked write on closing descriptor") + } + if pd.rg != 0 && pd.rg != pdReady { + gothrow("netpollClose: blocked read on closing descriptor") + } + onM(func() { + netpollclose(uintptr(pd.fd)) + }) + pollcache.free(pd) +} + +func (c *pollCache) free(pd *pollDesc) { + lock(&c.lock) + pd.link = c.first + c.first = pd + unlock(&c.lock) +} + +func netpollReset(pd *pollDesc, mode int) int { + err := netpollcheckerr(pd, int32(mode)) + if err != 0 { + return err + } + if mode == 'r' { + pd.rg = 0 + } else if mode == 'w' { + pd.wg = 0 + } + return 0 +} + +func netpollWait(pd *pollDesc, mode int) int { + err := netpollcheckerr(pd, int32(mode)) + if err != 0 { + return err + } + // As for now only Solaris uses level-triggered IO. + if GOOS == "solaris" { + onM(func() { + netpollarm(pd, mode) + }) + } + for !netpollblock(pd, int32(mode), false) { + err = netpollcheckerr(pd, int32(mode)) + if err != 0 { + return err + } + // Can happen if timeout has fired and unblocked us, + // but before we had a chance to run, timeout has been reset. + // Pretend it has not happened and retry. + } + return 0 +} + +func netpollWaitCanceled(pd *pollDesc, mode int) { + // This function is used only on windows after a failed attempt to cancel + // a pending async IO operation. Wait for ioready, ignore closing or timeouts. + for !netpollblock(pd, int32(mode), true) { + } +} + +func netpollSetDeadline(pd *pollDesc, d int64, mode int) { + lock(&pd.lock) + if pd.closing { + unlock(&pd.lock) + return + } + pd.seq++ // invalidate current timers + // Reset current timers. + if pd.rt.f != nil { + deltimer(&pd.rt) + pd.rt.f = nil + } + if pd.wt.f != nil { + deltimer(&pd.wt) + pd.wt.f = nil + } + // Setup new timers. 
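(Editorial aside, not part of the patch.) The pd.seq++ ("invalidate current timers") above is a generation counter: the current seq is copied into each timer it arms, and a timer that fires after the deadlines were reset or the descriptor was reused sees a mismatch and does nothing. The same guard, sketched with ordinary timers and invented names:

package main

import (
	"fmt"
	"sync"
	"time"
)

// deadliner models the seq guard used by pollDesc above: resetting a deadline
// bumps seq, so a timer armed for an earlier deadline detects the mismatch
// when it finally fires and does nothing.
type deadliner struct {
	mu  sync.Mutex
	seq uint64
}

func (d *deadliner) setDeadline(dur time.Duration) {
	d.mu.Lock()
	d.seq++ // invalidate any timer armed earlier
	seq := d.seq
	d.mu.Unlock()

	time.AfterFunc(dur, func() {
		d.mu.Lock()
		defer d.mu.Unlock()
		if seq != d.seq {
			fmt.Println("stale timer ignored (seq mismatch)")
			return
		}
		fmt.Println("deadline fired")
	})
}

func main() {
	d := &deadliner{}
	d.setDeadline(50 * time.Millisecond)
	d.setDeadline(200 * time.Millisecond) // reset: the first timer is now stale
	time.Sleep(300 * time.Millisecond)
}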
+ if d != 0 && d <= nanotime() { + d = -1 + } + if mode == 'r' || mode == 'r'+'w' { + pd.rd = d + } + if mode == 'w' || mode == 'r'+'w' { + pd.wd = d + } + if pd.rd > 0 && pd.rd == pd.wd { + pd.rt.f = netpollDeadline + pd.rt.when = pd.rd + // Copy current seq into the timer arg. + // Timer func will check the seq against current descriptor seq, + // if they differ the descriptor was reused or timers were reset. + pd.rt.arg = pd + pd.rt.seq = pd.seq + addtimer(&pd.rt) + } else { + if pd.rd > 0 { + pd.rt.f = netpollReadDeadline + pd.rt.when = pd.rd + pd.rt.arg = pd + pd.rt.seq = pd.seq + addtimer(&pd.rt) + } + if pd.wd > 0 { + pd.wt.f = netpollWriteDeadline + pd.wt.when = pd.wd + pd.wt.arg = pd + pd.wt.seq = pd.seq + addtimer(&pd.wt) + } + } + // If we set the new deadline in the past, unblock currently pending IO if any. + var rg, wg *g + atomicstorep(unsafe.Pointer(&wg), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock + if pd.rd < 0 { + rg = netpollunblock(pd, 'r', false) + } + if pd.wd < 0 { + wg = netpollunblock(pd, 'w', false) + } + unlock(&pd.lock) + if rg != nil { + goready(rg) + } + if wg != nil { + goready(wg) + } +} + +func netpollUnblock(pd *pollDesc) { + lock(&pd.lock) + if pd.closing { + gothrow("netpollUnblock: already closing") + } + pd.closing = true + pd.seq++ + var rg, wg *g + atomicstorep(unsafe.Pointer(&rg), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock + rg = netpollunblock(pd, 'r', false) + wg = netpollunblock(pd, 'w', false) + if pd.rt.f != nil { + deltimer(&pd.rt) + pd.rt.f = nil + } + if pd.wt.f != nil { + deltimer(&pd.wt) + pd.wt.f = nil + } + unlock(&pd.lock) + if rg != nil { + goready(rg) + } + if wg != nil { + goready(wg) + } +} + +func netpollfd(pd *pollDesc) uintptr { + return pd.fd +} + +func netpolluser(pd *pollDesc) *unsafe.Pointer { + return &pd.user +} + +func netpollclosing(pd *pollDesc) bool { + return pd.closing +} + +func netpolllock(pd *pollDesc) { + lock(&pd.lock) +} + +func netpollunlock(pd *pollDesc) { + unlock(&pd.lock) +} + +// make pd ready, newly runnable goroutines (if any) are returned in rg/wg +func netpollready(gpp **g, pd *pollDesc, mode int32) { + var rg, wg *g + if mode == 'r' || mode == 'r'+'w' { + rg = netpollunblock(pd, 'r', true) + } + if mode == 'w' || mode == 'r'+'w' { + wg = netpollunblock(pd, 'w', true) + } + if rg != nil { + rg.schedlink = *gpp + *gpp = rg + } + if wg != nil { + wg.schedlink = *gpp + *gpp = wg + } +} + +func netpollcheckerr(pd *pollDesc, mode int32) int { + if pd.closing { + return 1 // errClosing + } + if (mode == 'r' && pd.rd < 0) || (mode == 'w' && pd.wd < 0) { + return 2 // errTimeout + } + return 0 +} + +func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool { + return casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp))) +} + +// returns true if IO is ready, or false if timedout or closed +// waitio - wait only for completed IO, ignore errors +func netpollblock(pd *pollDesc, mode int32, waitio bool) bool { + gpp := &pd.rg + if mode == 'w' { + gpp = &pd.wg + } + + // set the gpp semaphore to WAIT + for { + old := *gpp + if old == pdReady { + *gpp = 0 + return true + } + if old != 0 { + gothrow("netpollblock: double wait") + } + if casuintptr(gpp, 0, pdWait) { + break + } + } + + // need to recheck error states after setting gpp to WAIT + // this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl + // do the opposite: store to closing/rd/wd, membarrier, load of rg/wg + if waitio || 
netpollcheckerr(pd, mode) == 0 { + f := netpollblockcommit + gopark(**(**unsafe.Pointer)(unsafe.Pointer(&f)), unsafe.Pointer(gpp), "IO wait") + } + // be careful to not lose concurrent READY notification + old := xchguintptr(gpp, 0) + if old > pdWait { + gothrow("netpollblock: corrupted state") + } + return old == pdReady +} + +func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g { + gpp := &pd.rg + if mode == 'w' { + gpp = &pd.wg + } + + for { + old := *gpp + if old == pdReady { + return nil + } + if old == 0 && !ioready { + // Only set READY for ioready. runtime_pollWait + // will check for timeout/cancel before waiting. + return nil + } + var new uintptr + if ioready { + new = pdReady + } + if casuintptr(gpp, old, new) { + if old == pdReady || old == pdWait { + old = 0 + } + return (*g)(unsafe.Pointer(old)) + } + } +} + +func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) { + lock(&pd.lock) + // Seq arg is seq when the timer was set. + // If it's stale, ignore the timer event. + if seq != pd.seq { + // The descriptor was reused or timers were reset. + unlock(&pd.lock) + return + } + var rg *g + if read { + if pd.rd <= 0 || pd.rt.f == nil { + gothrow("netpolldeadlineimpl: inconsistent read deadline") + } + pd.rd = -1 + atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock + rg = netpollunblock(pd, 'r', false) + } + var wg *g + if write { + if pd.wd <= 0 || pd.wt.f == nil && !read { + gothrow("netpolldeadlineimpl: inconsistent write deadline") + } + pd.wd = -1 + atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock + wg = netpollunblock(pd, 'w', false) + } + unlock(&pd.lock) + if rg != nil { + goready(rg) + } + if wg != nil { + goready(wg) + } +} + +func netpollDeadline(arg interface{}, seq uintptr) { + netpolldeadlineimpl(arg.(*pollDesc), seq, true, true) +} + +func netpollReadDeadline(arg interface{}, seq uintptr) { + netpolldeadlineimpl(arg.(*pollDesc), seq, true, false) +} + +func netpollWriteDeadline(arg interface{}, seq uintptr) { + netpolldeadlineimpl(arg.(*pollDesc), seq, false, true) +} + +func (c *pollCache) alloc() *pollDesc { + lock(&c.lock) + if c.first == nil { + const pdSize = unsafe.Sizeof(pollDesc{}) + n := pollBlockSize / pdSize + if n == 0 { + n = 1 + } + // Must be in non-GC memory because can be referenced + // only from epoll/kqueue internals. + mem := persistentalloc(n*pdSize, 0, &memstats.other_sys) + for i := uintptr(0); i < n; i++ { + pd := (*pollDesc)(add(mem, i*pdSize)) + pd.link = c.first + c.first = pd + } + } + pd := c.first + c.first = pd.link + unlock(&c.lock) + return pd +} diff --git a/libgo/go/runtime/netpoll_epoll.go b/libgo/go/runtime/netpoll_epoll.go new file mode 100644 index 00000000000..ecfc9cdde8f --- /dev/null +++ b/libgo/go/runtime/netpoll_epoll.go @@ -0,0 +1,97 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
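Before the platform-specific pollers that follow, it may help to see the rg/wg protocol from the generic netpoll.go code above in isolation. The sketch below is a standalone illustration, not code from this patch: the pdReady/pdWait encodings are assumed, and a plain uintptr token stands in for the parked goroutine pointer, but the compare-and-swap transitions mirror the ones performed by netpollblock and netpollunblock.

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	pdReady uintptr = 1 // an I/O readiness notification is pending
	pdWait  uintptr = 2 // a waiter is in the process of parking
)

// block mirrors the shape of netpollblock: consume a pending READY,
// or publish the waiter with a CAS and report that it must park.
func block(gpp *uintptr, waiter uintptr) (ready bool) {
	for {
		old := atomic.LoadUintptr(gpp)
		if old == pdReady {
			if atomic.CompareAndSwapUintptr(gpp, pdReady, 0) {
				return true // readiness was already posted; consume it
			}
			continue
		}
		if old != 0 {
			panic("double wait")
		}
		if atomic.CompareAndSwapUintptr(gpp, 0, pdWait) {
			break
		}
	}
	// The runtime parks here (gopark + netpollblockcommit); the model
	// just swaps the waiter token into the slot instead.
	atomic.CompareAndSwapUintptr(gpp, pdWait, waiter)
	return false
}

// unblock mirrors netpollunblock with ioready set: mark READY and hand
// back any parked waiter so the caller can wake it.
func unblock(gpp *uintptr) (waiter uintptr) {
	for {
		old := atomic.LoadUintptr(gpp)
		if old == pdReady {
			return 0 // already marked ready, nobody to wake
		}
		if atomic.CompareAndSwapUintptr(gpp, old, pdReady) {
			if old == pdWait {
				return 0 // waiter had not finished parking yet
			}
			return old // 0 (no waiter) or the parked waiter's token
		}
	}
}

func main() {
	var rg uintptr                    // models pollDesc.rg
	fmt.Println(block(&rg, 0x1000))   // false: no data yet, waiter parked
	fmt.Printf("%#x\n", unblock(&rg)) // 0x1000: wake the parked waiter
	fmt.Println(block(&rg, 0x1000))   // true: READY was already posted
}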
+ +// +build linux + +package runtime + +import "unsafe" + +func epollcreate(size int32) int32 +func epollcreate1(flags int32) int32 + +//go:noescape +func epollctl(epfd, op, fd int32, ev *epollevent) int32 + +//go:noescape +func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32 +func closeonexec(fd int32) + +var ( + epfd int32 = -1 // epoll descriptor + netpolllasterr int32 +) + +func netpollinit() { + epfd = epollcreate1(_EPOLL_CLOEXEC) + if epfd >= 0 { + return + } + epfd = epollcreate(1024) + if epfd >= 0 { + closeonexec(epfd) + return + } + println("netpollinit: failed to create epoll descriptor", -epfd) + gothrow("netpollinit: failed to create descriptor") +} + +func netpollopen(fd uintptr, pd *pollDesc) int32 { + var ev epollevent + ev.events = _EPOLLIN | _EPOLLOUT | _EPOLLRDHUP | _EPOLLET + *(**pollDesc)(unsafe.Pointer(&ev.data)) = pd + return -epollctl(epfd, _EPOLL_CTL_ADD, int32(fd), &ev) +} + +func netpollclose(fd uintptr) int32 { + var ev epollevent + return -epollctl(epfd, _EPOLL_CTL_DEL, int32(fd), &ev) +} + +func netpollarm(pd *pollDesc, mode int) { + gothrow("unused") +} + +// polls for ready network connections +// returns list of goroutines that become runnable +func netpoll(block bool) (gp *g) { + if epfd == -1 { + return + } + waitms := int32(-1) + if !block { + waitms = 0 + } + var events [128]epollevent +retry: + n := epollwait(epfd, &events[0], int32(len(events)), waitms) + if n < 0 { + if n != -_EINTR && n != netpolllasterr { + netpolllasterr = n + println("runtime: epollwait on fd", epfd, "failed with", -n) + } + goto retry + } + for i := int32(0); i < n; i++ { + ev := &events[i] + if ev.events == 0 { + continue + } + var mode int32 + if ev.events&(_EPOLLIN|_EPOLLRDHUP|_EPOLLHUP|_EPOLLERR) != 0 { + mode += 'r' + } + if ev.events&(_EPOLLOUT|_EPOLLHUP|_EPOLLERR) != 0 { + mode += 'w' + } + if mode != 0 { + pd := *(**pollDesc)(unsafe.Pointer(&ev.data)) + netpollready((**g)(noescape(unsafe.Pointer(&gp))), pd, mode) + } + } + if block && gp == nil { + goto retry + } + return gp +} diff --git a/libgo/go/runtime/netpoll_kqueue.go b/libgo/go/runtime/netpoll_kqueue.go new file mode 100644 index 00000000000..d6d55b97b8d --- /dev/null +++ b/libgo/go/runtime/netpoll_kqueue.go @@ -0,0 +1,101 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package runtime + +// Integrated network poller (kqueue-based implementation). + +import "unsafe" + +func kqueue() int32 + +//go:noescape +func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 +func closeonexec(fd int32) + +var ( + kq int32 = -1 + netpolllasterr int32 +) + +func netpollinit() { + kq = kqueue() + if kq < 0 { + println("netpollinit: kqueue failed with", -kq) + gothrow("netpollinit: kqueue failed") + } + closeonexec(kq) +} + +func netpollopen(fd uintptr, pd *pollDesc) int32 { + // Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR) + // for the whole fd lifetime. The notifications are automatically unregistered + // when fd is closed. 
+ var ev [2]keventt + *(*uintptr)(unsafe.Pointer(&ev[0].ident)) = fd + ev[0].filter = _EVFILT_READ + ev[0].flags = _EV_ADD | _EV_CLEAR + ev[0].fflags = 0 + ev[0].data = 0 + ev[0].udata = (*byte)(unsafe.Pointer(pd)) + ev[1] = ev[0] + ev[1].filter = _EVFILT_WRITE + n := kevent(kq, &ev[0], 2, nil, 0, nil) + if n < 0 { + return -n + } + return 0 +} + +func netpollclose(fd uintptr) int32 { + // Don't need to unregister because calling close() + // on fd will remove any kevents that reference the descriptor. + return 0 +} + +func netpollarm(pd *pollDesc, mode int) { + gothrow("unused") +} + +// Polls for ready network connections. +// Returns list of goroutines that become runnable. +func netpoll(block bool) (gp *g) { + if kq == -1 { + return + } + var tp *timespec + var ts timespec + if !block { + tp = &ts + } + var events [64]keventt +retry: + n := kevent(kq, nil, 0, &events[0], int32(len(events)), tp) + if n < 0 { + if n != -_EINTR && n != netpolllasterr { + netpolllasterr = n + println("runtime: kevent on fd", kq, "failed with", -n) + } + goto retry + } + for i := 0; i < int(n); i++ { + ev := &events[i] + var mode int32 + if ev.filter == _EVFILT_READ { + mode += 'r' + } + if ev.filter == _EVFILT_WRITE { + mode += 'w' + } + if mode != 0 { + netpollready((**g)(noescape(unsafe.Pointer(&gp))), (*pollDesc)(unsafe.Pointer(ev.udata)), mode) + } + } + if block && gp == nil { + goto retry + } + return gp +} diff --git a/libgo/go/runtime/netpoll_nacl.go b/libgo/go/runtime/netpoll_nacl.go new file mode 100644 index 00000000000..5cbc3003214 --- /dev/null +++ b/libgo/go/runtime/netpoll_nacl.go @@ -0,0 +1,26 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fake network poller for NaCl. +// Should never be used, because NaCl network connections do not honor "SetNonblock". + +package runtime + +func netpollinit() { +} + +func netpollopen(fd uintptr, pd *pollDesc) int32 { + return 0 +} + +func netpollclose(fd uintptr) int32 { + return 0 +} + +func netpollarm(pd *pollDesc, mode int) { +} + +func netpoll(block bool) *g { + return nil +} diff --git a/libgo/go/runtime/noasm_arm.go b/libgo/go/runtime/noasm_arm.go new file mode 100644 index 00000000000..dd3ef826766 --- /dev/null +++ b/libgo/go/runtime/noasm_arm.go @@ -0,0 +1,54 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Routines that are implemented in assembly in asm_{amd64,386}.s +// but are implemented in Go for arm. + +package runtime + +func cmpstring(s1, s2 string) int { + l := len(s1) + if len(s2) < l { + l = len(s2) + } + for i := 0; i < l; i++ { + c1, c2 := s1[i], s2[i] + if c1 < c2 { + return -1 + } + if c1 > c2 { + return +1 + } + } + if len(s1) < len(s2) { + return -1 + } + if len(s1) > len(s2) { + return +1 + } + return 0 +} + +func cmpbytes(s1, s2 []byte) int { + l := len(s1) + if len(s2) < l { + l = len(s2) + } + for i := 0; i < l; i++ { + c1, c2 := s1[i], s2[i] + if c1 < c2 { + return -1 + } + if c1 > c2 { + return +1 + } + } + if len(s1) < len(s2) { + return -1 + } + if len(s1) > len(s2) { + return +1 + } + return 0 +} diff --git a/libgo/go/runtime/os_darwin.go b/libgo/go/runtime/os_darwin.go new file mode 100644 index 00000000000..4327ced9148 --- /dev/null +++ b/libgo/go/runtime/os_darwin.go @@ -0,0 +1,24 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func bsdthread_create(stk, mm, gg, fn unsafe.Pointer) int32 +func bsdthread_register() int32 +func mach_msg_trap(h unsafe.Pointer, op int32, send_size, rcv_size, rcv_name, timeout, notify uint32) int32 +func mach_reply_port() uint32 +func mach_task_self() uint32 +func mach_thread_self() uint32 +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func sigprocmask(sig int32, new, old unsafe.Pointer) +func sigaction(mode uint32, new, old unsafe.Pointer) +func sigaltstack(new, old unsafe.Pointer) +func sigtramp() +func setitimer(mode int32, new, old unsafe.Pointer) +func mach_semaphore_wait(sema uint32) int32 +func mach_semaphore_timedwait(sema, sec, nsec uint32) int32 +func mach_semaphore_signal(sema uint32) int32 +func mach_semaphore_signal_all(sema uint32) int32 diff --git a/libgo/go/runtime/os_dragonfly.go b/libgo/go/runtime/os_dragonfly.go new file mode 100644 index 00000000000..cdaa06986ee --- /dev/null +++ b/libgo/go/runtime/os_dragonfly.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func lwp_create(param unsafe.Pointer) int32 +func sigaltstack(new, old unsafe.Pointer) +func sigaction(sig int32, new, old unsafe.Pointer) +func sigprocmask(new, old unsafe.Pointer) +func setitimer(mode int32, new, old unsafe.Pointer) +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func getrlimit(kind int32, limit unsafe.Pointer) int32 +func raise(sig int32) +func sys_umtx_sleep(addr unsafe.Pointer, val, timeout int32) int32 +func sys_umtx_wakeup(addr unsafe.Pointer, val int32) int32 + +const stackSystem = 0 diff --git a/libgo/go/runtime/os_freebsd.go b/libgo/go/runtime/os_freebsd.go new file mode 100644 index 00000000000..59708049c8a --- /dev/null +++ b/libgo/go/runtime/os_freebsd.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func thr_new(param unsafe.Pointer, size int32) +func sigaltstack(new, old unsafe.Pointer) +func sigaction(sig int32, new, old unsafe.Pointer) +func sigprocmask(new, old unsafe.Pointer) +func setitimer(mode int32, new, old unsafe.Pointer) +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func getrlimit(kind int32, limit unsafe.Pointer) int32 +func raise(sig int32) +func sys_umtx_op(addr unsafe.Pointer, mode int32, val uint32, ptr2, ts unsafe.Pointer) int32 diff --git a/libgo/go/runtime/os_linux.go b/libgo/go/runtime/os_linux.go new file mode 100644 index 00000000000..41123ad5709 --- /dev/null +++ b/libgo/go/runtime/os_linux.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +import "unsafe" + +func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32 +func clone(flags int32, stk, mm, gg, fn unsafe.Pointer) int32 +func rt_sigaction(sig uintptr, new, old unsafe.Pointer, size uintptr) int32 +func sigaltstack(new, old unsafe.Pointer) +func setitimer(mode int32, new, old unsafe.Pointer) +func rtsigprocmask(sig int32, new, old unsafe.Pointer, size int32) +func getrlimit(kind int32, limit unsafe.Pointer) int32 +func raise(sig int32) +func sched_getaffinity(pid, len uintptr, buf *uintptr) int32 diff --git a/libgo/go/runtime/os_nacl.go b/libgo/go/runtime/os_nacl.go new file mode 100644 index 00000000000..8dd43ff06f1 --- /dev/null +++ b/libgo/go/runtime/os_nacl.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func nacl_exception_stack(p unsafe.Pointer, size int32) int32 +func nacl_exception_handler(fn, arg unsafe.Pointer) int32 +func nacl_sem_create(flag int32) int32 +func nacl_sem_wait(sem int32) int32 +func nacl_sem_post(sem int32) int32 +func nacl_mutex_create(flag int32) int32 +func nacl_mutex_lock(mutex int32) int32 +func nacl_mutex_trylock(mutex int32) int32 +func nacl_mutex_unlock(mutex int32) int32 +func nacl_cond_create(flag int32) int32 +func nacl_cond_wait(cond, n int32) int32 +func nacl_cond_signal(cond int32) int32 +func nacl_cond_broadcast(cond int32) int32 +func nacl_cond_timed_wait_abs(cond, lock int32, ts unsafe.Pointer) int32 +func nacl_thread_create(fn, stk, tls, xx unsafe.Pointer) int32 +func nacl_nanosleep(ts, extra unsafe.Pointer) int32 + +func os_sigpipe() { + gothrow("too many writes on closed pipe") +} + +func sigpanic() { + g := getg() + if !canpanic(g) { + gothrow("unexpected signal during runtime execution") + } + + // Native Client only invokes the exception handler for memory faults. + g.sig = _SIGSEGV + panicmem() +} diff --git a/libgo/go/runtime/os_netbsd.go b/libgo/go/runtime/os_netbsd.go new file mode 100644 index 00000000000..f000c5e9f64 --- /dev/null +++ b/libgo/go/runtime/os_netbsd.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func setitimer(mode int32, new, old unsafe.Pointer) +func sigaction(sig int32, new, old unsafe.Pointer) +func sigaltstack(new, old unsafe.Pointer) +func sigprocmask(mode int32, new, old unsafe.Pointer) +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func lwp_tramp() +func raise(sig int32) +func getcontext(ctxt unsafe.Pointer) +func lwp_create(ctxt unsafe.Pointer, flags uintptr, lwpid unsafe.Pointer) int32 +func lwp_park(abstime unsafe.Pointer, unpark int32, hint, unparkhint unsafe.Pointer) int32 +func lwp_unpark(lwp int32, hint unsafe.Pointer) int32 +func lwp_self() int32 diff --git a/libgo/go/runtime/os_openbsd.go b/libgo/go/runtime/os_openbsd.go new file mode 100644 index 00000000000..a000f963e33 --- /dev/null +++ b/libgo/go/runtime/os_openbsd.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
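The sigpanic implementations above (NaCl here, with Plan 9 and Windows variants further below) convert hardware faults into ordinary runtime panics through panicmem, panicdivide and friends. As a user-level illustration of what that means for Go code, and assuming nothing beyond the standard library, such a fault arrives as a runtime.Error that a deferred recover can observe; this is a standalone sketch, not code from this patch.

package main

import (
	"fmt"
	"runtime"
)

// deref returns *p, converting a nil-pointer fault into an error value.
// The fault path is: SIGSEGV -> sigpanic -> panicmem -> ordinary panic.
func deref(p *int) (v int, err error) {
	defer func() {
		if r := recover(); r != nil {
			if re, ok := r.(runtime.Error); ok {
				err = re // "invalid memory address or nil pointer dereference"
				return
			}
			panic(r) // not a runtime fault; re-raise it
		}
	}()
	return *p, nil
}

func main() {
	_, err := deref(nil)
	fmt.Println("recovered:", err)
}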
+ +package runtime + +import "unsafe" + +func setitimer(mode int32, new, old unsafe.Pointer) +func sigaction(sig int32, new, old unsafe.Pointer) +func sigaltstack(new, old unsafe.Pointer) +func sigprocmask(mode int32, new uint32) uint32 +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func raise(sig int32) +func tfork(param unsafe.Pointer, psize uintptr, mm, gg, fn unsafe.Pointer) int32 +func thrsleep(ident unsafe.Pointer, clock_id int32, tsp, lock, abort unsafe.Pointer) int32 +func thrwakeup(ident unsafe.Pointer, n int32) int32 diff --git a/libgo/go/runtime/os_plan9.go b/libgo/go/runtime/os_plan9.go new file mode 100644 index 00000000000..20e47bf42e5 --- /dev/null +++ b/libgo/go/runtime/os_plan9.go @@ -0,0 +1,103 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32 +func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32 +func seek(fd int32, offset int64, whence int32) int64 +func exits(msg *byte) +func brk_(addr unsafe.Pointer) uintptr +func sleep(ms int32) int32 +func rfork(flags int32) int32 +func plan9_semacquire(addr *uint32, block int32) int32 +func plan9_tsemacquire(addr *uint32, ms int32) int32 +func plan9_semrelease(addr *uint32, count int32) int32 +func notify(fn unsafe.Pointer) int32 +func noted(mode int32) int32 +func nsec(*int64) int64 +func sigtramp(ureg, msg unsafe.Pointer) +func setfpmasks() +func tstart_plan9(newm *m) +func errstr() string + +type _Plink uintptr + +func os_sigpipe() { + gothrow("too many writes on closed pipe") +} + +func sigpanic() { + g := getg() + if !canpanic(g) { + gothrow("unexpected signal during runtime execution") + } + + note := gostringnocopy((*byte)(unsafe.Pointer(g.m.notesig))) + switch g.sig { + case _SIGRFAULT, _SIGWFAULT: + addr := note[index(note, "addr=")+5:] + g.sigcode1 = uintptr(atolwhex(addr)) + if g.sigcode1 < 0x1000 || g.paniconfault { + panicmem() + } + print("unexpected fault address ", hex(g.sigcode1), "\n") + gothrow("fault") + case _SIGTRAP: + if g.paniconfault { + panicmem() + } + gothrow(note) + case _SIGINTDIV: + panicdivide() + case _SIGFLOAT: + panicfloat() + default: + panic(errorString(note)) + } +} + +func atolwhex(p string) int64 { + for hasprefix(p, " ") || hasprefix(p, "\t") { + p = p[1:] + } + neg := false + if hasprefix(p, "-") || hasprefix(p, "+") { + neg = p[0] == '-' + p = p[1:] + for hasprefix(p, " ") || hasprefix(p, "\t") { + p = p[1:] + } + } + var n int64 + switch { + case hasprefix(p, "0x"), hasprefix(p, "0X"): + p = p[2:] + for ; len(p) > 0; p = p[1:] { + if '0' <= p[0] && p[0] <= '9' { + n = n*16 + int64(p[0]-'0') + } else if 'a' <= p[0] && p[0] <= 'f' { + n = n*16 + int64(p[0]-'a'+10) + } else if 'A' <= p[0] && p[0] <= 'F' { + n = n*16 + int64(p[0]-'A'+10) + } else { + break + } + } + case hasprefix(p, "0"): + for ; len(p) > 0 && '0' <= p[0] && p[0] <= '7'; p = p[1:] { + n = n*8 + int64(p[0]-'0') + } + default: + for ; len(p) > 0 && '0' <= p[0] && p[0] <= '9'; p = p[1:] { + n = n*10 + int64(p[0]-'0') + } + } + if neg { + n = -n + } + return n +} diff --git a/libgo/go/runtime/os_solaris.go b/libgo/go/runtime/os_solaris.go new file mode 100644 index 00000000000..ca13151204a --- /dev/null +++ b/libgo/go/runtime/os_solaris.go @@ -0,0 +1,100 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func setitimer(mode int32, new, old unsafe.Pointer) +func sigaction(sig int32, new, old unsafe.Pointer) +func sigaltstack(new, old unsafe.Pointer) +func sigprocmask(mode int32, new, old unsafe.Pointer) +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func getrlimit(kind int32, limit unsafe.Pointer) +func miniterrno(fn unsafe.Pointer) +func raise(sig int32) +func getcontext(ctxt unsafe.Pointer) +func tstart_sysvicall(mm unsafe.Pointer) uint32 +func nanotime1() int64 +func usleep1(usec uint32) +func osyield1() +func netpollinit() +func netpollopen(fd uintptr, pd *pollDesc) int32 +func netpollclose(fd uintptr) int32 +func netpollarm(pd *pollDesc, mode int) + +type libcFunc byte + +var asmsysvicall6 libcFunc + +//go:nosplit +func sysvicall0(fn *libcFunc) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 0 + // TODO(rsc): Why is noescape necessary here and below? + libcall.args = uintptr(noescape(unsafe.Pointer(&fn))) // it's unused but must be non-nil, otherwise crashes + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall1(fn *libcFunc, a1 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 1 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall2(fn *libcFunc, a1, a2 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 2 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall3(fn *libcFunc, a1, a2, a3 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 3 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 4 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 5 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 6 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} diff --git a/libgo/go/runtime/os_windows.go b/libgo/go/runtime/os_windows.go new file mode 100644 index 00000000000..1528d2fd139 --- /dev/null +++ b/libgo/go/runtime/os_windows.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +type stdFunction *byte + +func stdcall0(fn stdFunction) uintptr +func stdcall1(fn stdFunction, a0 uintptr) uintptr +func stdcall2(fn stdFunction, a0, a1 uintptr) uintptr +func stdcall3(fn stdFunction, a0, a1, a2 uintptr) uintptr +func stdcall4(fn stdFunction, a0, a1, a2, a3 uintptr) uintptr +func stdcall5(fn stdFunction, a0, a1, a2, a3, a4 uintptr) uintptr +func stdcall6(fn stdFunction, a0, a1, a2, a3, a4, a5 uintptr) uintptr +func stdcall7(fn stdFunction, a0, a1, a2, a3, a4, a5, a6 uintptr) uintptr + +func asmstdcall(fn unsafe.Pointer) +func getlasterror() uint32 +func setlasterror(err uint32) +func usleep1(usec uint32) +func netpollinit() +func netpollopen(fd uintptr, pd *pollDesc) int32 +func netpollclose(fd uintptr) int32 +func netpollarm(pd *pollDesc, mode int) + +func os_sigpipe() { + gothrow("too many writes on closed pipe") +} + +func sigpanic() { + g := getg() + if !canpanic(g) { + gothrow("unexpected signal during runtime execution") + } + + switch uint32(g.sig) { + case _EXCEPTION_ACCESS_VIOLATION: + if g.sigcode1 < 0x1000 || g.paniconfault { + panicmem() + } + print("unexpected fault address ", hex(g.sigcode1), "\n") + gothrow("fault") + case _EXCEPTION_INT_DIVIDE_BY_ZERO: + panicdivide() + case _EXCEPTION_INT_OVERFLOW: + panicoverflow() + case _EXCEPTION_FLT_DENORMAL_OPERAND, + _EXCEPTION_FLT_DIVIDE_BY_ZERO, + _EXCEPTION_FLT_INEXACT_RESULT, + _EXCEPTION_FLT_OVERFLOW, + _EXCEPTION_FLT_UNDERFLOW: + panicfloat() + } + gothrow("fault") +} diff --git a/libgo/go/runtime/os_windows_386.go b/libgo/go/runtime/os_windows_386.go new file mode 100644 index 00000000000..86a1906c0c9 --- /dev/null +++ b/libgo/go/runtime/os_windows_386.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// contextPC returns the EIP (program counter) register from the context. +func contextPC(r *context) uintptr { return uintptr(r.eip) } + +// contextSP returns the ESP (stack pointer) register from the context. +func contextSP(r *context) uintptr { return uintptr(r.esp) } diff --git a/libgo/go/runtime/os_windows_amd64.go b/libgo/go/runtime/os_windows_amd64.go new file mode 100644 index 00000000000..3f4d4d07cbf --- /dev/null +++ b/libgo/go/runtime/os_windows_amd64.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// contextPC returns the RIP (program counter) register from the context. +func contextPC(r *context) uintptr { return uintptr(r.rip) } + +// contextSP returns the RSP (stack pointer) register from the context. +func contextSP(r *context) uintptr { return uintptr(r.rsp) } diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go new file mode 100644 index 00000000000..685ff5ca0bc --- /dev/null +++ b/libgo/go/runtime/panic.go @@ -0,0 +1,505 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
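The sysvicallN helpers on Solaris and the stdcallN declarations on Windows above share one shape: a fixed-arity wrapper marshals its arguments into a libcall record and hands it to a single low-level dispatcher (asmsysvicall6 or asmstdcall). The following is a rough, self-contained model of that shape only; the type name, the slice-based argument field and the dispatcher are illustrative stand-ins, not the runtime's.

package main

import "fmt"

// foreignCall models the libcall record the wrappers fill in: the target
// function, the marshalled arguments and the first return value.
type foreignCall struct {
	fn   uintptr   // address of the foreign function (unused in this toy)
	args []uintptr // marshalled arguments (the runtime passes a raw pointer)
	r1   uintptr   // first return value
}

// dispatch stands in for the single assembly trampoline that every wrapper
// funnels through. A real trampoline switches stacks and jumps to c.fn;
// the toy just fabricates a deterministic result so the example runs.
func dispatch(c *foreignCall) {
	var sum uintptr
	for _, a := range c.args {
		sum += a
	}
	c.r1 = sum
}

// call2 shows the wrapper shape: fixed arity, no varargs, one dispatcher.
func call2(fn uintptr, a1, a2 uintptr) uintptr {
	c := foreignCall{fn: fn, args: []uintptr{a1, a2}}
	dispatch(&c)
	return c.r1
}

func main() {
	fmt.Println(call2(0, 40, 2)) // prints 42 in this toy model
}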
+ +package runtime + +import "unsafe" + +var indexError = error(errorString("index out of range")) + +func panicindex() { + panic(indexError) +} + +var sliceError = error(errorString("slice bounds out of range")) + +func panicslice() { + panic(sliceError) +} + +var divideError = error(errorString("integer divide by zero")) + +func panicdivide() { + panic(divideError) +} + +var overflowError = error(errorString("integer overflow")) + +func panicoverflow() { + panic(overflowError) +} + +var floatError = error(errorString("floating point error")) + +func panicfloat() { + panic(floatError) +} + +var memoryError = error(errorString("invalid memory address or nil pointer dereference")) + +func panicmem() { + panic(memoryError) +} + +func throwreturn() { + gothrow("no return at end of a typed function - compiler is broken") +} + +func throwinit() { + gothrow("recursive call during initialization - linker skew") +} + +// Create a new deferred function fn with siz bytes of arguments. +// The compiler turns a defer statement into a call to this. +//go:nosplit +func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn + // the arguments of fn are in a perilous state. The stack map + // for deferproc does not describe them. So we can't let garbage + // collection or stack copying trigger until we've copied them out + // to somewhere safe. deferproc_m does that. Until deferproc_m, + // we can only call nosplit routines. + argp := uintptr(unsafe.Pointer(&fn)) + argp += unsafe.Sizeof(fn) + if GOARCH == "arm" { + argp += ptrSize // skip caller's saved link register + } + mp := acquirem() + mp.scalararg[0] = uintptr(siz) + mp.ptrarg[0] = unsafe.Pointer(fn) + mp.scalararg[1] = argp + mp.scalararg[2] = getcallerpc(unsafe.Pointer(&siz)) + + if mp.curg != getg() { + // go code on the m stack can't defer + gothrow("defer on m") + } + + onM(deferproc_m) + + releasem(mp) + + // deferproc returns 0 normally. + // a deferred func that stops a panic + // makes the deferproc return 1. + // the code the compiler generates always + // checks the return value and jumps to the + // end of the function if deferproc returns != 0. + return0() + // No code can go here - the C return register has + // been set and must not be clobbered. +} + +// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ... +// Each P holds a pool for defers with small arg sizes. +// Assign defer allocations to pools by rounding to 16, to match malloc size classes. + +const ( + deferHeaderSize = unsafe.Sizeof(_defer{}) + minDeferAlloc = (deferHeaderSize + 15) &^ 15 + minDeferArgs = minDeferAlloc - deferHeaderSize +) + +// defer size class for arg size sz +//go:nosplit +func deferclass(siz uintptr) uintptr { + if siz <= minDeferArgs { + return 0 + } + return (siz - minDeferArgs + 15) / 16 +} + +// total size of memory block for defer with arg size sz +func totaldefersize(siz uintptr) uintptr { + if siz <= minDeferArgs { + return minDeferAlloc + } + return deferHeaderSize + siz +} + +// Ensure that defer arg sizes that map to the same defer size class +// also map to the same malloc size class. 
+func testdefersizes() { + var m [len(p{}.deferpool)]int32 + + for i := range m { + m[i] = -1 + } + for i := uintptr(0); ; i++ { + defersc := deferclass(i) + if defersc >= uintptr(len(m)) { + break + } + siz := goroundupsize(totaldefersize(i)) + if m[defersc] < 0 { + m[defersc] = int32(siz) + continue + } + if m[defersc] != int32(siz) { + print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n") + gothrow("bad defer size class") + } + } +} + +// The arguments associated with a deferred call are stored +// immediately after the _defer header in memory. +//go:nosplit +func deferArgs(d *_defer) unsafe.Pointer { + return add(unsafe.Pointer(d), unsafe.Sizeof(*d)) +} + +var deferType *_type // type of _defer struct + +func init() { + var x interface{} + x = (*_defer)(nil) + deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem +} + +// Allocate a Defer, usually using per-P pool. +// Each defer must be released with freedefer. +// Note: runs on M stack +func newdefer(siz int32) *_defer { + var d *_defer + sc := deferclass(uintptr(siz)) + mp := acquirem() + if sc < uintptr(len(p{}.deferpool)) { + pp := mp.p + d = pp.deferpool[sc] + if d != nil { + pp.deferpool[sc] = d.link + } + } + if d == nil { + // Allocate new defer+args. + total := goroundupsize(totaldefersize(uintptr(siz))) + d = (*_defer)(mallocgc(total, deferType, 0)) + } + d.siz = siz + gp := mp.curg + d.link = gp._defer + gp._defer = d + releasem(mp) + return d +} + +// Free the given defer. +// The defer cannot be used after this call. +//go:nosplit +func freedefer(d *_defer) { + if d._panic != nil { + freedeferpanic() + } + if d.fn != nil { + freedeferfn() + } + sc := deferclass(uintptr(d.siz)) + if sc < uintptr(len(p{}.deferpool)) { + mp := acquirem() + pp := mp.p + *d = _defer{} + d.link = pp.deferpool[sc] + pp.deferpool[sc] = d + releasem(mp) + } +} + +// Separate function so that it can split stack. +// Windows otherwise runs out of stack space. +func freedeferpanic() { + // _panic must be cleared before d is unlinked from gp. + gothrow("freedefer with d._panic != nil") +} + +func freedeferfn() { + // fn must be cleared before d is unlinked from gp. + gothrow("freedefer with d.fn != nil") +} + +// Run a deferred function if there is one. +// The compiler inserts a call to this at the end of any +// function which calls defer. +// If there is a deferred function, this will call runtime·jmpdefer, +// which will jump to the deferred function such that it appears +// to have been called by the caller of deferreturn at the point +// just before deferreturn was called. The effect is that deferreturn +// is called again and again until there are no more deferred functions. +// Cannot split the stack because we reuse the caller's frame to +// call the deferred function. + +// The single argument isn't actually used - it just has its address +// taken so it can be matched against pending defers. +//go:nosplit +func deferreturn(arg0 uintptr) { + gp := getg() + d := gp._defer + if d == nil { + return + } + argp := uintptr(unsafe.Pointer(&arg0)) + if d.argp != argp { + return + } + + // Moving arguments around. + // Do not allow preemption here, because the garbage collector + // won't know the form of the arguments until the jmpdefer can + // flip the PC over to fn. + mp := acquirem() + memmove(unsafe.Pointer(argp), deferArgs(d), uintptr(d.siz)) + fn := d.fn + d.fn = nil + gp._defer = d.link + freedefer(d) + releasem(mp) + jmpdefer(fn, argp) +} + +// Goexit terminates the goroutine that calls it. 
No other goroutine is affected. + // Goexit runs all deferred calls before terminating the goroutine. Because Goexit + // is not a panic, however, any recover calls in those deferred functions will return nil. + // + // Calling Goexit from the main goroutine terminates that goroutine + // without func main returning. Since func main has not returned, + // the program continues execution of other goroutines. + // If all other goroutines exit, the program crashes. +func Goexit() { + // Run all deferred functions for the current goroutine. + // This code is similar to gopanic; see that implementation + // for detailed comments. + gp := getg() + for { + d := gp._defer + if d == nil { + break + } + if d.started { + if d._panic != nil { + d._panic.aborted = true + d._panic = nil + } + d.fn = nil + gp._defer = d.link + freedefer(d) + continue + } + d.started = true + reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz)) + if gp._defer != d { + gothrow("bad defer entry in Goexit") + } + d._panic = nil + d.fn = nil + gp._defer = d.link + freedefer(d) + // Note: we ignore recovers here because Goexit isn't a panic + } + goexit() +} + +func canpanic(*g) bool + +// Print all currently active panics. Used when crashing. +func printpanics(p *_panic) { + if p.link != nil { + printpanics(p.link) + print("\t") + } + print("panic: ") + printany(p.arg) + if p.recovered { + print(" [recovered]") + } + print("\n") +} + +// The implementation of the predeclared function panic. +func gopanic(e interface{}) { + gp := getg() + if gp.m.curg != gp { + gothrow("panic on m stack") + } + + // m.softfloat is set during software floating point. + // It increments m.locks to avoid preemption. + // We moved the memory loads out, so there shouldn't be + // any reason for it to panic anymore. + if gp.m.softfloat != 0 { + gp.m.locks-- + gp.m.softfloat = 0 + gothrow("panic during softfloat") + } + if gp.m.mallocing != 0 { + print("panic: ") + printany(e) + print("\n") + gothrow("panic during malloc") + } + if gp.m.gcing != 0 { + print("panic: ") + printany(e) + print("\n") + gothrow("panic during gc") + } + if gp.m.locks != 0 { + print("panic: ") + printany(e) + print("\n") + gothrow("panic holding locks") + } + + var p _panic + p.arg = e + p.link = gp._panic + gp._panic = (*_panic)(noescape(unsafe.Pointer(&p))) + + for { + d := gp._defer + if d == nil { + break + } + + // If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic), + // take defer off list. The earlier panic or Goexit will not continue running. + if d.started { + if d._panic != nil { + d._panic.aborted = true + } + d._panic = nil + d.fn = nil + gp._defer = d.link + freedefer(d) + continue + } + + // Mark defer as started, but keep on list, so that traceback + // can find and update the defer's argument frame if stack growth + // or a garbage collection happens before reflectcall starts executing d.fn. + d.started = true + + // Record the panic that is running the defer. + // If there is a new panic during the deferred call, that panic + // will find d in the list and will mark d._panic (this panic) aborted. + d._panic = (*_panic)(noescape((unsafe.Pointer)(&p))) + + p.argp = unsafe.Pointer(getargp(0)) + reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz)) + p.argp = nil + + // reflectcall did not panic. Remove d. + if gp._defer != d { + gothrow("bad defer entry in panic") + } + d._panic = nil + d.fn = nil + gp._defer = d.link + + // Trigger shrinkage to test stack copy.
See stack_test.go:TestStackPanic + //GC() + + pc := d.pc + argp := unsafe.Pointer(d.argp) // must be pointer so it gets adjusted during stack copy + freedefer(d) + if p.recovered { + gp._panic = p.link + // Aborted panics are marked but remain on the g.panic list. + // Remove them from the list. + for gp._panic != nil && gp._panic.aborted { + gp._panic = gp._panic.link + } + if gp._panic == nil { // must be done with signal + gp.sig = 0 + } + // Pass information about recovering frame to recovery. + gp.sigcode0 = uintptr(argp) + gp.sigcode1 = pc + mcall(recovery_m) + gothrow("recovery failed") // mcall should not return + } + } + + // ran out of deferred calls - old-school panic now + startpanic() + printpanics(gp._panic) + dopanic(0) // should not return + *(*int)(nil) = 0 // not reached +} + +// getargp returns the location where the caller +// writes outgoing function call arguments. +//go:nosplit +func getargp(x int) uintptr { + // x is an argument mainly so that we can return its address. + // However, we need to make the function complex enough + // that it won't be inlined. We always pass x = 0, so this code + // does nothing other than keep the compiler from thinking + // the function is simple enough to inline. + if x > 0 { + return getcallersp(unsafe.Pointer(&x)) * 0 + } + return uintptr(noescape(unsafe.Pointer(&x))) +} + +// The implementation of the predeclared function recover. +// Cannot split the stack because it needs to reliably +// find the stack segment of its caller. +// +// TODO(rsc): Once we commit to CopyStackAlways, +// this doesn't need to be nosplit. +//go:nosplit +func gorecover(argp uintptr) interface{} { + // Must be in a function running as part of a deferred call during the panic. + // Must be called from the topmost function of the call + // (the function used in the defer statement). + // p.argp is the argument pointer of that topmost deferred function call. + // Compare against argp reported by caller. + // If they match, the caller is the one who can recover. + gp := getg() + p := gp._panic + if p != nil && !p.recovered && argp == uintptr(p.argp) { + p.recovered = true + return p.arg + } + return nil +} + +//go:nosplit +func startpanic() { + onM_signalok(startpanic_m) +} + +//go:nosplit +func dopanic(unused int) { + gp := getg() + mp := acquirem() + mp.ptrarg[0] = unsafe.Pointer(gp) + mp.scalararg[0] = getcallerpc((unsafe.Pointer)(&unused)) + mp.scalararg[1] = getcallersp((unsafe.Pointer)(&unused)) + onM_signalok(dopanic_m) // should never return + *(*int)(nil) = 0 +} + +//go:nosplit +func throw(s *byte) { + gp := getg() + if gp.m.throwing == 0 { + gp.m.throwing = 1 + } + startpanic() + print("fatal error: ", gostringnocopy(s), "\n") + dopanic(0) + *(*int)(nil) = 0 // not reached +} + +//go:nosplit +func gothrow(s string) { + gp := getg() + if gp.m.throwing == 0 { + gp.m.throwing = 1 + } + startpanic() + print("fatal error: ", s, "\n") + dopanic(0) + *(*int)(nil) = 0 // not reached +} diff --git a/libgo/go/runtime/pprof/mprof_test.go b/libgo/go/runtime/pprof/mprof_test.go new file mode 100644 index 00000000000..d4a948563c6 --- /dev/null +++ b/libgo/go/runtime/pprof/mprof_test.go @@ -0,0 +1,99 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pprof_test + +import ( + "bytes" + "fmt" + "regexp" + "runtime" + . 
"runtime/pprof" + "testing" + "unsafe" +) + +var memSink interface{} + +func allocateTransient1M() { + for i := 0; i < 1024; i++ { + memSink = &struct{ x [1024]byte }{} + } +} + +func allocateTransient2M() { + // prevent inlining + if memSink == nil { + panic("bad") + } + memSink = make([]byte, 2<<20) +} + +type Obj32 struct { + link *Obj32 + pad [32 - unsafe.Sizeof(uintptr(0))]byte +} + +var persistentMemSink *Obj32 + +func allocatePersistent1K() { + for i := 0; i < 32; i++ { + // Can't use slice because that will introduce implicit allocations. + obj := &Obj32{link: persistentMemSink} + persistentMemSink = obj + } +} + +var memoryProfilerRun = 0 + +func TestMemoryProfiler(t *testing.T) { + // Disable sampling, otherwise it's difficult to assert anything. + oldRate := runtime.MemProfileRate + runtime.MemProfileRate = 1 + defer func() { + runtime.MemProfileRate = oldRate + }() + + // Allocate a meg to ensure that mcache.next_sample is updated to 1. + for i := 0; i < 1024; i++ { + memSink = make([]byte, 1024) + } + + // Do the interesting allocations. + allocateTransient1M() + allocateTransient2M() + allocatePersistent1K() + memSink = nil + + runtime.GC() // materialize stats + var buf bytes.Buffer + if err := Lookup("heap").WriteTo(&buf, 1); err != nil { + t.Fatalf("failed to write heap profile: %v", err) + } + + memoryProfilerRun++ + + tests := []string{ + fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f x]+ +# 0x[0-9,a-f]+ pprof_test\.allocatePersistent1K\+0x[0-9,a-f]+ .*/mprof_test\.go:43 +# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test\.go:66 +`, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun), + + fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f x]+ +# 0x[0-9,a-f]+ pprof_test\.allocateTransient1M\+0x[0-9,a-f]+ .*/mprof_test.go:21 +# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:64 +`, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun), + + fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f x]+ +# 0x[0-9,a-f]+ pprof_test\.allocateTransient2M\+0x[0-9,a-f]+ .*/mprof_test.go:30 +# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:65 +`, memoryProfilerRun, (2<<20)*memoryProfilerRun), + } + + for _, test := range tests { + if !regexp.MustCompile(test).Match(buf.Bytes()) { + t.Fatalf("The entry did not match:\n%v\n\nProfile:\n%v\n", test, buf.String()) + } + } +} diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go index bd0b25f0da7..9c63ccd90b5 100644 --- a/libgo/go/runtime/pprof/pprof.go +++ b/libgo/go/runtime/pprof/pprof.go @@ -345,7 +345,10 @@ func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) { // Hide runtime.goexit and any runtime functions at the beginning. // This is useful mainly for allocation traces. wasPanic = name == "runtime.panic" - if name == "runtime.goexit" || !show && strings.HasPrefix(name, "runtime.") { + if name == "runtime.goexit" || !show && (strings.HasPrefix(name, "runtime.") || strings.HasPrefix(name, "runtime_")) { + continue + } + if !show && !strings.Contains(name, ".") && strings.HasPrefix(name, "__go_") { continue } show = true @@ -579,12 +582,6 @@ func StartCPUProfile(w io.Writer) error { // each client to specify the frequency, we hard code it. const hz = 100 - // Avoid queueing behind StopCPUProfile. - // Could use TryLock instead if we had it. 
- if cpu.profiling { - return fmt.Errorf("cpu profiling already in use") - } - cpu.Lock() defer cpu.Unlock() if cpu.done == nil { diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go index f714472fd55..10699637205 100644 --- a/libgo/go/runtime/pprof/pprof_test.go +++ b/libgo/go/runtime/pprof/pprof_test.go @@ -9,7 +9,6 @@ package pprof_test import ( "bytes" "fmt" - "hash/crc32" "math/big" "os/exec" "regexp" @@ -22,35 +21,65 @@ import ( "unsafe" ) -func TestCPUProfile(t *testing.T) { - buf := make([]byte, 100000) - testCPUProfile(t, []string{"crc32.update"}, func() { - // This loop takes about a quarter second on a 2 GHz laptop. - // We only need to get one 100 Hz clock tick, so we've got - // a 25x safety buffer. - for i := 0; i < 1000; i++ { - crc32.ChecksumIEEE(buf) +func cpuHogger(f func()) { + // We only need to get one 100 Hz clock tick, so we've got + // a 25x safety buffer. + // But do at least 500 iterations (which should take about 100ms), + // otherwise TestCPUProfileMultithreaded can fail if only one + // thread is scheduled during the 250ms period. + t0 := time.Now() + for i := 0; i < 500 || time.Since(t0) < 250*time.Millisecond; i++ { + f() + } +} + +var ( + salt1 = 0 + salt2 = 0 +) + +// The actual CPU hogging function. +// Must not call other functions nor access heap/globals in the loop, +// otherwise under race detector the samples will be in the race runtime. +func cpuHog1() { + foo := salt1 + for i := 0; i < 1e5; i++ { + if foo > 0 { + foo *= foo + } else { + foo *= foo + 1 } + } + salt1 = foo +} + +func cpuHog2() { + foo := salt2 + for i := 0; i < 1e5; i++ { + if foo > 0 { + foo *= foo + } else { + foo *= foo + 2 + } + } + salt2 = foo +} + +func TestCPUProfile(t *testing.T) { + testCPUProfile(t, []string{"pprof_test.cpuHog1"}, func() { + cpuHogger(cpuHog1) }) } func TestCPUProfileMultithreaded(t *testing.T) { - buf := make([]byte, 100000) defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) - testCPUProfile(t, []string{"crc32.update"}, func() { + testCPUProfile(t, []string{"pprof_test.cpuHog1", "pprof_test.cpuHog2"}, func() { c := make(chan int) go func() { - for i := 0; i < 2000; i++ { - crc32.Update(0, crc32.IEEETable, buf) - } + cpuHogger(cpuHog1) c <- 1 }() - // This loop takes about a quarter second on a 2 GHz laptop. - // We only need to get one 100 Hz clock tick, so we've got - // a 25x safety buffer. - for i := 0; i < 2000; i++ { - crc32.ChecksumIEEE(buf) - } + cpuHogger(cpuHog2) <-c }) } @@ -110,7 +139,7 @@ func testCPUProfile(t *testing.T, need []string, f func()) { f() StopCPUProfile() - // Check that profile is well formed and contains ChecksumIEEE. + // Check that profile is well formed and contains need. have := make([]uintptr, len(need)) parseProfile(t, prof.Bytes(), func(count uintptr, stk []uintptr) { for _, pc := range stk { @@ -118,6 +147,7 @@ func testCPUProfile(t *testing.T, need []string, f func()) { if f == nil { continue } + t.Log(f.Name(), count) for i, name := range need { if strings.Contains(f.Name(), name) { have[i] += count @@ -220,7 +250,7 @@ func TestGoroutineSwitch(t *testing.T) { // exists to record a PC without a traceback. Those are okay. 
if len(stk) == 2 { f := runtime.FuncForPC(stk[1]) - if f != nil && (f.Name() == "System" || f.Name() == "ExternalCode") { + if f != nil && (f.Name() == "System" || f.Name() == "ExternalCode" || f.Name() == "GC") { return } } @@ -282,39 +312,45 @@ func TestBlockProfile(t *testing.T) { tests := [...]TestCase{ {"chan recv", blockChanRecv, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanRecv\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/runtime/chan.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanRecv\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, {"chan send", blockChanSend, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ runtime\.chansend1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanSend\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime\.chansend1\+0x[0-9,a-f]+ .*/src/runtime/chan.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanSend\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, {"chan close", blockChanClose, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanClose\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/runtime/chan.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanClose\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, {"select recv async", blockSelectRecvAsync, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectRecvAsync\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/runtime/select.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectRecvAsync\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, {"select send sync", blockSelectSendSync, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectSendSync\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ 
runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/runtime/select.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectSendSync\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, {"mutex", blockMutex, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9,a-f]+ .*/src/pkg/sync/mutex\.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockMutex\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9,a-f]+ .*/src/sync/mutex\.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockMutex\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +`}, + {"cond", blockCond, ` +[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ +# 0x[0-9,a-f]+ sync\.\(\*Cond\)\.Wait\+0x[0-9,a-f]+ .*/src/sync/cond\.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockCond\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, } @@ -402,3 +438,17 @@ func blockMutex() { }() mu.Lock() } + +func blockCond() { + var mu sync.Mutex + c := sync.NewCond(&mu) + mu.Lock() + go func() { + time.Sleep(blockDelay) + mu.Lock() + c.Signal() + mu.Unlock() + }() + c.Wait() + mu.Unlock() +} diff --git a/libgo/go/runtime/print1.go b/libgo/go/runtime/print1.go new file mode 100644 index 00000000000..8f8268873b2 --- /dev/null +++ b/libgo/go/runtime/print1.go @@ -0,0 +1,323 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// The compiler knows that a print of a value of this type +// should use printhex instead of printuint (decimal). +type hex uint64 + +func bytes(s string) (ret []byte) { + rp := (*slice)(unsafe.Pointer(&ret)) + sp := (*_string)(noescape(unsafe.Pointer(&s))) + rp.array = sp.str + rp.len = uint(sp.len) + rp.cap = uint(sp.len) + return +} + +// printf is only called from C code. It has no type information for the args, +// but C stacks are ignored by the garbage collector anyway, so having +// type information would not add anything. +//go:nosplit +func printf(s *byte) { + vprintf(gostringnocopy(s), add(unsafe.Pointer(&s), unsafe.Sizeof(s))) +} + +// sprintf is only called from C code. It has no type information for the args, +// but C stacks are ignored by the garbage collector anyway, so having +// type information would not add anything. +//go:nosplit +func snprintf(dst *byte, n int32, s *byte) { + buf := (*[1 << 30]byte)(unsafe.Pointer(dst))[0:n:n] + + gp := getg() + gp.writebuf = buf[0:0 : n-1] // leave room for NUL, this is called from C + vprintf(gostringnocopy(s), add(unsafe.Pointer(&s), unsafe.Sizeof(s))) + buf[len(gp.writebuf)] = '\x00' + gp.writebuf = nil +} + +//var debuglock mutex + +// write to goroutine-local buffer if diverting output, +// or else standard error. 
+func gwrite(b []byte) { + if len(b) == 0 { + return + } + gp := getg() + if gp == nil || gp.writebuf == nil { + write(2, unsafe.Pointer(&b[0]), int32(len(b))) + return + } + + n := copy(gp.writebuf[len(gp.writebuf):cap(gp.writebuf)], b) + gp.writebuf = gp.writebuf[:len(gp.writebuf)+n] +} + +func prints(s *byte) { + b := (*[1 << 30]byte)(unsafe.Pointer(s)) + for i := 0; ; i++ { + if b[i] == 0 { + gwrite(b[:i]) + return + } + } +} + +func printsp() { + print(" ") +} + +func printnl() { + print("\n") +} + +// Very simple printf. Only for debugging prints. +// Do not add to this without checking with Rob. +func vprintf(str string, arg unsafe.Pointer) { + //lock(&debuglock); + + s := bytes(str) + start := 0 + i := 0 + for ; i < len(s); i++ { + if s[i] != '%' { + continue + } + if i > start { + gwrite(s[start:i]) + } + if i++; i >= len(s) { + break + } + var siz uintptr + switch s[i] { + case 't', 'c': + siz = 1 + case 'd', 'x': // 32-bit + arg = roundup(arg, 4) + siz = 4 + case 'D', 'U', 'X', 'f': // 64-bit + arg = roundup(arg, unsafe.Sizeof(uintreg(0))) + siz = 8 + case 'C': + arg = roundup(arg, unsafe.Sizeof(uintreg(0))) + siz = 16 + case 'p', 's': // pointer-sized + arg = roundup(arg, unsafe.Sizeof(uintptr(0))) + siz = unsafe.Sizeof(uintptr(0)) + case 'S': // pointer-aligned but bigger + arg = roundup(arg, unsafe.Sizeof(uintptr(0))) + siz = unsafe.Sizeof(string("")) + case 'a': // pointer-aligned but bigger + arg = roundup(arg, unsafe.Sizeof(uintptr(0))) + siz = unsafe.Sizeof([]byte{}) + case 'i', 'e': // pointer-aligned but bigger + arg = roundup(arg, unsafe.Sizeof(uintptr(0))) + siz = unsafe.Sizeof(interface{}(nil)) + } + switch s[i] { + case 'a': + printslice(*(*[]byte)(arg)) + case 'c': + printbyte(*(*byte)(arg)) + case 'd': + printint(int64(*(*int32)(arg))) + case 'D': + printint(int64(*(*int64)(arg))) + case 'e': + printeface(*(*interface{})(arg)) + case 'f': + printfloat(*(*float64)(arg)) + case 'C': + printcomplex(*(*complex128)(arg)) + case 'i': + printiface(*(*fInterface)(arg)) + case 'p': + printpointer(*(*unsafe.Pointer)(arg)) + case 's': + prints(*(**byte)(arg)) + case 'S': + printstring(*(*string)(arg)) + case 't': + printbool(*(*bool)(arg)) + case 'U': + printuint(*(*uint64)(arg)) + case 'x': + printhex(uint64(*(*uint32)(arg))) + case 'X': + printhex(*(*uint64)(arg)) + } + arg = add(arg, siz) + start = i + 1 + } + if start < i { + gwrite(s[start:i]) + } + + //unlock(&debuglock); +} + +func printpc(p unsafe.Pointer) { + print("PC=", hex(uintptr(p))) +} + +func printbool(v bool) { + if v { + print("true") + } else { + print("false") + } +} + +func printbyte(c byte) { + gwrite((*[1]byte)(unsafe.Pointer(&c))[:]) +} + +func printfloat(v float64) { + switch { + case v != v: + print("NaN") + return + case v+v == v && v > 0: + print("+Inf") + return + case v+v == v && v < 0: + print("-Inf") + return + } + + const n = 7 // digits printed + var buf [n + 7]byte + buf[0] = '+' + e := 0 // exp + if v == 0 { + if 1/v < 0 { + buf[0] = '-' + } + } else { + if v < 0 { + v = -v + buf[0] = '-' + } + + // normalize + for v >= 10 { + e++ + v /= 10 + } + for v < 1 { + e-- + v *= 10 + } + + // round + h := 5.0 + for i := 0; i < n; i++ { + h /= 10 + } + v += h + if v >= 10 { + e++ + v /= 10 + } + } + + // format +d.dddd+edd + for i := 0; i < n; i++ { + s := int(v) + buf[i+2] = byte(s + '0') + v -= float64(s) + v *= 10 + } + buf[1] = buf[2] + buf[2] = '.' 
+ + buf[n+2] = 'e' + buf[n+3] = '+' + if e < 0 { + e = -e + buf[n+3] = '-' + } + + buf[n+4] = byte(e/100) + '0' + buf[n+5] = byte(e/10)%10 + '0' + buf[n+6] = byte(e%10) + '0' + gwrite(buf[:]) +} + +func printcomplex(c complex128) { + print("(", real(c), imag(c), "i)") +} + +func printuint(v uint64) { + var buf [100]byte + i := len(buf) + for i--; i > 0; i-- { + buf[i] = byte(v%10 + '0') + if v < 10 { + break + } + v /= 10 + } + gwrite(buf[i:]) +} + +func printint(v int64) { + if v < 0 { + print("-") + v = -v + } + printuint(uint64(v)) +} + +func printhex(v uint64) { + const dig = "0123456789abcdef" + var buf [100]byte + i := len(buf) + for i--; i > 0; i-- { + buf[i] = dig[v%16] + if v < 16 { + break + } + v /= 16 + } + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + gwrite(buf[i:]) +} + +func printpointer(p unsafe.Pointer) { + printhex(uint64(uintptr(p))) +} + +func printstring(s string) { + if uintptr(len(s)) > maxstring { + gwrite(bytes("[string too long]")) + return + } + gwrite(bytes(s)) +} + +func printslice(s []byte) { + sp := (*slice)(unsafe.Pointer(&s)) + print("[", len(s), "/", cap(s), "]") + printpointer(unsafe.Pointer(sp.array)) +} + +func printeface(e interface{}) { + ep := (*eface)(unsafe.Pointer(&e)) + print("(", ep._type, ",", ep.data, ")") +} + +func printiface(i fInterface) { + ip := (*iface)(unsafe.Pointer(&i)) + print("(", ip.tab, ",", ip.data, ")") +} diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go new file mode 100644 index 00000000000..517ca03df64 --- /dev/null +++ b/libgo/go/runtime/proc.go @@ -0,0 +1,246 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func newsysmon() + +func runtime_init() +func main_init() +func main_main() + +// The main goroutine. +func main() { + g := getg() + + // Racectx of m0->g0 is used only as the parent of the main goroutine. + // It must not be used for anything else. + g.m.g0.racectx = 0 + + // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. + // Using decimal instead of binary GB and MB because + // they look nicer in the stack overflow failure message. + if ptrSize == 8 { + maxstacksize = 1000000000 + } else { + maxstacksize = 250000000 + } + + onM(newsysmon) + + // Lock the main goroutine onto this, the main OS thread, + // during initialization. Most programs won't care, but a few + // do require certain calls to be made by the main thread. + // Those can arrange for main.main to run in the main thread + // by calling runtime.LockOSThread during initialization + // to preserve the lock. + lockOSThread() + + if g.m != &m0 { + gothrow("runtime.main not on m0") + } + + runtime_init() // must be before defer + + // Defer unlock so that runtime.Goexit during init does the unlock too. + needUnlock := true + defer func() { + if needUnlock { + unlockOSThread() + } + }() + + memstats.enablegc = true // now that runtime is initialized, GC is okay + + main_init() + + needUnlock = false + unlockOSThread() + + main_main() + if raceenabled { + racefini() + } + + // Make racy client program work: if panicking on + // another goroutine at the same time as main returns, + // let the other goroutine finish printing the panic trace. + // Once it does, it will exit. See issue 3934. 
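printuint and printhex above build the digits backwards in a scratch buffer and emit only the used suffix, avoiding allocation; a standalone sketch of the same idea (illustrative only, formatUint is a made-up name):

package main

import "fmt"

// formatUint mirrors printuint: write digits least-significant first from the
// end of a scratch buffer, then keep only the suffix that was filled in.
func formatUint(v uint64) string {
	var buf [32]byte // comfortably larger than the 20 digits of 1<<64-1
	i := len(buf)
	for i--; i > 0; i-- {
		buf[i] = byte(v%10 + '0')
		if v < 10 {
			break
		}
		v /= 10
	}
	return string(buf[i:])
}

func main() {
	fmt.Println(formatUint(0))         // 0
	fmt.Println(formatUint(1409))      // 1409
	fmt.Println(formatUint(1<<64 - 1)) // 18446744073709551615
}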
+ if panicking != 0 { + gopark(nil, nil, "panicwait") + } + + exit(0) + for { + var x *int32 + *x = 0 + } +} + +var parkunlock_c byte + +// start forcegc helper goroutine +func init() { + go forcegchelper() +} + +func forcegchelper() { + forcegc.g = getg() + forcegc.g.issystem = true + for { + lock(&forcegc.lock) + if forcegc.idle != 0 { + gothrow("forcegc: phase error") + } + atomicstore(&forcegc.idle, 1) + goparkunlock(&forcegc.lock, "force gc (idle)") + // this goroutine is explicitly resumed by sysmon + if debug.gctrace > 0 { + println("GC forced") + } + gogc(1) + } +} + +//go:nosplit + +// Gosched yields the processor, allowing other goroutines to run. It does not +// suspend the current goroutine, so execution resumes automatically. +func Gosched() { + mcall(gosched_m) +} + +// Puts the current goroutine into a waiting state and calls unlockf. +// If unlockf returns false, the goroutine is resumed. +func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) { + mp := acquirem() + gp := mp.curg + status := readgstatus(gp) + if status != _Grunning && status != _Gscanrunning { + gothrow("gopark: bad g status") + } + mp.waitlock = lock + mp.waitunlockf = unlockf + gp.waitreason = reason + releasem(mp) + // can't do anything that might move the G between Ms here. + mcall(park_m) +} + +// Puts the current goroutine into a waiting state and unlocks the lock. +// The goroutine can be made runnable again by calling goready(gp). +func goparkunlock(lock *mutex, reason string) { + gopark(unsafe.Pointer(&parkunlock_c), unsafe.Pointer(lock), reason) +} + +func goready(gp *g) { + mp := acquirem() + mp.ptrarg[0] = unsafe.Pointer(gp) + onM(ready_m) + releasem(mp) +} + +//go:nosplit +func acquireSudog() *sudog { + c := gomcache() + s := c.sudogcache + if s != nil { + if s.elem != nil { + gothrow("acquireSudog: found s.elem != nil in cache") + } + c.sudogcache = s.next + s.next = nil + return s + } + + // Delicate dance: the semaphore implementation calls + // acquireSudog, acquireSudog calls new(sudog), + // new calls malloc, malloc can call the garbage collector, + // and the garbage collector calls the semaphore implementation + // in stoptheworld. + // Break the cycle by doing acquirem/releasem around new(sudog). + // The acquirem/releasem increments m.locks during new(sudog), + // which keeps the garbage collector from being invoked. + mp := acquirem() + p := new(sudog) + releasem(mp) + return p +} + +//go:nosplit +func releaseSudog(s *sudog) { + if s.elem != nil { + gothrow("runtime: sudog with non-nil elem") + } + if s.selectdone != nil { + gothrow("runtime: sudog with non-nil selectdone") + } + if s.next != nil { + gothrow("runtime: sudog with non-nil next") + } + if s.prev != nil { + gothrow("runtime: sudog with non-nil prev") + } + if s.waitlink != nil { + gothrow("runtime: sudog with non-nil waitlink") + } + gp := getg() + if gp.param != nil { + gothrow("runtime: releaseSudog with non-nil gp.param") + } + c := gomcache() + s.next = c.sudogcache + c.sudogcache = s +} + +// funcPC returns the entry PC of the function f. +// It assumes that f is a func value. Otherwise the behavior is undefined. 
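acquireSudog and releaseSudog above keep a free list so that blocked channel and semaphore operations rarely allocate; a tiny standalone sketch of that intrusive free-list pattern (names are made up for illustration):

package main

import "fmt"

type waiter struct {
	next *waiter
	elem interface{}
}

type cache struct{ free *waiter }

func (c *cache) acquire() *waiter {
	if w := c.free; w != nil {
		c.free = w.next
		w.next = nil
		return w
	}
	return new(waiter)
}

func (c *cache) release(w *waiter) {
	if w.elem != nil {
		panic("release: waiter still in use") // mirror the runtime's invariant checks
	}
	w.next = c.free
	c.free = w
}

func main() {
	var c cache
	w := c.acquire()
	c.release(w)
	fmt.Println(c.acquire() == w) // true: the cached waiter is reused
}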
+//go:nosplit +func funcPC(f interface{}) uintptr { + return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize)) +} + +// called from assembly +func badmcall(fn func(*g)) { + gothrow("runtime: mcall called on m->g0 stack") +} + +func badmcall2(fn func(*g)) { + gothrow("runtime: mcall function returned") +} + +func badreflectcall() { + panic("runtime: arg size to reflect.call more than 1GB") +} + +func lockedOSThread() bool { + gp := getg() + return gp.lockedm != nil && gp.m.lockedg != nil +} + +func newP() *p { + return new(p) +} + +func newM() *m { + return new(m) +} + +func newG() *g { + return new(g) +} + +func allgadd(gp *g) { + if readgstatus(gp) == _Gidle { + gothrow("allgadd: bad status Gidle") + } + + lock(&allglock) + allgs = append(allgs, gp) + allg = &allgs[0] + allglen = uintptr(len(allgs)) + unlock(&allglock) +} diff --git a/libgo/go/runtime/race0.go b/libgo/go/runtime/race0.go new file mode 100644 index 00000000000..5d90cc859a2 --- /dev/null +++ b/libgo/go/runtime/race0.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !race + +// Dummy race detection API, used when not built with -race. + +package runtime + +import ( + "unsafe" +) + +const raceenabled = false + +// Because raceenabled is false, none of these functions should be called. + +func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") } +func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") } +func raceinit() { gothrow("race") } +func racefini() { gothrow("race") } +func racemapshadow(addr unsafe.Pointer, size uintptr) { gothrow("race") } +func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") } +func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") } +func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") } +func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") } +func raceacquire(addr unsafe.Pointer) { gothrow("race") } +func raceacquireg(gp *g, addr unsafe.Pointer) { gothrow("race") } +func racerelease(addr unsafe.Pointer) { gothrow("race") } +func racereleaseg(gp *g, addr unsafe.Pointer) { gothrow("race") } +func racereleasemerge(addr unsafe.Pointer) { gothrow("race") } +func racereleasemergeg(gp *g, addr unsafe.Pointer) { gothrow("race") } +func racefingo() { gothrow("race") } +func racemalloc(p unsafe.Pointer, sz uintptr) { gothrow("race") } +func racegostart(pc uintptr) uintptr { gothrow("race"); return 0 } +func racegoend() { gothrow("race") } diff --git a/libgo/go/runtime/rdebug.go b/libgo/go/runtime/rdebug.go new file mode 100644 index 00000000000..e5e691122c6 --- /dev/null +++ b/libgo/go/runtime/rdebug.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
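race0.go above follows the standard build-tag pattern: a constant that is false unless the race tag is set, plus stubs that must never be reached. A self-contained sketch of the same pattern with made-up names:

package main

import "fmt"

// In a real layout this constant lives in a "+build !mytag" file and a
// "+build mytag" twin sets it to true; here it is inlined for illustration.
const tracingEnabled = false

func traceHook(ev string) { panic("trace hook reached with tracing compiled out") }

func main() {
	// Because tracingEnabled is a compile-time constant, the guarded call is
	// dead code in the disabled build and the stub can never fire.
	if tracingEnabled {
		traceHook("event")
	}
	fmt.Println("tracing enabled:", tracingEnabled)
}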
+ +package runtime + +func setMaxStack(in int) (out int) { + out = int(maxstacksize) + maxstacksize = uintptr(in) + return out +} + +func setGCPercent(in int32) (out int32) { + mp := acquirem() + mp.scalararg[0] = uintptr(int(in)) + onM(setgcpercent_m) + out = int32(int(mp.scalararg[0])) + releasem(mp) + return out +} + +func setPanicOnFault(new bool) (old bool) { + mp := acquirem() + old = mp.curg.paniconfault + mp.curg.paniconfault = new + releasem(mp) + return old +} + +func setMaxThreads(in int) (out int) { + mp := acquirem() + mp.scalararg[0] = uintptr(in) + onM(setmaxthreads_m) + out = int(mp.scalararg[0]) + releasem(mp) + return out +} diff --git a/libgo/go/runtime/rune.go b/libgo/go/runtime/rune.go new file mode 100644 index 00000000000..a9f6835818d --- /dev/null +++ b/libgo/go/runtime/rune.go @@ -0,0 +1,219 @@ +/* + * The authors of this software are Rob Pike and Ken Thompson. + * Copyright (c) 2002 by Lucent Technologies. + * Portions Copyright 2009 The Go Authors. All rights reserved. + * Permission to use, copy, modify, and distribute this software for any + * purpose without fee is hereby granted, provided that this entire notice + * is included in all copies of any software which is or includes a copy + * or modification of this software and in all copies of the supporting + * documentation for such software. + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY + * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. + */ + +/* + * This code is copied, with slight editing due to type differences, + * from a subset of ../lib9/utf/rune.c + */ + +package runtime + +const ( + bit1 = 7 + bitx = 6 + bit2 = 5 + bit3 = 4 + bit4 = 3 + bit5 = 2 + + t1 = ((1 << (bit1 + 1)) - 1) ^ 0xFF /* 0000 0000 */ + tx = ((1 << (bitx + 1)) - 1) ^ 0xFF /* 1000 0000 */ + t2 = ((1 << (bit2 + 1)) - 1) ^ 0xFF /* 1100 0000 */ + t3 = ((1 << (bit3 + 1)) - 1) ^ 0xFF /* 1110 0000 */ + t4 = ((1 << (bit4 + 1)) - 1) ^ 0xFF /* 1111 0000 */ + t5 = ((1 << (bit5 + 1)) - 1) ^ 0xFF /* 1111 1000 */ + + rune1 = (1 << (bit1 + 0*bitx)) - 1 /* 0000 0000 0111 1111 */ + rune2 = (1 << (bit2 + 1*bitx)) - 1 /* 0000 0111 1111 1111 */ + rune3 = (1 << (bit3 + 2*bitx)) - 1 /* 1111 1111 1111 1111 */ + rune4 = (1 << (bit4 + 3*bitx)) - 1 /* 0001 1111 1111 1111 1111 1111 */ + + maskx = (1 << bitx) - 1 /* 0011 1111 */ + testx = maskx ^ 0xFF /* 1100 0000 */ + + runeerror = 0xFFFD + runeself = 0x80 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + bad = runeerror + + runemax = 0x10FFFF /* maximum rune value */ +) + +/* + * Modified by Wei-Hwa Huang, Google Inc., on 2004-09-24 + * This is a slower but "safe" version of the old chartorune + * that works on strings that are not necessarily null-terminated. + * + * If you know for sure that your string is null-terminated, + * chartorune will be a bit faster. + * + * It is guaranteed not to attempt to access "length" + * past the incoming pointer. This is to avoid + * possible access violations. If the string appears to be + * well-formed but incomplete (i.e., to get the whole Rune + * we'd need to read past str+length) then we'll set the Rune + * to Bad and return 0. + * + * Note that if we have decoding problems for other + * reasons, we return 1 instead of 0. 
+ */ +func charntorune(s string) (rune, int) { + /* When we're not allowed to read anything */ + if len(s) <= 0 { + return bad, 1 + } + + /* + * one character sequence (7-bit value) + * 00000-0007F => T1 + */ + c := s[0] + if c < tx { + return rune(c), 1 + } + + // If we can't read more than one character we must stop + if len(s) <= 1 { + return bad, 1 + } + + /* + * two character sequence (11-bit value) + * 0080-07FF => t2 tx + */ + c1 := s[1] ^ tx + if (c1 & testx) != 0 { + return bad, 1 + } + if c < t3 { + if c < t2 { + return bad, 1 + } + l := ((rune(c) << bitx) | rune(c1)) & rune2 + if l <= rune1 { + return bad, 1 + } + return l, 2 + } + + // If we can't read more than two characters we must stop + if len(s) <= 2 { + return bad, 1 + } + + /* + * three character sequence (16-bit value) + * 0800-FFFF => t3 tx tx + */ + c2 := s[2] ^ tx + if (c2 & testx) != 0 { + return bad, 1 + } + if c < t4 { + l := ((((rune(c) << bitx) | rune(c1)) << bitx) | rune(c2)) & rune3 + if l <= rune2 { + return bad, 1 + } + if surrogateMin <= l && l <= surrogateMax { + return bad, 1 + } + return l, 3 + } + + if len(s) <= 3 { + return bad, 1 + } + + /* + * four character sequence (21-bit value) + * 10000-1FFFFF => t4 tx tx tx + */ + c3 := s[3] ^ tx + if (c3 & testx) != 0 { + return bad, 1 + } + if c < t5 { + l := ((((((rune(c) << bitx) | rune(c1)) << bitx) | rune(c2)) << bitx) | rune(c3)) & rune4 + if l <= rune3 || l > runemax { + return bad, 1 + } + return l, 4 + } + + // Support for 5-byte or longer UTF-8 would go here, but + // since we don't have that, we'll just return bad. + return bad, 1 +} + +// runetochar converts r to bytes and writes the result to str. +// returns the number of bytes generated. +func runetochar(str []byte, r rune) int { + /* runes are signed, so convert to unsigned for range check. */ + c := uint32(r) + /* + * one character sequence + * 00000-0007F => 00-7F + */ + if c <= rune1 { + str[0] = byte(c) + return 1 + } + /* + * two character sequence + * 0080-07FF => t2 tx + */ + if c <= rune2 { + str[0] = byte(t2 | (c >> (1 * bitx))) + str[1] = byte(tx | (c & maskx)) + return 2 + } + + /* + * If the rune is out of range or a surrogate half, convert it to the error rune. + * Do this test here because the error rune encodes to three bytes. + * Doing it earlier would duplicate work, since an out of range + * rune wouldn't have fit in one or two bytes. + */ + if c > runemax { + c = runeerror + } + if surrogateMin <= c && c <= surrogateMax { + c = runeerror + } + + /* + * three character sequence + * 0800-FFFF => t3 tx tx + */ + if c <= rune3 { + str[0] = byte(t3 | (c >> (2 * bitx))) + str[1] = byte(tx | ((c >> (1 * bitx)) & maskx)) + str[2] = byte(tx | (c & maskx)) + return 3 + } + + /* + * four character sequence (21-bit value) + * 10000-1FFFFF => t4 tx tx tx + */ + str[0] = byte(t4 | (c >> (3 * bitx))) + str[1] = byte(tx | ((c >> (2 * bitx)) & maskx)) + str[2] = byte(tx | ((c >> (1 * bitx)) & maskx)) + str[3] = byte(tx | (c & maskx)) + return 4 +} diff --git a/libgo/go/runtime/runtime.go b/libgo/go/runtime/runtime.go new file mode 100644 index 00000000000..4e4e1d17a5c --- /dev/null +++ b/libgo/go/runtime/runtime.go @@ -0,0 +1,60 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
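charntorune and runetochar above are the runtime's private UTF-8 codec; the public unicode/utf8 package exposes the same behavior, which makes the byte patterns described by the constants easy to check (sketch, not part of the patch):

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// 'é' (U+00E9) encodes as the two-byte sequence t2|bits, tx|bits.
	var buf [4]byte
	n := utf8.EncodeRune(buf[:], 'é')
	fmt.Printf("% x\n", buf[:n]) // c3 a9

	// The inverse of charntorune: first rune of a string plus its byte width.
	r, size := utf8.DecodeRuneInString("héllo")
	fmt.Println(string(r), size) // h 1

	// Out-of-range and surrogate values become U+FFFD (runeerror above).
	n = utf8.EncodeRune(buf[:], 0xD800)
	fmt.Printf("% x\n", buf[:n]) // ef bf bd
}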
+ +package runtime + +var ticks struct { + lock mutex + val uint64 +} + +var tls0 [8]uintptr // available storage for m0's TLS; not necessarily used; opaque to GC + +// Note: Called by runtime/pprof in addition to runtime code. +func tickspersecond() int64 { + r := int64(atomicload64(&ticks.val)) + if r != 0 { + return r + } + lock(&ticks.lock) + r = int64(ticks.val) + if r == 0 { + t0 := nanotime() + c0 := cputicks() + usleep(100 * 1000) + t1 := nanotime() + c1 := cputicks() + if t1 == t0 { + t1++ + } + r = (c1 - c0) * 1000 * 1000 * 1000 / (t1 - t0) + if r == 0 { + r++ + } + atomicstore64(&ticks.val, uint64(r)) + } + unlock(&ticks.lock) + return r +} + +func makeStringSlice(n int) []string { + return make([]string, n) +} + +// TODO: Move to parfor.go when parfor.c becomes parfor.go. +func parforalloc(nthrmax uint32) *parfor { + return &parfor{ + thr: &make([]parforthread, nthrmax)[0], + nthrmax: nthrmax, + } +} + +var envs []string +var argslice []string + +// called from syscall +func runtime_envs() []string { return envs } + +// called from os +func runtime_args() []string { return argslice } diff --git a/libgo/go/runtime/runtime_test.go b/libgo/go/runtime/runtime_test.go index 5c504675968..8059d1ad9a1 100644 --- a/libgo/go/runtime/runtime_test.go +++ b/libgo/go/runtime/runtime_test.go @@ -97,8 +97,9 @@ func BenchmarkDeferMany(b *testing.B) { // The value reported will include the padding between runtime.gogo and the // next function in memory. That's fine. func TestRuntimeGogoBytes(t *testing.T) { - if GOOS == "nacl" { - t.Skip("skipping on nacl") + switch GOOS { + case "android", "nacl": + t.Skipf("skipping on %s", GOOS) } dir, err := ioutil.TempDir("", "go-build") @@ -107,7 +108,7 @@ func TestRuntimeGogoBytes(t *testing.T) { } defer os.RemoveAll(dir) - out, err := exec.Command("go", "build", "-o", dir+"/hello", "../../../test/helloworld.go").CombinedOutput() + out, err := exec.Command("go", "build", "-o", dir+"/hello", "../../test/helloworld.go").CombinedOutput() if err != nil { t.Fatalf("building hello world: %v\n%s", err, out) } @@ -159,8 +160,8 @@ var faultAddrs = []uint64{ // or else malformed. 0xffffffffffffffff, 0xfffffffffffff001, - // no 0xffffffffffff0001; 0xffff0001 is mapped for 32-bit user space on OS X - // no 0xfffffffffff00001; 0xfff00001 is mapped for 32-bit user space sometimes on Linux + 0xffffffffffff0001, + 0xfffffffffff00001, 0xffffffffff000001, 0xfffffffff0000001, 0xffffffff00000001, @@ -184,29 +185,68 @@ func TestSetPanicOnFault(t *testing.T) { old := debug.SetPanicOnFault(true) defer debug.SetPanicOnFault(old) + nfault := 0 for _, addr := range faultAddrs { - if Compiler == "gccgo" && GOARCH == "386" && (addr&0xff000000) != 0 { - // On gccgo these addresses can be used for - // the thread stack. 
- continue - } - testSetPanicOnFault(t, uintptr(addr)) + testSetPanicOnFault(t, uintptr(addr), &nfault) + } + if nfault == 0 { + t.Fatalf("none of the addresses faulted") } } -func testSetPanicOnFault(t *testing.T, addr uintptr) { +func testSetPanicOnFault(t *testing.T, addr uintptr, nfault *int) { if GOOS == "nacl" { t.Skip("nacl doesn't seem to fault on high addresses") } defer func() { - if err := recover(); err == nil { - t.Fatalf("did not find error in recover") + if err := recover(); err != nil { + *nfault++ } }() - var p *int - p = (*int)(unsafe.Pointer(addr)) - println(*p) - t.Fatalf("still here - should have faulted on address %#x", addr) + // The read should fault, except that sometimes we hit + // addresses that have had C or kernel pages mapped there + // readable by user code. So just log the content. + // If no addresses fault, we'll fail the test. + v := *(*byte)(unsafe.Pointer(addr)) + t.Logf("addr %#x: %#x\n", addr, v) +} + +func eqstring_generic(s1, s2 string) bool { + if len(s1) != len(s2) { + return false + } + // optimization in assembly versions: + // if s1.str == s2.str { return true } + for i := 0; i < len(s1); i++ { + if s1[i] != s2[i] { + return false + } + } + return true +} + +func TestEqString(t *testing.T) { + // This isn't really an exhaustive test of eqstring, it's + // just a convenient way of documenting (via eqstring_generic) + // what eqstring does. + s := []string{ + "", + "a", + "c", + "aaa", + "ccc", + "cccc"[:3], // same contents, different string + "1234567890", + } + for _, s1 := range s { + for _, s2 := range s { + x := s1 == s2 + y := eqstring_generic(s1, s2) + if x != y { + t.Errorf(`eqstring("%s","%s") = %t, want %t`, s1, s2, x, y) + } + } + } } diff --git a/libgo/go/runtime/select.go b/libgo/go/runtime/select.go new file mode 100644 index 00000000000..f735a71e2f5 --- /dev/null +++ b/libgo/go/runtime/select.go @@ -0,0 +1,651 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// This file contains the implementation of Go select statements. 
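The reworked TestSetPanicOnFault above accepts that whether a given address faults is platform-dependent; when a fault does happen, debug.SetPanicOnFault turns it into a recoverable panic. A minimal standalone probe in the same spirit (the chosen address is arbitrary and may simply be unmapped):

package main

import (
	"fmt"
	"runtime/debug"
	"unsafe"
)

func probe(addr uintptr) {
	defer func() {
		if err := recover(); err != nil {
			fmt.Printf("addr %#x faulted: %v\n", addr, err)
		}
	}()
	v := *(*byte)(unsafe.Pointer(addr)) // may be readable on some platforms
	fmt.Printf("addr %#x is readable: %#x\n", addr, v)
}

func main() {
	old := debug.SetPanicOnFault(true)
	defer debug.SetPanicOnFault(old)
	hi := ^uint64(0) >> 1 // a high address, almost certainly not mapped
	probe(uintptr(hi))
}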
+ +import "unsafe" + +const ( + debugSelect = false +) + +var ( + chansendpc = funcPC(chansend) + chanrecvpc = funcPC(chanrecv) +) + +func selectsize(size uintptr) uintptr { + selsize := unsafe.Sizeof(_select{}) + + (size-1)*unsafe.Sizeof(_select{}.scase[0]) + + size*unsafe.Sizeof(*_select{}.lockorder) + + size*unsafe.Sizeof(*_select{}.pollorder) + return round(selsize, _Int64Align) +} + +func newselect(sel *_select, selsize int64, size int32) { + if selsize != int64(selectsize(uintptr(size))) { + print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n") + gothrow("bad select size") + } + sel.tcase = uint16(size) + sel.ncase = 0 + sel.lockorder = (**hchan)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(_select{}.scase[0]))) + sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*_select{}.lockorder))) + + if debugSelect { + print("newselect s=", sel, " size=", size, "\n") + } +} + +//go:nosplit +func selectsend(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) { + // nil cases do not compete + if c != nil { + selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel))) + } + return +} + +// cut in half to give stack a chance to split +func selectsendImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) { + i := sel.ncase + if i >= sel.tcase { + gothrow("selectsend: too many cases") + } + sel.ncase = i + 1 + cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0]))) + + cas.pc = pc + cas._chan = c + cas.so = uint16(so) + cas.kind = _CaseSend + cas.elem = elem + + if debugSelect { + print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas._chan, " so=", cas.so, "\n") + } +} + +//go:nosplit +func selectrecv(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) { + // nil cases do not compete + if c != nil { + selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel))) + } + return +} + +//go:nosplit +func selectrecv2(sel *_select, c *hchan, elem unsafe.Pointer, received *bool) (selected bool) { + // nil cases do not compete + if c != nil { + selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel))) + } + return +} + +func selectrecvImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) { + i := sel.ncase + if i >= sel.tcase { + gothrow("selectrecv: too many cases") + } + sel.ncase = i + 1 + cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0]))) + cas.pc = pc + cas._chan = c + cas.so = uint16(so) + cas.kind = _CaseRecv + cas.elem = elem + cas.receivedp = received + + if debugSelect { + print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas._chan, " so=", cas.so, "\n") + } +} + +//go:nosplit +func selectdefault(sel *_select) (selected bool) { + selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel))) + return +} + +func selectdefaultImpl(sel *_select, callerpc uintptr, so uintptr) { + i := sel.ncase + if i >= sel.tcase { + gothrow("selectdefault: too many cases") + } + sel.ncase = i + 1 + cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0]))) + cas.pc = callerpc + cas._chan = nil + cas.so = uint16(so) + cas.kind = _CaseDefault + + if debugSelect { 
+ print("selectdefault s=", sel, " pc=", hex(cas.pc), " so=", cas.so, "\n") + } +} + +func sellock(sel *_select) { + lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)} + lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice)) + var c *hchan + for _, c0 := range lockorder { + if c0 != nil && c0 != c { + c = c0 + lock(&c.lock) + } + } +} + +func selunlock(sel *_select) { + // We must be very careful here to not touch sel after we have unlocked + // the last lock, because sel can be freed right after the last unlock. + // Consider the following situation. + // First M calls runtime·park() in runtime·selectgo() passing the sel. + // Once runtime·park() has unlocked the last lock, another M makes + // the G that calls select runnable again and schedules it for execution. + // When the G runs on another M, it locks all the locks and frees sel. + // Now if the first M touches sel, it will access freed memory. + n := int(sel.ncase) + r := 0 + lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), n, n} + lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice)) + // skip the default case + if n > 0 && lockorder[0] == nil { + r = 1 + } + for i := n - 1; i >= r; i-- { + c := lockorder[i] + if i > 0 && c == lockorder[i-1] { + continue // will unlock it on the next iteration + } + unlock(&c.lock) + } +} + +func selparkcommit(gp *g, sel *_select) bool { + selunlock(sel) + return true +} + +func block() { + gopark(nil, nil, "select (no cases)") // forever +} + +// overwrites return pc on stack to signal which case of the select +// to run, so cannot appear at the top of a split stack. +//go:nosplit +func selectgo(sel *_select) { + pc, offset := selectgoImpl(sel) + *(*bool)(add(unsafe.Pointer(&sel), uintptr(offset))) = true + setcallerpc(unsafe.Pointer(&sel), pc) +} + +// selectgoImpl returns scase.pc and scase.so for the select +// case which fired. +func selectgoImpl(sel *_select) (uintptr, uint16) { + if debugSelect { + print("select: sel=", sel, "\n") + } + + scaseslice := sliceStruct{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)} + scases := *(*[]scase)(unsafe.Pointer(&scaseslice)) + + var t0 int64 + if blockprofilerate > 0 { + t0 = cputicks() + for i := 0; i < int(sel.ncase); i++ { + scases[i].releasetime = -1 + } + } + + // The compiler rewrites selects that statically have + // only 0 or 1 cases plus default into simpler constructs. + // The only way we can end up with such small sel.ncase + // values here is for a larger select in which most channels + // have been nilled out. The general code handles those + // cases correctly, and they are rare enough not to bother + // optimizing (and needing to test). + + // generate permuted order + pollslice := sliceStruct{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)} + pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice)) + for i := 0; i < int(sel.ncase); i++ { + pollorder[i] = uint16(i) + } + for i := 1; i < int(sel.ncase); i++ { + o := pollorder[i] + j := int(fastrand1()) % (i + 1) + pollorder[i] = pollorder[j] + pollorder[j] = o + } + + // sort the cases by Hchan address to get the locking order. + // simple heap sort, to guarantee n log n time and constant stack footprint. 
+ lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)} + lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice)) + for i := 0; i < int(sel.ncase); i++ { + j := i + c := scases[j]._chan + for j > 0 && lockorder[(j-1)/2].sortkey() < c.sortkey() { + k := (j - 1) / 2 + lockorder[j] = lockorder[k] + j = k + } + lockorder[j] = c + } + for i := int(sel.ncase) - 1; i >= 0; i-- { + c := lockorder[i] + lockorder[i] = lockorder[0] + j := 0 + for { + k := j*2 + 1 + if k >= i { + break + } + if k+1 < i && lockorder[k].sortkey() < lockorder[k+1].sortkey() { + k++ + } + if c.sortkey() < lockorder[k].sortkey() { + lockorder[j] = lockorder[k] + j = k + continue + } + break + } + lockorder[j] = c + } + /* + for i := 0; i+1 < int(sel.ncase); i++ { + if lockorder[i].sortkey() > lockorder[i+1].sortkey() { + print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n") + gothrow("select: broken sort") + } + } + */ + + // lock all the channels involved in the select + sellock(sel) + + var ( + gp *g + done uint32 + sg *sudog + c *hchan + k *scase + sglist *sudog + sgnext *sudog + ) + +loop: + // pass 1 - look for something already waiting + var dfl *scase + var cas *scase + for i := 0; i < int(sel.ncase); i++ { + cas = &scases[pollorder[i]] + c = cas._chan + + switch cas.kind { + case _CaseRecv: + if c.dataqsiz > 0 { + if c.qcount > 0 { + goto asyncrecv + } + } else { + sg = c.sendq.dequeue() + if sg != nil { + goto syncrecv + } + } + if c.closed != 0 { + goto rclose + } + + case _CaseSend: + if raceenabled { + racereadpc(unsafe.Pointer(c), cas.pc, chansendpc) + } + if c.closed != 0 { + goto sclose + } + if c.dataqsiz > 0 { + if c.qcount < c.dataqsiz { + goto asyncsend + } + } else { + sg = c.recvq.dequeue() + if sg != nil { + goto syncsend + } + } + + case _CaseDefault: + dfl = cas + } + } + + if dfl != nil { + selunlock(sel) + cas = dfl + goto retc + } + + // pass 2 - enqueue on all chans + gp = getg() + done = 0 + for i := 0; i < int(sel.ncase); i++ { + cas = &scases[pollorder[i]] + c = cas._chan + sg := acquireSudog() + sg.g = gp + // Note: selectdone is adjusted for stack copies in stack.c:adjustsudogs + sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done))) + sg.elem = cas.elem + sg.releasetime = 0 + if t0 != 0 { + sg.releasetime = -1 + } + sg.waitlink = gp.waiting + gp.waiting = sg + + switch cas.kind { + case _CaseRecv: + c.recvq.enqueue(sg) + + case _CaseSend: + c.sendq.enqueue(sg) + } + } + + // wait for someone to wake us up + gp.param = nil + gopark(unsafe.Pointer(funcPC(selparkcommit)), unsafe.Pointer(sel), "select") + + // someone woke us up + sellock(sel) + sg = (*sudog)(gp.param) + gp.param = nil + + // pass 3 - dequeue from unsuccessful chans + // otherwise they stack up on quiet channels + // record the successful case, if any. + // We singly-linked up the SudoGs in case order, so when + // iterating through the linked list they are in reverse order. + cas = nil + sglist = gp.waiting + // Clear all selectdone and elem before unlinking from gp.waiting. + // They must be cleared before being put back into the sudog cache. + // Clear before unlinking, because if a stack copy happens after the unlink, + // they will not be updated, they will be left pointing to the old stack, + // which creates dangling pointers, which may be detected by the + // garbage collector. 
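Pass 1 runs with every channel in the select locked, so sellock must take the locks in one global order to avoid deadlock between overlapping selects; the lockorder sort above supplies that order (by channel address). A standalone sketch of the idea, using sort.Slice in place of the allocation-free heap sort and mutexes in place of channels:

package main

import (
	"fmt"
	"sort"
	"sync"
	"unsafe"
)

// lockAll acquires every mutex in ascending address order, skipping
// duplicates, the same discipline sellock applies to channels.
func lockAll(ms []*sync.Mutex) {
	sort.Slice(ms, func(i, j int) bool {
		return uintptr(unsafe.Pointer(ms[i])) < uintptr(unsafe.Pointer(ms[j]))
	})
	var last *sync.Mutex
	for _, m := range ms {
		if m != last {
			m.Lock()
			last = m
		}
	}
}

func main() {
	var a, b sync.Mutex
	done := make(chan bool)
	// The two goroutines name the locks in opposite orders, yet cannot
	// deadlock because lockAll imposes a single global order on both.
	go func() { lockAll([]*sync.Mutex{&a, &b}); a.Unlock(); b.Unlock(); done <- true }()
	go func() { lockAll([]*sync.Mutex{&b, &a}); a.Unlock(); b.Unlock(); done <- true }()
	<-done
	<-done
	fmt.Println("both goroutines finished")
}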
+ for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink { + sg1.selectdone = nil + sg1.elem = nil + } + gp.waiting = nil + for i := int(sel.ncase) - 1; i >= 0; i-- { + k = &scases[pollorder[i]] + if sglist.releasetime > 0 { + k.releasetime = sglist.releasetime + } + if sg == sglist { + cas = k + } else { + c = k._chan + if k.kind == _CaseSend { + c.sendq.dequeueSudoG(sglist) + } else { + c.recvq.dequeueSudoG(sglist) + } + } + sgnext = sglist.waitlink + sglist.waitlink = nil + releaseSudog(sglist) + sglist = sgnext + } + + if cas == nil { + goto loop + } + + c = cas._chan + + if c.dataqsiz > 0 { + gothrow("selectgo: shouldn't happen") + } + + if debugSelect { + print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n") + } + + if cas.kind == _CaseRecv { + if cas.receivedp != nil { + *cas.receivedp = true + } + } + + if raceenabled { + if cas.kind == _CaseRecv && cas.elem != nil { + raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc) + } else if cas.kind == _CaseSend { + raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc) + } + } + + selunlock(sel) + goto retc + +asyncrecv: + // can receive from buffer + if raceenabled { + if cas.elem != nil { + raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc) + } + raceacquire(chanbuf(c, c.recvx)) + racerelease(chanbuf(c, c.recvx)) + } + if cas.receivedp != nil { + *cas.receivedp = true + } + if cas.elem != nil { + memmove(cas.elem, chanbuf(c, c.recvx), uintptr(c.elemsize)) + } + memclr(chanbuf(c, c.recvx), uintptr(c.elemsize)) + c.recvx++ + if c.recvx == c.dataqsiz { + c.recvx = 0 + } + c.qcount-- + sg = c.sendq.dequeue() + if sg != nil { + gp = sg.g + selunlock(sel) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + } else { + selunlock(sel) + } + goto retc + +asyncsend: + // can send to buffer + if raceenabled { + raceacquire(chanbuf(c, c.sendx)) + racerelease(chanbuf(c, c.sendx)) + raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc) + } + memmove(chanbuf(c, c.sendx), cas.elem, uintptr(c.elemsize)) + c.sendx++ + if c.sendx == c.dataqsiz { + c.sendx = 0 + } + c.qcount++ + sg = c.recvq.dequeue() + if sg != nil { + gp = sg.g + selunlock(sel) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + } else { + selunlock(sel) + } + goto retc + +syncrecv: + // can receive from sleeping sender (sg) + if raceenabled { + if cas.elem != nil { + raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc) + } + racesync(c, sg) + } + selunlock(sel) + if debugSelect { + print("syncrecv: sel=", sel, " c=", c, "\n") + } + if cas.receivedp != nil { + *cas.receivedp = true + } + if cas.elem != nil { + memmove(cas.elem, sg.elem, uintptr(c.elemsize)) + } + sg.elem = nil + gp = sg.g + gp.param = unsafe.Pointer(sg) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + goto retc + +rclose: + // read at end of closed channel + selunlock(sel) + if cas.receivedp != nil { + *cas.receivedp = false + } + if cas.elem != nil { + memclr(cas.elem, uintptr(c.elemsize)) + } + if raceenabled { + raceacquire(unsafe.Pointer(c)) + } + goto retc + +syncsend: + // can send to sleeping receiver (sg) + if raceenabled { + raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc) + racesync(c, sg) + } + selunlock(sel) + if debugSelect { + print("syncsend: sel=", sel, " c=", c, "\n") + } + if sg.elem != nil { + memmove(sg.elem, cas.elem, uintptr(c.elemsize)) + } + sg.elem = nil + gp = sg.g + gp.param = unsafe.Pointer(sg) + if sg.releasetime != 0 { + sg.releasetime = cputicks() 
+ } + goready(gp) + +retc: + if cas.releasetime > 0 { + blockevent(cas.releasetime-t0, 2) + } + return cas.pc, cas.so + +sclose: + // send on closed channel + selunlock(sel) + panic("send on closed channel") +} + +func (c *hchan) sortkey() uintptr { + // TODO(khr): if we have a moving garbage collector, we'll need to + // change this function. + return uintptr(unsafe.Pointer(c)) +} + +// A runtimeSelect is a single case passed to rselect. +// This must match ../reflect/value.go:/runtimeSelect +type runtimeSelect struct { + dir selectDir + typ unsafe.Pointer // channel type (not used here) + ch *hchan // channel + val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir) +} + +// These values must match ../reflect/value.go:/SelectDir. +type selectDir int + +const ( + _ selectDir = iota + selectSend // case Chan <- Send + selectRecv // case <-Chan: + selectDefault // default +) + +func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) { + // flagNoScan is safe here, because all objects are also referenced from cases. + size := selectsize(uintptr(len(cases))) + sel := (*_select)(mallocgc(size, nil, flagNoScan)) + newselect(sel, int64(size), int32(len(cases))) + r := new(bool) + for i := range cases { + rc := &cases[i] + switch rc.dir { + case selectDefault: + selectdefaultImpl(sel, uintptr(i), 0) + case selectSend: + if rc.ch == nil { + break + } + selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0) + case selectRecv: + if rc.ch == nil { + break + } + selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0) + } + } + + pc, _ := selectgoImpl(sel) + chosen = int(pc) + recvOK = *r + return +} + +func (q *waitq) dequeueSudoG(s *sudog) { + var prevsgp *sudog + l := &q.first + for { + sgp := *l + if sgp == nil { + return + } + if sgp == s { + *l = sgp.next + if q.last == sgp { + q.last = prevsgp + } + s.next = nil + return + } + l = &sgp.next + prevsgp = sgp + } +} diff --git a/libgo/go/runtime/sema.go b/libgo/go/runtime/sema.go new file mode 100644 index 00000000000..26dbd30ea3f --- /dev/null +++ b/libgo/go/runtime/sema.go @@ -0,0 +1,275 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Semaphore implementation exposed to Go. +// Intended use is provide a sleep and wakeup +// primitive that can be used in the contended case +// of other synchronization primitives. +// Thus it targets the same goal as Linux's futex, +// but it has much simpler semantics. +// +// That is, don't think of these as semaphores. +// Think of them as a way to implement sleep and wakeup +// such that every sleep is paired with a single wakeup, +// even if, due to races, the wakeup happens before the sleep. +// +// See Mullender and Cox, ``Semaphores in Plan 9,'' +// http://swtch.com/semaphore.pdf + +package runtime + +import "unsafe" + +// Asynchronous semaphore for sync.Mutex. + +type semaRoot struct { + lock mutex + head *sudog + tail *sudog + nwait uint32 // Number of waiters. Read w/o the lock. +} + +// Prime to not correlate with any user patterns. +const semTabSize = 251 + +var semtable [semTabSize]struct { + root semaRoot + pad [_CacheLineSize - unsafe.Sizeof(semaRoot{})]byte +} + +// Called from sync/net packages. +func asyncsemacquire(addr *uint32) { + semacquire(addr, true) +} + +func asyncsemrelease(addr *uint32) { + semrelease(addr) +} + +// Called from runtime. 
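reflect_rselect above is the entry point behind the public reflect.Select, which builds one runtimeSelect per case; a small usage sketch:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	c := make(chan string, 1)
	c <- "hello"
	// Each SelectCase becomes a runtimeSelect; chosen is the index of the
	// case that fired, and recvOK mirrors the ", ok" form of a receive.
	cases := []reflect.SelectCase{
		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(c)},
		{Dir: reflect.SelectDefault},
	}
	chosen, v, recvOK := reflect.Select(cases)
	fmt.Println(chosen, v.Interface(), recvOK) // 0 hello true
}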
+func semacquire(addr *uint32, profile bool) { + gp := getg() + if gp != gp.m.curg { + gothrow("semacquire not on the G stack") + } + + // Easy case. + if cansemacquire(addr) { + return + } + + // Harder case: + // increment waiter count + // try cansemacquire one more time, return if succeeded + // enqueue itself as a waiter + // sleep + // (waiter descriptor is dequeued by signaler) + s := acquireSudog() + root := semroot(addr) + t0 := int64(0) + s.releasetime = 0 + if profile && blockprofilerate > 0 { + t0 = cputicks() + s.releasetime = -1 + } + for { + lock(&root.lock) + // Add ourselves to nwait to disable "easy case" in semrelease. + xadd(&root.nwait, 1) + // Check cansemacquire to avoid missed wakeup. + if cansemacquire(addr) { + xadd(&root.nwait, -1) + unlock(&root.lock) + break + } + // Any semrelease after the cansemacquire knows we're waiting + // (we set nwait above), so go to sleep. + root.queue(addr, s) + goparkunlock(&root.lock, "semacquire") + if cansemacquire(addr) { + break + } + } + if s.releasetime > 0 { + blockevent(int64(s.releasetime)-t0, 3) + } + releaseSudog(s) +} + +func semrelease(addr *uint32) { + root := semroot(addr) + xadd(addr, 1) + + // Easy case: no waiters? + // This check must happen after the xadd, to avoid a missed wakeup + // (see loop in semacquire). + if atomicload(&root.nwait) == 0 { + return + } + + // Harder case: search for a waiter and wake it. + lock(&root.lock) + if atomicload(&root.nwait) == 0 { + // The count is already consumed by another goroutine, + // so no need to wake up another goroutine. + unlock(&root.lock) + return + } + s := root.head + for ; s != nil; s = s.next { + if s.elem == unsafe.Pointer(addr) { + xadd(&root.nwait, -1) + root.dequeue(s) + break + } + } + unlock(&root.lock) + if s != nil { + if s.releasetime != 0 { + s.releasetime = cputicks() + } + goready(s.g) + } +} + +func semroot(addr *uint32) *semaRoot { + return &semtable[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root +} + +func cansemacquire(addr *uint32) bool { + for { + v := atomicload(addr) + if v == 0 { + return false + } + if cas(addr, v, v-1) { + return true + } + } +} + +func (root *semaRoot) queue(addr *uint32, s *sudog) { + s.g = getg() + s.elem = unsafe.Pointer(addr) + s.next = nil + s.prev = root.tail + if root.tail != nil { + root.tail.next = s + } else { + root.head = s + } + root.tail = s +} + +func (root *semaRoot) dequeue(s *sudog) { + if s.next != nil { + s.next.prev = s.prev + } else { + root.tail = s.prev + } + if s.prev != nil { + s.prev.next = s.next + } else { + root.head = s.next + } + s.elem = nil + s.next = nil + s.prev = nil +} + +// Synchronous semaphore for sync.Cond. +type syncSema struct { + lock mutex + head *sudog + tail *sudog +} + +// Syncsemacquire waits for a pairing syncsemrelease on the same semaphore s. +func syncsemacquire(s *syncSema) { + lock(&s.lock) + if s.head != nil && s.head.nrelease > 0 { + // Have pending release, consume it. + var wake *sudog + s.head.nrelease-- + if s.head.nrelease == 0 { + wake = s.head + s.head = wake.next + if s.head == nil { + s.tail = nil + } + } + unlock(&s.lock) + if wake != nil { + wake.next = nil + goready(wake.g) + } + } else { + // Enqueue itself. 
+ w := acquireSudog() + w.g = getg() + w.nrelease = -1 + w.next = nil + w.releasetime = 0 + t0 := int64(0) + if blockprofilerate > 0 { + t0 = cputicks() + w.releasetime = -1 + } + if s.tail == nil { + s.head = w + } else { + s.tail.next = w + } + s.tail = w + goparkunlock(&s.lock, "semacquire") + if t0 != 0 { + blockevent(int64(w.releasetime)-t0, 2) + } + releaseSudog(w) + } +} + +// Syncsemrelease waits for n pairing syncsemacquire on the same semaphore s. +func syncsemrelease(s *syncSema, n uint32) { + lock(&s.lock) + for n > 0 && s.head != nil && s.head.nrelease < 0 { + // Have pending acquire, satisfy it. + wake := s.head + s.head = wake.next + if s.head == nil { + s.tail = nil + } + if wake.releasetime != 0 { + wake.releasetime = cputicks() + } + wake.next = nil + goready(wake.g) + n-- + } + if n > 0 { + // enqueue itself + w := acquireSudog() + w.g = getg() + w.nrelease = int32(n) + w.next = nil + w.releasetime = 0 + if s.tail == nil { + s.head = w + } else { + s.tail.next = w + } + s.tail = w + goparkunlock(&s.lock, "semarelease") + releaseSudog(w) + } else { + unlock(&s.lock) + } +} + +func syncsemcheck(sz uintptr) { + if sz != unsafe.Sizeof(syncSema{}) { + print("runtime: bad syncSema size - sync=", sz, " runtime=", unsafe.Sizeof(syncSema{}), "\n") + gothrow("bad syncSema size") + } +} diff --git a/libgo/go/runtime/signal_unix.go b/libgo/go/runtime/signal_unix.go new file mode 100644 index 00000000000..ba77b6e7be1 --- /dev/null +++ b/libgo/go/runtime/signal_unix.go @@ -0,0 +1,13 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package runtime + +func sigpipe() + +func os_sigpipe() { + onM(sigpipe) +} diff --git a/libgo/go/runtime/sigpanic_unix.go b/libgo/go/runtime/sigpanic_unix.go new file mode 100644 index 00000000000..68079859b06 --- /dev/null +++ b/libgo/go/runtime/sigpanic_unix.go @@ -0,0 +1,40 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package runtime + +func signame(int32) *byte + +func sigpanic() { + g := getg() + if !canpanic(g) { + gothrow("unexpected signal during runtime execution") + } + + switch g.sig { + case _SIGBUS: + if g.sigcode0 == _BUS_ADRERR && g.sigcode1 < 0x1000 || g.paniconfault { + panicmem() + } + print("unexpected fault address ", hex(g.sigcode1), "\n") + gothrow("fault") + case _SIGSEGV: + if (g.sigcode0 == 0 || g.sigcode0 == _SEGV_MAPERR || g.sigcode0 == _SEGV_ACCERR) && g.sigcode1 < 0x1000 || g.paniconfault { + panicmem() + } + print("unexpected fault address ", hex(g.sigcode1), "\n") + gothrow("fault") + case _SIGFPE: + switch g.sigcode0 { + case _FPE_INTDIV: + panicdivide() + case _FPE_INTOVF: + panicoverflow() + } + panicfloat() + } + panic(errorString(gostringnocopy(signame(g.sig)))) +} diff --git a/libgo/go/runtime/sigqueue.go b/libgo/go/runtime/sigqueue.go new file mode 100644 index 00000000000..2d9c24d2d24 --- /dev/null +++ b/libgo/go/runtime/sigqueue.go @@ -0,0 +1,173 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements runtime support for signal handling. 
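sigpanic above is why hardware traps surface as ordinary Go panics: SIGFPE with code FPE_INTDIV becomes panicdivide, which a deferred recover can catch (sketch, not part of the patch):

package main

import "fmt"

func main() {
	defer func() {
		// Prints: recovered: runtime error: integer divide by zero
		fmt.Println("recovered:", recover())
	}()
	x, y := 1, 0
	fmt.Println(x / y) // traps at run time, re-raised as a panic by sigpanic
}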
+// +// Most synchronization primitives are not available from +// the signal handler (it cannot block, allocate memory, or use locks) +// so the handler communicates with a processing goroutine +// via struct sig, below. +// +// sigsend is called by the signal handler to queue a new signal. +// signal_recv is called by the Go program to receive a newly queued signal. +// Synchronization between sigsend and signal_recv is based on the sig.state +// variable. It can be in 3 states: sigIdle, sigReceiving and sigSending. +// sigReceiving means that signal_recv is blocked on sig.Note and there are no +// new pending signals. +// sigSending means that sig.mask *may* contain new pending signals, +// signal_recv can't be blocked in this state. +// sigIdle means that there are no new pending signals and signal_recv is not blocked. +// Transitions between states are done atomically with CAS. +// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask. +// If several sigsends and signal_recv execute concurrently, it can lead to +// unnecessary rechecks of sig.mask, but it cannot lead to missed signals +// nor deadlocks. + +package runtime + +import "unsafe" + +var sig struct { + note note + mask [(_NSIG + 31) / 32]uint32 + wanted [(_NSIG + 31) / 32]uint32 + recv [(_NSIG + 31) / 32]uint32 + state uint32 + inuse bool +} + +const ( + sigIdle = iota + sigReceiving + sigSending +) + +// Called from sighandler to send a signal back out of the signal handling thread. +// Reports whether the signal was sent. If not, the caller typically crashes the program. +func sigsend(s int32) bool { + bit := uint32(1) << uint(s&31) + if !sig.inuse || s < 0 || int(s) >= 32*len(sig.wanted) || sig.wanted[s/32]&bit == 0 { + return false + } + + // Add signal to outgoing queue. + for { + mask := sig.mask[s/32] + if mask&bit != 0 { + return true // signal already in queue + } + if cas(&sig.mask[s/32], mask, mask|bit) { + break + } + } + + // Notify receiver that queue has new bit. +Send: + for { + switch atomicload(&sig.state) { + default: + gothrow("sigsend: inconsistent state") + case sigIdle: + if cas(&sig.state, sigIdle, sigSending) { + break Send + } + case sigSending: + // notification already pending + break Send + case sigReceiving: + if cas(&sig.state, sigReceiving, sigIdle) { + notewakeup(&sig.note) + break Send + } + } + } + + return true +} + +// Called to receive the next queued signal. +// Must only be called from a single goroutine at a time. +func signal_recv() uint32 { + for { + // Serve any signals from local copy. + for i := uint32(0); i < _NSIG; i++ { + if sig.recv[i/32]&(1<<(i&31)) != 0 { + sig.recv[i/32] &^= 1 << (i & 31) + return i + } + } + + // Wait for updates to be available from signal sender. + Receive: + for { + switch atomicload(&sig.state) { + default: + gothrow("signal_recv: inconsistent state") + case sigIdle: + if cas(&sig.state, sigIdle, sigReceiving) { + notetsleepg(&sig.note, -1) + noteclear(&sig.note) + break Receive + } + case sigSending: + if cas(&sig.state, sigSending, sigIdle) { + break Receive + } + } + } + + // Incorporate updates from sender into local copy. + for i := range sig.mask { + sig.recv[i] = xchg(&sig.mask[i], 0) + } + } +} + +// Must only be called from a single goroutine at a time. +func signal_enable(s uint32) { + if !sig.inuse { + // The first call to signal_enable is for us + // to use for initialization. It does not pass + // signal information in m. 
+ sig.inuse = true // enable reception of signals; cannot disable + noteclear(&sig.note) + return + } + + if int(s) >= len(sig.wanted)*32 { + return + } + sig.wanted[s/32] |= 1 << (s & 31) + sigenable_go(s) +} + +// Must only be called from a single goroutine at a time. +func signal_disable(s uint32) { + if int(s) >= len(sig.wanted)*32 { + return + } + sig.wanted[s/32] &^= 1 << (s & 31) + sigdisable_go(s) +} + +// This runs on a foreign stack, without an m or a g. No stack split. +//go:nosplit +func badsignal(sig uintptr) { + cgocallback(unsafe.Pointer(funcPC(sigsend)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig)) +} + +func sigenable_m() +func sigdisable_m() + +func sigenable_go(s uint32) { + g := getg() + g.m.scalararg[0] = uintptr(s) + onM(sigenable_m) +} + +func sigdisable_go(s uint32) { + g := getg() + g.m.scalararg[0] = uintptr(s) + onM(sigdisable_m) +} diff --git a/libgo/go/runtime/slice.go b/libgo/go/runtime/slice.go new file mode 100644 index 00000000000..171087d7f6f --- /dev/null +++ b/libgo/go/runtime/slice.go @@ -0,0 +1,139 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +type sliceStruct struct { + array unsafe.Pointer + len int + cap int +} + +// TODO: take uintptrs instead of int64s? +func makeslice(t *slicetype, len64 int64, cap64 int64) sliceStruct { + // NOTE: The len > MaxMem/elemsize check here is not strictly necessary, + // but it produces a 'len out of range' error instead of a 'cap out of range' error + // when someone does make([]T, bignumber). 'cap out of range' is true too, + // but since the cap is only being supplied implicitly, saying len is clearer. + // See issue 4085. + len := int(len64) + if len64 < 0 || int64(len) != len64 || t.elem.size > 0 && uintptr(len) > maxmem/uintptr(t.elem.size) { + panic(errorString("makeslice: len out of range")) + } + cap := int(cap64) + if cap < len || int64(cap) != cap64 || t.elem.size > 0 && uintptr(cap) > maxmem/uintptr(t.elem.size) { + panic(errorString("makeslice: cap out of range")) + } + p := newarray(t.elem, uintptr(cap)) + return sliceStruct{p, len, cap} +} + +// TODO: take uintptr instead of int64? 
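signal_enable and signal_recv above are the runtime half of package os/signal: Notify enables each requested signal, and a helper goroutine loops in signal_recv forwarding queued signals to the channel. A Unix-only usage sketch:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ch := make(chan os.Signal, 1) // Notify requires a buffered channel
	signal.Notify(ch, syscall.SIGUSR1)

	// Queue a signal to ourselves; sigsend marks it in sig.mask and wakes
	// the receiver, which delivers it on ch.
	syscall.Kill(syscall.Getpid(), syscall.SIGUSR1)
	fmt.Println("received", <-ch)
}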
+func growslice(t *slicetype, old sliceStruct, n int64) sliceStruct { + if n < 1 { + panic(errorString("growslice: invalid n")) + } + + cap64 := int64(old.cap) + n + cap := int(cap64) + + if int64(cap) != cap64 || cap < old.cap || t.elem.size > 0 && uintptr(cap) > maxmem/uintptr(t.elem.size) { + panic(errorString("growslice: cap out of range")) + } + + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadrangepc(old.array, uintptr(old.len*int(t.elem.size)), callerpc, funcPC(growslice)) + } + + et := t.elem + if et.size == 0 { + return sliceStruct{old.array, old.len, cap} + } + + newcap := old.cap + if newcap+newcap < cap { + newcap = cap + } else { + for { + if old.len < 1024 { + newcap += newcap + } else { + newcap += newcap / 4 + } + if newcap >= cap { + break + } + } + } + + if uintptr(newcap) >= maxmem/uintptr(et.size) { + panic(errorString("growslice: cap out of range")) + } + lenmem := uintptr(old.len) * uintptr(et.size) + capmem := goroundupsize(uintptr(newcap) * uintptr(et.size)) + newcap = int(capmem / uintptr(et.size)) + var p unsafe.Pointer + if et.kind&kindNoPointers != 0 { + p = rawmem(capmem) + memclr(add(p, lenmem), capmem-lenmem) + } else { + // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan unitialized memory + p = newarray(et, uintptr(newcap)) + } + memmove(p, old.array, lenmem) + + return sliceStruct{p, old.len, newcap} +} + +func slicecopy(to sliceStruct, fm sliceStruct, width uintptr) int { + if fm.len == 0 || to.len == 0 || width == 0 { + return 0 + } + + n := fm.len + if to.len < n { + n = to.len + } + + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&to)) + pc := funcPC(slicecopy) + racewriterangepc(to.array, uintptr(n*int(width)), callerpc, pc) + racereadrangepc(fm.array, uintptr(n*int(width)), callerpc, pc) + } + + size := uintptr(n) * width + if size == 1 { // common case worth about 2x to do here + // TODO: is this still worth it with new memmove impl? + *(*byte)(to.array) = *(*byte)(fm.array) // known to be a byte pointer + } else { + memmove(to.array, fm.array, size) + } + return int(n) +} + +func slicestringcopy(to []byte, fm string) int { + if len(fm) == 0 || len(to) == 0 { + return 0 + } + + n := len(fm) + if len(to) < n { + n = len(to) + } + + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&to)) + pc := funcPC(slicestringcopy) + racewriterangepc(unsafe.Pointer(&to[0]), uintptr(n), callerpc, pc) + } + + memmove(unsafe.Pointer(&to[0]), unsafe.Pointer((*stringStruct)(unsafe.Pointer(&fm)).str), uintptr(n)) + return n +} diff --git a/libgo/go/runtime/stack.go b/libgo/go/runtime/stack.go new file mode 100644 index 00000000000..f1b7d32d205 --- /dev/null +++ b/libgo/go/runtime/stack.go @@ -0,0 +1,13 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +const ( + // Goroutine preemption request. + // Stored into g->stackguard0 to cause split stack check failure. + // Must be greater than any real sp. + // 0xfffffade in hex. + stackPreempt = ^uintptr(1313) +) diff --git a/libgo/go/runtime/string.go b/libgo/go/runtime/string.go new file mode 100644 index 00000000000..0809f89bc1f --- /dev/null +++ b/libgo/go/runtime/string.go @@ -0,0 +1,298 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
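growslice above doubles the capacity while a slice is small and switches to roughly 25% growth past 1024 elements, then rounds the byte size up to a size class; the policy is visible from ordinary append calls (exact capacities vary with element size and runtime version):

package main

import "fmt"

func main() {
	var s []int
	last := cap(s)
	for i := 0; i < 3000; i++ {
		s = append(s, i)
		if c := cap(s); c != last {
			fmt.Printf("len=%4d cap %4d -> %4d\n", len(s), last, c)
			last = c
		}
	}
}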
+ +package runtime + +import ( + "unsafe" +) + +func concatstrings(a []string) string { + idx := 0 + l := 0 + count := 0 + for i, x := range a { + n := len(x) + if n == 0 { + continue + } + if l+n < l { + gothrow("string concatenation too long") + } + l += n + count++ + idx = i + } + if count == 0 { + return "" + } + if count == 1 { + return a[idx] + } + s, b := rawstring(l) + l = 0 + for _, x := range a { + copy(b[l:], x) + l += len(x) + } + return s +} + +//go:nosplit +func concatstring2(a [2]string) string { + return concatstrings(a[:]) +} + +//go:nosplit +func concatstring3(a [3]string) string { + return concatstrings(a[:]) +} + +//go:nosplit +func concatstring4(a [4]string) string { + return concatstrings(a[:]) +} + +//go:nosplit +func concatstring5(a [5]string) string { + return concatstrings(a[:]) +} + +func slicebytetostring(b []byte) string { + if raceenabled && len(b) > 0 { + racereadrangepc(unsafe.Pointer(&b[0]), + uintptr(len(b)), + getcallerpc(unsafe.Pointer(&b)), + funcPC(slicebytetostring)) + } + s, c := rawstring(len(b)) + copy(c, b) + return s +} + +func slicebytetostringtmp(b []byte) string { + // Return a "string" referring to the actual []byte bytes. + // This is only for use by internal compiler optimizations + // that know that the string form will be discarded before + // the calling goroutine could possibly modify the original + // slice or synchronize with another goroutine. + // Today, the only such case is a m[string(k)] lookup where + // m is a string-keyed map and k is a []byte. + + if raceenabled && len(b) > 0 { + racereadrangepc(unsafe.Pointer(&b[0]), + uintptr(len(b)), + getcallerpc(unsafe.Pointer(&b)), + funcPC(slicebytetostringtmp)) + } + return *(*string)(unsafe.Pointer(&b)) +} + +func stringtoslicebyte(s string) []byte { + b := rawbyteslice(len(s)) + copy(b, s) + return b +} + +func stringtoslicerune(s string) []rune { + // two passes. + // unlike slicerunetostring, no race because strings are immutable. + n := 0 + t := s + for len(s) > 0 { + _, k := charntorune(s) + s = s[k:] + n++ + } + a := rawruneslice(n) + n = 0 + for len(t) > 0 { + r, k := charntorune(t) + t = t[k:] + a[n] = r + n++ + } + return a +} + +func slicerunetostring(a []rune) string { + if raceenabled && len(a) > 0 { + racereadrangepc(unsafe.Pointer(&a[0]), + uintptr(len(a))*unsafe.Sizeof(a[0]), + getcallerpc(unsafe.Pointer(&a)), + funcPC(slicerunetostring)) + } + var dum [4]byte + size1 := 0 + for _, r := range a { + size1 += runetochar(dum[:], r) + } + s, b := rawstring(size1 + 3) + size2 := 0 + for _, r := range a { + // check for race + if size2 >= size1 { + break + } + size2 += runetochar(b[size2:], r) + } + return s[:size2] +} + +type stringStruct struct { + str unsafe.Pointer + len int +} + +func intstring(v int64) string { + s, b := rawstring(4) + n := runetochar(b, rune(v)) + return s[:n] +} + +// stringiter returns the index of the next +// rune after the rune that starts at s[k]. +func stringiter(s string, k int) int { + if k >= len(s) { + // 0 is end of iteration + return 0 + } + + c := s[k] + if c < runeself { + return k + 1 + } + + // multi-char rune + _, n := charntorune(s[k:]) + return k + n +} + +// stringiter2 returns the rune that starts at s[k] +// and the index where the next rune starts. 
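slicebytetostring, stringtoslicerune and slicebytetostringtmp above back the ordinary conversion syntax; a short sketch of what each one services (byte and rune counts differ for multi-byte UTF-8):

package main

import "fmt"

func main() {
	b := []byte("héllo")
	s := string(b) // slicebytetostring: copies the bytes into a new string
	r := []rune(s) // stringtoslicerune: one decoded rune per element
	fmt.Println(len(b), len(s), len(r)) // 6 6 5 ('é' is two bytes, one rune)

	// A []byte key converted only for the lookup is the case the compiler
	// may lower to slicebytetostringtmp, skipping the copy.
	m := map[string]int{"héllo": 1}
	fmt.Println(m[string(b)]) // 1
}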
+func stringiter2(s string, k int) (int, rune) { + if k >= len(s) { + // 0 is end of iteration + return 0, 0 + } + + c := s[k] + if c < runeself { + return k + 1, rune(c) + } + + // multi-char rune + r, n := charntorune(s[k:]) + return k + n, r +} + +// rawstring allocates storage for a new string. The returned +// string and byte slice both refer to the same storage. +// The storage is not zeroed. Callers should use +// b to set the string contents and then drop b. +func rawstring(size int) (s string, b []byte) { + p := mallocgc(uintptr(size), nil, flagNoScan|flagNoZero) + + (*stringStruct)(unsafe.Pointer(&s)).str = p + (*stringStruct)(unsafe.Pointer(&s)).len = size + + (*slice)(unsafe.Pointer(&b)).array = (*uint8)(p) + (*slice)(unsafe.Pointer(&b)).len = uint(size) + (*slice)(unsafe.Pointer(&b)).cap = uint(size) + + for { + ms := maxstring + if uintptr(size) <= uintptr(ms) || casuintptr((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) { + return + } + } +} + +// rawbyteslice allocates a new byte slice. The byte slice is not zeroed. +func rawbyteslice(size int) (b []byte) { + cap := goroundupsize(uintptr(size)) + p := mallocgc(cap, nil, flagNoScan|flagNoZero) + if cap != uintptr(size) { + memclr(add(p, uintptr(size)), cap-uintptr(size)) + } + + (*slice)(unsafe.Pointer(&b)).array = (*uint8)(p) + (*slice)(unsafe.Pointer(&b)).len = uint(size) + (*slice)(unsafe.Pointer(&b)).cap = uint(cap) + return +} + +// rawruneslice allocates a new rune slice. The rune slice is not zeroed. +func rawruneslice(size int) (b []rune) { + if uintptr(size) > maxmem/4 { + gothrow("out of memory") + } + mem := goroundupsize(uintptr(size) * 4) + p := mallocgc(mem, nil, flagNoScan|flagNoZero) + if mem != uintptr(size)*4 { + memclr(add(p, uintptr(size)*4), mem-uintptr(size)*4) + } + + (*slice)(unsafe.Pointer(&b)).array = (*uint8)(p) + (*slice)(unsafe.Pointer(&b)).len = uint(size) + (*slice)(unsafe.Pointer(&b)).cap = uint(mem / 4) + return +} + +// used by cmd/cgo +func gobytes(p *byte, n int) []byte { + if n == 0 { + return make([]byte, 0) + } + x := make([]byte, n) + memmove(unsafe.Pointer(&x[0]), unsafe.Pointer(p), uintptr(n)) + return x +} + +func gostringsize(n int) string { + s, _ := rawstring(n) + return s +} + +//go:noescape +func findnull(*byte) int + +func gostring(p *byte) string { + l := findnull(p) + if l == 0 { + return "" + } + s, b := rawstring(l) + memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l)) + return s +} + +func gostringn(p *byte, l int) string { + if l == 0 { + return "" + } + s, b := rawstring(l) + memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l)) + return s +} + +func index(s, t string) int { + if len(t) == 0 { + return 0 + } + for i := 0; i < len(s); i++ { + if s[i] == t[0] && hasprefix(s[i:], t) { + return i + } + } + return -1 +} + +func contains(s, t string) bool { + return index(s, t) >= 0 +} + +func hasprefix(s, t string) bool { + return len(s) >= len(t) && s[:len(t)] == t +} diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go new file mode 100644 index 00000000000..fe8f9c9222a --- /dev/null +++ b/libgo/go/runtime/stubs.go @@ -0,0 +1,316 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// Declarations for runtime services implemented in C or assembly. 
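As a user-level illustration of what gostring and gostringn above do (measure up to a NUL with findnull, then copy the bytes into fresh string storage), the following sketch performs the same conversion against an ordinary []byte; goString is an illustrative helper, not a runtime API.

package main

import "fmt"

// goString copies the bytes of buf up to (but not including) the first
// NUL byte, mirroring what gostring above does with findnull, rawstring
// and memmove, only at user level and against a []byte instead of a *byte.
func goString(buf []byte) string {
    n := 0
    for n < len(buf) && buf[n] != 0 {
        n++
    }
    return string(buf[:n]) // the conversion copies, as memmove does there
}

func main() {
    c := []byte{'g', 'c', 'c', 'g', 'o', 0, 'x'}
    fmt.Printf("%q\n", goString(c)) // "gccgo"
}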
+ +const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const +const regSize = 4 << (^uintreg(0) >> 63) // unsafe.Sizeof(uintreg(0)) but an ideal const + +// Should be a built-in for unsafe.Pointer? +//go:nosplit +func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + x) +} + +// n must be a power of 2 +func roundup(p unsafe.Pointer, n uintptr) unsafe.Pointer { + delta := -uintptr(p) & (n - 1) + return unsafe.Pointer(uintptr(p) + delta) +} + +// in runtime.c +func getg() *g +func acquirem() *m +func releasem(mp *m) +func gomcache() *mcache +func readgstatus(*g) uint32 // proc.c + +// mcall switches from the g to the g0 stack and invokes fn(g), +// where g is the goroutine that made the call. +// mcall saves g's current PC/SP in g->sched so that it can be restored later. +// It is up to fn to arrange for that later execution, typically by recording +// g in a data structure, causing something to call ready(g) later. +// mcall returns to the original goroutine g later, when g has been rescheduled. +// fn must not return at all; typically it ends by calling schedule, to let the m +// run other goroutines. +// +// mcall can only be called from g stacks (not g0, not gsignal). +//go:noescape +func mcall(fn func(*g)) + +// onM switches from the g to the g0 stack and invokes fn(). +// When fn returns, onM switches back to the g and returns, +// continuing execution on the g stack. +// If arguments must be passed to fn, they can be written to +// g->m->ptrarg (pointers) and g->m->scalararg (non-pointers) +// before the call and then consulted during fn. +// Similarly, fn can pass return values back in those locations. +// If fn is written in Go, it can be a closure, which avoids the need for +// ptrarg and scalararg entirely. +// After reading values out of ptrarg and scalararg it is conventional +// to zero them to avoid (memory or information) leaks. +// +// If onM is called from a g0 stack, it invokes fn and returns, +// without any stack switches. +// +// If onM is called from a gsignal stack, it crashes the program. +// The implication is that functions used in signal handlers must +// not use onM. +// +// NOTE(rsc): We could introduce a separate onMsignal that is +// like onM but if called from a gsignal stack would just run fn on +// that stack. The caller of onMsignal would be required to save the +// old values of ptrarg/scalararg and restore them when the call +// was finished, in case the signal interrupted an onM sequence +// in progress on the g or g0 stacks. Until there is a clear need for this, +// we just reject onM in signal handling contexts entirely. +// +//go:noescape +func onM(fn func()) + +// onMsignal is like onM but is allowed to be used in code that +// might run on the gsignal stack. Code running on a signal stack +// may be interrupting an onM sequence on the main stack, so +// if the onMsignal calling sequence writes to ptrarg/scalararg, +// it must first save the old values and then restore them when +// finished. As an exception to the rule, it is fine not to save and +// restore the values if the program is trying to crash rather than +// return from the signal handler. +// Once all the runtime is written in Go, there will be no ptrarg/scalararg +// and the distinction between onM and onMsignal (and perhaps mcall) +// can go away. +// +// If onMsignal is called from a gsignal stack, it invokes fn directly, +// without a stack switch. Otherwise onMsignal behaves like onM. 
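The onM comments above describe two ways to hand arguments to code that must run on another stack: the g.m.ptrarg/scalararg side channels, or an ordinary Go closure. The sketch below illustrates only the closure idea with a stand-in dispatcher; runOn is hypothetical, merely runs the function elsewhere, and does not switch real runtime stacks.

package main

import "fmt"

// runOn stands in for onM: it runs fn in some other context (here,
// trivially, another goroutine) and waits for it to finish. Nothing
// here touches real runtime stacks; the point is only the calling
// convention.
func runOn(fn func()) {
    done := make(chan struct{})
    go func() {
        fn()
        close(done)
    }()
    <-done
}

func main() {
    // Because fn may be a closure, arguments and results travel through
    // captured variables rather than through g.m.ptrarg/scalararg.
    x, y := 3, 4
    var sum int
    runOn(func() { sum = x + y })
    fmt.Println(sum) // 7
}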
+// +//go:noescape +func onM_signalok(fn func()) + +func badonm() { + gothrow("onM called from signal goroutine") +} + +// C functions that run on the M stack. +// Call using mcall. +func gosched_m(*g) +func park_m(*g) +func recovery_m(*g) + +// More C functions that run on the M stack. +// Call using onM. +func mcacheRefill_m() +func largeAlloc_m() +func gc_m() +func scavenge_m() +func setFinalizer_m() +func removeFinalizer_m() +func markallocated_m() +func unrollgcprog_m() +func unrollgcproginplace_m() +func setgcpercent_m() +func setmaxthreads_m() +func ready_m() +func deferproc_m() +func goexit_m() +func startpanic_m() +func dopanic_m() +func readmemstats_m() +func writeheapdump_m() + +// memclr clears n bytes starting at ptr. +// in memclr_*.s +//go:noescape +func memclr(ptr unsafe.Pointer, n uintptr) + +// memmove copies n bytes from "from" to "to". +// in memmove_*.s +//go:noescape +func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr) + +func starttheworld() +func stoptheworld() +func newextram() +func lockOSThread() +func unlockOSThread() + +// exported value for testing +var hashLoad = loadFactor + +// in asm_*.s +func fastrand1() uint32 + +// in asm_*.s +//go:noescape +func memeq(a, b unsafe.Pointer, size uintptr) bool + +// noescape hides a pointer from escape analysis. noescape is +// the identity function but escape analysis doesn't think the +// output depends on the input. noescape is inlined and currently +// compiles down to a single xor instruction. +// USE CAREFULLY! +//go:nosplit +func noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +func entersyscall() +func reentersyscall(pc uintptr, sp unsafe.Pointer) +func entersyscallblock() +func exitsyscall() + +func cgocallback(fn, frame unsafe.Pointer, framesize uintptr) +func gogo(buf *gobuf) +func gosave(buf *gobuf) +func read(fd int32, p unsafe.Pointer, n int32) int32 +func close(fd int32) int32 +func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32 + +//go:noescape +func jmpdefer(fv *funcval, argp uintptr) +func exit1(code int32) +func asminit() +func setg(gg *g) +func exit(code int32) +func breakpoint() +func nanotime() int64 +func usleep(usec uint32) + +// careful: cputicks is not guaranteed to be monotonic! In particular, we have +// noticed drift between cpus on certain os/arch combinations. See issue 8976. +func cputicks() int64 + +func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer +func munmap(addr unsafe.Pointer, n uintptr) +func madvise(addr unsafe.Pointer, n uintptr, flags int32) +func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32) +func osyield() +func procyield(cycles uint32) +func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr) +func readgogc() int32 +func purgecachedstats(c *mcache) +func gostringnocopy(b *byte) string +func goexit() + +//go:noescape +func write(fd uintptr, p unsafe.Pointer, n int32) int32 + +//go:noescape +func cas(ptr *uint32, old, new uint32) bool + +//go:noescape +func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool + +//go:noescape +func casuintptr(ptr *uintptr, old, new uintptr) bool + +//go:noescape +func atomicstoreuintptr(ptr *uintptr, new uintptr) + +//go:noescape +func atomicloaduintptr(ptr *uintptr) uintptr + +//go:noescape +func atomicloaduint(ptr *uint) uint + +//go:noescape +func setcallerpc(argp unsafe.Pointer, pc uintptr) + +// getcallerpc returns the program counter (PC) of its caller's caller. 
+// getcallersp returns the stack pointer (SP) of its caller's caller. +// For both, the argp must be a pointer to the caller's first function argument. +// The implementation may or may not use argp, depending on +// the architecture. +// +// For example: +// +// func f(arg1, arg2, arg3 int) { +// pc := getcallerpc(unsafe.Pointer(&arg1)) +// sp := getcallersp(unsafe.Pointer(&arg1)) +// } +// +// These two lines find the PC and SP immediately following +// the call to f (where f will return). +// +// The call to getcallerpc and getcallersp must be done in the +// frame being asked about. It would not be correct for f to pass &arg1 +// to another function g and let g call getcallerpc/getcallersp. +// The call inside g might return information about g's caller or +// information about f's caller or complete garbage. +// +// The result of getcallersp is correct at the time of the return, +// but it may be invalidated by any subsequent call to a function +// that might relocate the stack in order to grow or shrink it. +// A general rule is that the result of getcallersp should be used +// immediately and can only be passed to nosplit functions. + +//go:noescape +func getcallerpc(argp unsafe.Pointer) uintptr + +//go:noescape +func getcallersp(argp unsafe.Pointer) uintptr + +//go:noescape +func asmcgocall(fn, arg unsafe.Pointer) + +//go:noescape +func asmcgocall_errno(fn, arg unsafe.Pointer) int32 + +//go:noescape +func open(name *byte, mode, perm int32) int32 + +//go:noescape +func gotraceback(*bool) int32 + +const _NoArgs = ^uintptr(0) + +func newstack() +func newproc() +func morestack() +func mstart() +func rt0_go() + +// return0 is a stub used to return 0 from deferproc. +// It is called at the very end of deferproc to signal +// the calling Go function that it should not jump +// to deferreturn. +// in asm_*.s +func return0() + +// thunk to call time.now. +func timenow() (sec int64, nsec int32) + +// in asm_*.s +// not called directly; definitions here supply type information for traceback.
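User code cannot call getcallerpc or getcallersp directly, but the exported runtime.Caller gives a comparable view of the caller's position. A small, purely illustrative sketch:

package main

import (
    "fmt"
    "runtime"
)

// whoCalledMe reports the file:line of its caller using the exported
// runtime.Caller API, the user-level counterpart of asking "where will
// this function return to?". skip=1 selects the caller of whoCalledMe,
// much as getcallerpc, handed &arg1 inside f, reports f's caller.
func whoCalledMe() string {
    _, file, line, ok := runtime.Caller(1)
    if !ok {
        return "unknown"
    }
    return fmt.Sprintf("%s:%d", file, line)
}

func main() {
    fmt.Println(whoCalledMe())
}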
+func call16(fn, arg unsafe.Pointer, n, retoffset uint32) +func call32(fn, arg unsafe.Pointer, n, retoffset uint32) +func call64(fn, arg unsafe.Pointer, n, retoffset uint32) +func call128(fn, arg unsafe.Pointer, n, retoffset uint32) +func call256(fn, arg unsafe.Pointer, n, retoffset uint32) +func call512(fn, arg unsafe.Pointer, n, retoffset uint32) +func call1024(fn, arg unsafe.Pointer, n, retoffset uint32) +func call2048(fn, arg unsafe.Pointer, n, retoffset uint32) +func call4096(fn, arg unsafe.Pointer, n, retoffset uint32) +func call8192(fn, arg unsafe.Pointer, n, retoffset uint32) +func call16384(fn, arg unsafe.Pointer, n, retoffset uint32) +func call32768(fn, arg unsafe.Pointer, n, retoffset uint32) +func call65536(fn, arg unsafe.Pointer, n, retoffset uint32) +func call131072(fn, arg unsafe.Pointer, n, retoffset uint32) +func call262144(fn, arg unsafe.Pointer, n, retoffset uint32) +func call524288(fn, arg unsafe.Pointer, n, retoffset uint32) +func call1048576(fn, arg unsafe.Pointer, n, retoffset uint32) +func call2097152(fn, arg unsafe.Pointer, n, retoffset uint32) +func call4194304(fn, arg unsafe.Pointer, n, retoffset uint32) +func call8388608(fn, arg unsafe.Pointer, n, retoffset uint32) +func call16777216(fn, arg unsafe.Pointer, n, retoffset uint32) +func call33554432(fn, arg unsafe.Pointer, n, retoffset uint32) +func call67108864(fn, arg unsafe.Pointer, n, retoffset uint32) +func call134217728(fn, arg unsafe.Pointer, n, retoffset uint32) +func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32) +func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32) +func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32) diff --git a/libgo/go/runtime/syscall_windows.go b/libgo/go/runtime/syscall_windows.go new file mode 100644 index 00000000000..efbcab510da --- /dev/null +++ b/libgo/go/runtime/syscall_windows.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +type callbacks struct { + lock mutex + ctxt [cb_max]*wincallbackcontext + n int +} + +func (c *wincallbackcontext) isCleanstack() bool { + return c.cleanstack +} + +func (c *wincallbackcontext) setCleanstack(cleanstack bool) { + c.cleanstack = cleanstack +} + +var ( + cbs callbacks + cbctxts **wincallbackcontext = &cbs.ctxt[0] // to simplify access to cbs.ctxt in sys_windows_*.s + + callbackasm byte // type isn't really byte, it's code in runtime +) + +// callbackasmAddr returns address of runtime.callbackasm +// function adjusted by i. +// runtime.callbackasm is just a series of CALL instructions +// (each is 5 bytes long), and we want callback to arrive at +// correspondent call instruction instead of start of +// runtime.callbackasm. 
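compileCallback below keeps a bounded table of already-compiled callbacks and hands back the existing callbackasm slot when the same Go function and stack convention are registered again. Stripped of the assembly details, that registration pattern looks roughly like the following sketch; registry, Register, and maxCallbacks are illustrative names only.

package main

import (
    "errors"
    "fmt"
)

const maxCallbacks = 64 // stands in for cb_max, whose real value is not shown here

// registry hands out one small integer slot per distinct key, the way
// compileCallback hands out one callbackasm slot per distinct
// (function, cleanstack) pair, reusing slots and capping their number.
type registry struct {
    keys []string
}

func (r *registry) Register(key string) (int, error) {
    for i, k := range r.keys { // reuse an existing slot for the same key
        if k == key {
            return i, nil
        }
    }
    if len(r.keys) >= maxCallbacks {
        return 0, errors.New("too many callback functions")
    }
    r.keys = append(r.keys, key)
    return len(r.keys) - 1, nil
}

func main() {
    var r registry
    a, _ := r.Register("draw")
    b, _ := r.Register("draw") // same key, so the same slot comes back
    fmt.Println(a == b)        // true
}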
+func callbackasmAddr(i int) uintptr { + return uintptr(add(unsafe.Pointer(&callbackasm), uintptr(i*5))) +} + +func compileCallback(fn eface, cleanstack bool) (code uintptr) { + if fn._type == nil || (fn._type.kind&kindMask) != kindFunc { + panic("compilecallback: not a function") + } + ft := (*functype)(unsafe.Pointer(fn._type)) + if len(ft.out) != 1 { + panic("compilecallback: function must have one output parameter") + } + uintptrSize := unsafe.Sizeof(uintptr(0)) + if t := (**_type)(unsafe.Pointer(&ft.out[0])); (*t).size != uintptrSize { + panic("compilecallback: output parameter size is wrong") + } + argsize := uintptr(0) + for _, t := range (*[1024](*_type))(unsafe.Pointer(&ft.in[0]))[:len(ft.in)] { + if (*t).size > uintptrSize { + panic("compilecallback: input parameter size is wrong") + } + argsize += uintptrSize + } + + lock(&cbs.lock) + defer unlock(&cbs.lock) + + n := cbs.n + for i := 0; i < n; i++ { + if cbs.ctxt[i].gobody == fn.data && cbs.ctxt[i].isCleanstack() == cleanstack { + return callbackasmAddr(i) + } + } + if n >= cb_max { + gothrow("too many callback functions") + } + + c := new(wincallbackcontext) + c.gobody = fn.data + c.argsize = argsize + c.setCleanstack(cleanstack) + if cleanstack && argsize != 0 { + c.restorestack = argsize + } else { + c.restorestack = 0 + } + cbs.ctxt[n] = c + cbs.n++ + + return callbackasmAddr(n) +} + +func getLoadLibrary() uintptr + +//go:nosplit +func syscall_loadlibrary(filename *uint16) (handle, err uintptr) { + var c libcall + c.fn = getLoadLibrary() + c.n = 1 + c.args = uintptr(unsafe.Pointer(&filename)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + handle = c.r1 + if handle == 0 { + err = c.err + } + return +} + +func getGetProcAddress() uintptr + +//go:nosplit +func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) { + var c libcall + c.fn = getGetProcAddress() + c.n = 2 + c.args = uintptr(unsafe.Pointer(&handle)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + outhandle = c.r1 + if outhandle == 0 { + err = c.err + } + return +} + +//go:nosplit +func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + var c libcall + c.fn = fn + c.n = nargs + c.args = uintptr(unsafe.Pointer(&a1)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + return c.r1, c.r2, c.err +} + +//go:nosplit +func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + var c libcall + c.fn = fn + c.n = nargs + c.args = uintptr(unsafe.Pointer(&a1)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + return c.r1, c.r2, c.err +} + +//go:nosplit +func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { + var c libcall + c.fn = fn + c.n = nargs + c.args = uintptr(unsafe.Pointer(&a1)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + return c.r1, c.r2, c.err +} + +//go:nosplit +func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) { + var c libcall + c.fn = fn + c.n = nargs + c.args = uintptr(unsafe.Pointer(&a1)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + return c.r1, c.r2, c.err +} + +//go:nosplit +func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + var c libcall + c.fn = fn + c.n = nargs + c.args = uintptr(unsafe.Pointer(&a1)) + 
cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + return c.r1, c.r2, c.err +} diff --git a/libgo/go/runtime/time.go b/libgo/go/runtime/time.go new file mode 100644 index 00000000000..11862c7e235 --- /dev/null +++ b/libgo/go/runtime/time.go @@ -0,0 +1,289 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Time-related runtime and pieces of package time. + +package runtime + +import "unsafe" + +// Package time knows the layout of this structure. +// If this struct changes, adjust ../time/sleep.go:/runtimeTimer. +// For GOOS=nacl, package syscall knows the layout of this structure. +// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer. +type timer struct { + i int // heap index + + // Timer wakes up at when, and then at when+period, ... (period > 0 only) + // each time calling f(now, arg) in the timer goroutine, so f must be + // a well-behaved function and not block. + when int64 + period int64 + f func(interface{}, uintptr) + arg interface{} + seq uintptr +} + +var timers struct { + lock mutex + gp *g + created bool + sleeping bool + rescheduling bool + waitnote note + t []*timer +} + +// nacl fake time support - time in nanoseconds since 1970 +var faketime int64 + +// Package time APIs. +// Godoc uses the comments in package time, not these. + +// time.now is implemented in assembly. + +// Sleep puts the current goroutine to sleep for at least ns nanoseconds. +func timeSleep(ns int64) { + if ns <= 0 { + return + } + + t := new(timer) + t.when = nanotime() + ns + t.f = goroutineReady + t.arg = getg() + lock(&timers.lock) + addtimerLocked(t) + goparkunlock(&timers.lock, "sleep") +} + +// startTimer adds t to the timer heap. +func startTimer(t *timer) { + if raceenabled { + racerelease(unsafe.Pointer(t)) + } + addtimer(t) +} + +// stopTimer removes t from the timer heap if it is there. +// It returns true if t was removed, false if t wasn't even there. +func stopTimer(t *timer) bool { + return deltimer(t) +} + +// Go runtime. + +// Ready the goroutine arg. +func goroutineReady(arg interface{}, seq uintptr) { + goready(arg.(*g)) +} + +func addtimer(t *timer) { + lock(&timers.lock) + addtimerLocked(t) + unlock(&timers.lock) +} + +// Add a timer to the heap and start or kick the timer proc. +// If the new timer is earlier than any of the others. +// Timers are locked. +func addtimerLocked(t *timer) { + // when must never be negative; otherwise timerproc will overflow + // during its delta calculation and never expire other runtime·timers. + if t.when < 0 { + t.when = 1<<63 - 1 + } + t.i = len(timers.t) + timers.t = append(timers.t, t) + siftupTimer(t.i) + if t.i == 0 { + // siftup moved to top: new earliest deadline. + if timers.sleeping { + timers.sleeping = false + notewakeup(&timers.waitnote) + } + if timers.rescheduling { + timers.rescheduling = false + goready(timers.gp) + } + } + if !timers.created { + timers.created = true + go timerproc() + } +} + +// Delete timer t from the heap. +// Do not need to update the timerproc: if it wakes up early, no big deal. +func deltimer(t *timer) bool { + // Dereference t so that any panic happens before the lock is held. + // Discard result, because t might be moving in the heap. + _ = t.i + + lock(&timers.lock) + // t may not be registered anymore and may have + // a bogus i (typically 0, if generated by Go). + // Verify it before proceeding. 
+ i := t.i + last := len(timers.t) - 1 + if i < 0 || i > last || timers.t[i] != t { + unlock(&timers.lock) + return false + } + if i != last { + timers.t[i] = timers.t[last] + timers.t[i].i = i + } + timers.t[last] = nil + timers.t = timers.t[:last] + if i != last { + siftupTimer(i) + siftdownTimer(i) + } + unlock(&timers.lock) + return true +} + +// Timerproc runs the time-driven events. +// It sleeps until the next event in the timers heap. +// If addtimer inserts a new earlier event, addtimer1 wakes timerproc early. +func timerproc() { + timers.gp = getg() + timers.gp.issystem = true + for { + lock(&timers.lock) + timers.sleeping = false + now := nanotime() + delta := int64(-1) + for { + if len(timers.t) == 0 { + delta = -1 + break + } + t := timers.t[0] + delta = t.when - now + if delta > 0 { + break + } + if t.period > 0 { + // leave in heap but adjust next time to fire + t.when += t.period * (1 + -delta/t.period) + siftdownTimer(0) + } else { + // remove from heap + last := len(timers.t) - 1 + if last > 0 { + timers.t[0] = timers.t[last] + timers.t[0].i = 0 + } + timers.t[last] = nil + timers.t = timers.t[:last] + if last > 0 { + siftdownTimer(0) + } + t.i = -1 // mark as removed + } + f := t.f + arg := t.arg + seq := t.seq + unlock(&timers.lock) + if raceenabled { + raceacquire(unsafe.Pointer(t)) + } + f(arg, seq) + lock(&timers.lock) + } + if delta < 0 || faketime > 0 { + // No timers left - put goroutine to sleep. + timers.rescheduling = true + goparkunlock(&timers.lock, "timer goroutine (idle)") + continue + } + // At least one timer pending. Sleep until then. + timers.sleeping = true + noteclear(&timers.waitnote) + unlock(&timers.lock) + notetsleepg(&timers.waitnote, delta) + } +} + +func timejump() *g { + if faketime == 0 { + return nil + } + + lock(&timers.lock) + if !timers.created || len(timers.t) == 0 { + unlock(&timers.lock) + return nil + } + + var gp *g + if faketime < timers.t[0].when { + faketime = timers.t[0].when + if timers.rescheduling { + timers.rescheduling = false + gp = timers.gp + } + } + unlock(&timers.lock) + return gp +} + +// Heap maintenance algorithms. + +func siftupTimer(i int) { + t := timers.t + when := t[i].when + tmp := t[i] + for i > 0 { + p := (i - 1) / 4 // parent + if when >= t[p].when { + break + } + t[i] = t[p] + t[i].i = i + t[p] = tmp + t[p].i = p + i = p + } +} + +func siftdownTimer(i int) { + t := timers.t + n := len(t) + when := t[i].when + tmp := t[i] + for { + c := i*4 + 1 // left child + c3 := c + 2 // mid child + if c >= n { + break + } + w := t[c].when + if c+1 < n && t[c+1].when < w { + w = t[c+1].when + c++ + } + if c3 < n { + w3 := t[c3].when + if c3+1 < n && t[c3+1].when < w3 { + w3 = t[c3+1].when + c3++ + } + if w3 < w { + w = w3 + c = c3 + } + } + if w >= when { + break + } + t[i] = t[c] + t[i].i = i + t[c] = tmp + t[c].i = c + i = c + } +} diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go deleted file mode 100644 index a5ed8af7a85..00000000000 --- a/libgo/go/runtime/type.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* - * Runtime type representation. - * This file exists only to provide types that 6l can turn into - * DWARF information for use by gdb. Nothing else uses these. - * They should match the same types in ../reflect/type.go. - * For comments see ../reflect/type.go. 
- */ - -package runtime - -import "unsafe" - -type rtype struct { - kind uint8 - align uint8 - fieldAlign uint8 - size uintptr - hash uint32 - - hashfn func(unsafe.Pointer, uintptr) uintptr - equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool - - gc unsafe.Pointer - string *string - *uncommonType - ptrToThis *rtype - zero unsafe.Pointer -} - -type _method struct { - name *string - pkgPath *string - mtyp *rtype - typ *rtype - tfn unsafe.Pointer -} - -type uncommonType struct { - name *string - pkgPath *string - methods []_method -} - -type _imethod struct { - name *string - pkgPath *string - typ *rtype -} - -type interfaceType struct { - rtype - methods []_imethod -} diff --git a/libgo/go/runtime/typekind.go b/libgo/go/runtime/typekind.go new file mode 100644 index 00000000000..b64ec44f9ec --- /dev/null +++ b/libgo/go/runtime/typekind.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +const ( + kindBool = _KindBool + kindInt = _KindInt + kindInt8 = _KindInt8 + kindInt16 = _KindInt16 + kindInt32 = _KindInt32 + kindInt64 = _KindInt64 + kindUint = _KindUint + kindUint8 = _KindUint8 + kindUint16 = _KindUint16 + kindUint32 = _KindUint32 + kindUint64 = _KindUint64 + kindUintptr = _KindUintptr + kindFloat32 = _KindFloat32 + kindFloat64 = _KindFloat64 + kindComplex64 = _KindComplex64 + kindComplex128 = _KindComplex128 + kindArray = _KindArray + kindChan = _KindChan + kindFunc = _KindFunc + kindInterface = _KindInterface + kindMap = _KindMap + kindPtr = _KindPtr + kindSlice = _KindSlice + kindString = _KindString + kindStruct = _KindStruct + kindUnsafePointer = _KindUnsafePointer + + kindDirectIface = _KindDirectIface + kindGCProg = _KindGCProg + kindNoPointers = _KindNoPointers + kindMask = _KindMask +) + +// isDirectIface reports whether t is stored directly in an interface value. +func isDirectIface(t *_type) bool { + return t.kind&kindDirectIface != 0 +} diff --git a/libgo/go/runtime/vlrt.go b/libgo/go/runtime/vlrt.go new file mode 100644 index 00000000000..6370732ca0d --- /dev/null +++ b/libgo/go/runtime/vlrt.go @@ -0,0 +1,258 @@ +// Inferno's libkern/vlrt-arm.c +// http://code.google.com/p/inferno-os/source/browse/libkern/vlrt-arm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. +// Portions Copyright 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build arm 386 + +package runtime + +import "unsafe" + +const ( + sign32 = 1 << (32 - 1) + sign64 = 1 << (64 - 1) +) + +func float64toint64(d float64) (y uint64) { + _d2v(&y, d) + return +} + +func float64touint64(d float64) (y uint64) { + _d2v(&y, d) + return +} + +func int64tofloat64(y int64) float64 { + if y < 0 { + return -uint64tofloat64(-uint64(y)) + } + return uint64tofloat64(uint64(y)) +} + +func uint64tofloat64(y uint64) float64 { + hi := float64(uint32(y >> 32)) + lo := float64(uint32(y)) + d := hi*(1<<32) + lo + return d +} + +func _d2v(y *uint64, d float64) { + x := *(*uint64)(unsafe.Pointer(&d)) + + xhi := uint32(x>>32)&0xfffff | 0x100000 + xlo := uint32(x) + sh := 1075 - int32(uint32(x>>52)&0x7ff) + + var ylo, yhi uint32 + if sh >= 0 { + sh := uint32(sh) + /* v = (hi||lo) >> sh */ + if sh < 32 { + if sh == 0 { + ylo = xlo + yhi = xhi + } else { + ylo = xlo>>sh | xhi<<(32-sh) + yhi = xhi >> sh + } + } else { + if sh == 32 { + ylo = xhi + } else if sh < 64 { + ylo = xhi >> (sh - 32) + } + } + } else { + /* v = (hi||lo) << -sh */ + sh := uint32(-sh) + if sh <= 11 { + ylo = xlo << sh + yhi = xhi<<sh | xlo>>(32-sh) + } else { + /* overflow */ + yhi = uint32(d) /* causes something awful */ + } + } + if x&sign64 != 0 { + if ylo != 0 { + ylo = -ylo + yhi = ^yhi + } else { + yhi = -yhi + } + } + + *y = uint64(yhi)<<32 | uint64(ylo) +} + +func uint64div(n, d uint64) uint64 { + // Check for 32 bit operands + if uint32(n>>32) == 0 && uint32(d>>32) == 0 { + if uint32(d) == 0 { + panicdivide() + } + return uint64(uint32(n) / uint32(d)) + } + q, _ := dodiv(n, d) + return q +} + +func uint64mod(n, d uint64) uint64 { + // Check for 32 bit operands + if uint32(n>>32) == 0 && uint32(d>>32) == 0 { + if uint32(d) == 0 { + panicdivide() + } + return uint64(uint32(n) % uint32(d)) + } + _, r := dodiv(n, d) + return r +} + +func int64div(n, d int64) int64 { + // Check for 32 bit operands + if int64(int32(n)) == n && int64(int32(d)) == d { + if int32(n) == -0x80000000 && int32(d) == -1 { + // special case: 32-bit -0x80000000 / -1 = -0x80000000, + // but 64-bit -0x80000000 / -1 = 0x80000000. + return 0x80000000 + } + if int32(d) == 0 { + panicdivide() + } + return int64(int32(n) / int32(d)) + } + + nneg := n < 0 + dneg := d < 0 + if nneg { + n = -n + } + if dneg { + d = -d + } + uq, _ := dodiv(uint64(n), uint64(d)) + q := int64(uq) + if nneg != dneg { + q = -q + } + return q +} + +func int64mod(n, d int64) int64 { + // Check for 32 bit operands + if int64(int32(n)) == n && int64(int32(d)) == d { + if int32(d) == 0 { + panicdivide() + } + return int64(int32(n) % int32(d)) + } + + nneg := n < 0 + if nneg { + n = -n + } + if d < 0 { + d = -d + } + _, ur := dodiv(uint64(n), uint64(d)) + r := int64(ur) + if nneg { + r = -r + } + return r +} + +//go:noescape +func _mul64by32(lo64 *uint64, a uint64, b uint32) (hi32 uint32) + +//go:noescape +func _div64by32(a uint64, b uint32, r *uint32) (q uint32) + +func dodiv(n, d uint64) (q, r uint64) { + if GOARCH == "arm" { + // arm doesn't have a division instruction, so + // slowdodiv is the best that we can do. + // TODO: revisit for arm64. 
+ return slowdodiv(n, d) + } + + if d > n { + return 0, n + } + + if uint32(d>>32) != 0 { + t := uint32(n>>32) / uint32(d>>32) + var lo64 uint64 + hi32 := _mul64by32(&lo64, d, t) + if hi32 != 0 || lo64 > n { + return slowdodiv(n, d) + } + return uint64(t), n - lo64 + } + + // d is 32 bit + var qhi uint32 + if uint32(n>>32) >= uint32(d) { + if uint32(d) == 0 { + panicdivide() + } + qhi = uint32(n>>32) / uint32(d) + n -= uint64(uint32(d)*qhi) << 32 + } else { + qhi = 0 + } + + var rlo uint32 + qlo := _div64by32(n, uint32(d), &rlo) + return uint64(qhi)<<32 + uint64(qlo), uint64(rlo) +} + +func slowdodiv(n, d uint64) (q, r uint64) { + if d == 0 { + panicdivide() + } + + // Set up the divisor and find the number of iterations needed. + capn := n + if n >= sign64 { + capn = sign64 + } + i := 0 + for d < capn { + d <<= 1 + i++ + } + + for ; i >= 0; i-- { + q <<= 1 + if n >= d { + n -= d + q |= 1 + } + d >>= 1 + } + return q, n +} diff --git a/libgo/go/strconv/atoi.go b/libgo/go/strconv/atoi.go index cbf0380ec82..9ecec5a58b9 100644 --- a/libgo/go/strconv/atoi.go +++ b/libgo/go/strconv/atoi.go @@ -31,7 +31,7 @@ func rangeError(fn, str string) *NumError { return &NumError{fn, str, ErrRange} } -const intSize = 32 << uint(^uint(0)>>63) +const intSize = 32 << (^uint(0) >> 63) // IntSize is the size in bits of an int or uint value. const IntSize = intSize diff --git a/libgo/go/strconv/isprint.go b/libgo/go/strconv/isprint.go index 91f1795356b..80738ed7111 100644 --- a/libgo/go/strconv/isprint.go +++ b/libgo/go/strconv/isprint.go @@ -3,20 +3,19 @@ // license that can be found in the LICENSE file. // DO NOT EDIT. GENERATED BY -// go run makeisprint.go >x && mv x isprint.go +// go run makeisprint.go -output isprint.go package strconv -// (470+136+60)*2 + (218)*4 = 2204 bytes +// (468+138+67)*2 + (326)*4 = 2650 bytes var isPrint16 = []uint16{ 0x0020, 0x007e, 0x00a1, 0x0377, - 0x037a, 0x037e, - 0x0384, 0x0527, - 0x0531, 0x0556, + 0x037a, 0x037f, + 0x0384, 0x0556, 0x0559, 0x058a, - 0x058f, 0x05c7, + 0x058d, 0x05c7, 0x05d0, 0x05ea, 0x05f0, 0x05f4, 0x0606, 0x061b, @@ -27,7 +26,7 @@ var isPrint16 = []uint16{ 0x0800, 0x082d, 0x0830, 0x085b, 0x085e, 0x085e, - 0x08a0, 0x08ac, + 0x08a0, 0x08b2, 0x08e4, 0x098c, 0x098f, 0x0990, 0x0993, 0x09b2, @@ -72,18 +71,17 @@ var isPrint16 = []uint16{ 0x0bd0, 0x0bd0, 0x0bd7, 0x0bd7, 0x0be6, 0x0bfa, - 0x0c01, 0x0c39, + 0x0c00, 0x0c39, 0x0c3d, 0x0c4d, 0x0c55, 0x0c59, 0x0c60, 0x0c63, 0x0c66, 0x0c6f, - 0x0c78, 0x0c7f, - 0x0c82, 0x0cb9, + 0x0c78, 0x0cb9, 0x0cbc, 0x0ccd, 0x0cd5, 0x0cd6, 0x0cde, 0x0ce3, 0x0ce6, 0x0cf2, - 0x0d02, 0x0d3a, + 0x0d01, 0x0d3a, 0x0d3d, 0x0d4e, 0x0d57, 0x0d57, 0x0d60, 0x0d63, @@ -94,6 +92,7 @@ var isPrint16 = []uint16{ 0x0dc0, 0x0dc6, 0x0dca, 0x0dca, 0x0dcf, 0x0ddf, + 0x0de6, 0x0def, 0x0df2, 0x0df4, 0x0e01, 0x0e3a, 0x0e3f, 0x0e5b, @@ -120,7 +119,7 @@ var isPrint16 = []uint16{ 0x1380, 0x1399, 0x13a0, 0x13f4, 0x1400, 0x169c, - 0x16a0, 0x16f0, + 0x16a0, 0x16f8, 0x1700, 0x1714, 0x1720, 0x1736, 0x1740, 0x1753, @@ -133,8 +132,7 @@ var isPrint16 = []uint16{ 0x1820, 0x1877, 0x1880, 0x18aa, 0x18b0, 0x18f5, - 0x1900, 0x191c, - 0x1920, 0x192b, + 0x1900, 0x192b, 0x1930, 0x193b, 0x1940, 0x1940, 0x1944, 0x196d, @@ -147,6 +145,7 @@ var isPrint16 = []uint16{ 0x1a7f, 0x1a89, 0x1a90, 0x1a99, 0x1aa0, 0x1aad, + 0x1ab0, 0x1abe, 0x1b00, 0x1b4b, 0x1b50, 0x1b7c, 0x1b80, 0x1bf3, @@ -154,8 +153,8 @@ var isPrint16 = []uint16{ 0x1c3b, 0x1c49, 0x1c4d, 0x1c7f, 0x1cc0, 0x1cc7, - 0x1cd0, 0x1cf6, - 0x1d00, 0x1de6, + 0x1cd0, 0x1cf9, + 0x1d00, 0x1df5, 0x1dfc, 0x1f15, 0x1f18, 0x1f1d, 0x1f20, 
0x1f45, @@ -168,21 +167,23 @@ var isPrint16 = []uint16{ 0x2030, 0x205e, 0x2070, 0x2071, 0x2074, 0x209c, - 0x20a0, 0x20ba, + 0x20a0, 0x20bd, 0x20d0, 0x20f0, 0x2100, 0x2189, - 0x2190, 0x23f3, + 0x2190, 0x23fa, 0x2400, 0x2426, 0x2440, 0x244a, - 0x2460, 0x2b4c, - 0x2b50, 0x2b59, + 0x2460, 0x2b73, + 0x2b76, 0x2b95, + 0x2b98, 0x2bb9, + 0x2bbd, 0x2bd1, 0x2c00, 0x2cf3, 0x2cf9, 0x2d27, 0x2d2d, 0x2d2d, 0x2d30, 0x2d67, 0x2d6f, 0x2d70, 0x2d7f, 0x2d96, - 0x2da0, 0x2e3b, + 0x2da0, 0x2e42, 0x2e80, 0x2ef3, 0x2f00, 0x2fd5, 0x2ff0, 0x2ffb, @@ -196,11 +197,10 @@ var isPrint16 = []uint16{ 0xa000, 0xa48c, 0xa490, 0xa4c6, 0xa4d0, 0xa62b, - 0xa640, 0xa697, - 0xa69f, 0xa6f7, - 0xa700, 0xa793, - 0xa7a0, 0xa7aa, - 0xa7f8, 0xa82b, + 0xa640, 0xa6f7, + 0xa700, 0xa7ad, + 0xa7b0, 0xa7b1, + 0xa7f7, 0xa82b, 0xa830, 0xa839, 0xa840, 0xa877, 0xa880, 0xa8c4, @@ -209,17 +209,16 @@ var isPrint16 = []uint16{ 0xa900, 0xa953, 0xa95f, 0xa97c, 0xa980, 0xa9d9, - 0xa9de, 0xa9df, - 0xaa00, 0xaa36, + 0xa9de, 0xaa36, 0xaa40, 0xaa4d, 0xaa50, 0xaa59, - 0xaa5c, 0xaa7b, - 0xaa80, 0xaac2, + 0xaa5c, 0xaac2, 0xaadb, 0xaaf6, 0xab01, 0xab06, 0xab09, 0xab0e, 0xab11, 0xab16, - 0xab20, 0xab2e, + 0xab20, 0xab5f, + 0xab64, 0xab65, 0xabc0, 0xabed, 0xabf0, 0xabf9, 0xac00, 0xd7a3, @@ -235,7 +234,7 @@ var isPrint16 = []uint16{ 0xfd92, 0xfdc7, 0xfdf0, 0xfdfd, 0xfe00, 0xfe19, - 0xfe20, 0xfe26, + 0xfe20, 0xfe2d, 0xfe30, 0xfe6b, 0xfe70, 0xfefc, 0xff01, 0xffbe, @@ -252,15 +251,12 @@ var isNotPrint16 = []uint16{ 0x038b, 0x038d, 0x03a2, + 0x0530, 0x0560, 0x0588, 0x0590, 0x06dd, 0x083f, - 0x08a1, - 0x08ff, - 0x0978, - 0x0980, 0x0984, 0x09a9, 0x09b1, @@ -294,10 +290,10 @@ var isNotPrint16 = []uint16{ 0x0c0d, 0x0c11, 0x0c29, - 0x0c34, 0x0c45, 0x0c49, 0x0c57, + 0x0c80, 0x0c84, 0x0c8d, 0x0c91, @@ -345,7 +341,9 @@ var isNotPrint16 = []uint16{ 0x170d, 0x176d, 0x1771, + 0x191f, 0x1a5f, + 0x1cf7, 0x1f58, 0x1f5a, 0x1f5c, @@ -355,7 +353,7 @@ var isNotPrint16 = []uint16{ 0x1fdc, 0x1ff5, 0x208f, - 0x2700, + 0x2bc9, 0x2c2f, 0x2c5f, 0x2d26, @@ -372,9 +370,12 @@ var isNotPrint16 = []uint16{ 0x318f, 0x321f, 0x32ff, + 0xa69e, 0xa78f, 0xa9ce, + 0xa9ff, 0xab27, + 0xab2f, 0xfb37, 0xfb3d, 0xfb3f, @@ -392,21 +393,31 @@ var isPrint32 = []uint32{ 0x010080, 0x0100fa, 0x010100, 0x010102, 0x010107, 0x010133, - 0x010137, 0x01018a, + 0x010137, 0x01018c, 0x010190, 0x01019b, + 0x0101a0, 0x0101a0, 0x0101d0, 0x0101fd, 0x010280, 0x01029c, 0x0102a0, 0x0102d0, + 0x0102e0, 0x0102fb, 0x010300, 0x010323, 0x010330, 0x01034a, + 0x010350, 0x01037a, 0x010380, 0x0103c3, 0x0103c8, 0x0103d5, 0x010400, 0x01049d, 0x0104a0, 0x0104a9, + 0x010500, 0x010527, + 0x010530, 0x010563, + 0x01056f, 0x01056f, + 0x010600, 0x010736, + 0x010740, 0x010755, + 0x010760, 0x010767, 0x010800, 0x010805, 0x010808, 0x010838, 0x01083c, 0x01083c, - 0x01083f, 0x01085f, + 0x01083f, 0x01089e, + 0x0108a7, 0x0108af, 0x010900, 0x01091b, 0x01091f, 0x010939, 0x01093f, 0x01093f, @@ -417,32 +428,72 @@ var isPrint32 = []uint32{ 0x010a38, 0x010a3a, 0x010a3f, 0x010a47, 0x010a50, 0x010a58, - 0x010a60, 0x010a7f, + 0x010a60, 0x010a9f, + 0x010ac0, 0x010ae6, + 0x010aeb, 0x010af6, 0x010b00, 0x010b35, 0x010b39, 0x010b55, 0x010b58, 0x010b72, - 0x010b78, 0x010b7f, + 0x010b78, 0x010b91, + 0x010b99, 0x010b9c, + 0x010ba9, 0x010baf, 0x010c00, 0x010c48, 0x010e60, 0x010e7e, 0x011000, 0x01104d, 0x011052, 0x01106f, - 0x011080, 0x0110c1, + 0x01107f, 0x0110c1, 0x0110d0, 0x0110e8, 0x0110f0, 0x0110f9, 0x011100, 0x011143, + 0x011150, 0x011176, 0x011180, 0x0111c8, - 0x0111d0, 0x0111d9, + 0x0111cd, 0x0111cd, + 0x0111d0, 0x0111da, + 0x0111e1, 0x0111f4, + 0x011200, 
0x01123d, + 0x0112b0, 0x0112ea, + 0x0112f0, 0x0112f9, + 0x011301, 0x01130c, + 0x01130f, 0x011310, + 0x011313, 0x011339, + 0x01133c, 0x011344, + 0x011347, 0x011348, + 0x01134b, 0x01134d, + 0x011357, 0x011357, + 0x01135d, 0x011363, + 0x011366, 0x01136c, + 0x011370, 0x011374, + 0x011480, 0x0114c7, + 0x0114d0, 0x0114d9, + 0x011580, 0x0115b5, + 0x0115b8, 0x0115c9, + 0x011600, 0x011644, + 0x011650, 0x011659, 0x011680, 0x0116b7, 0x0116c0, 0x0116c9, - 0x012000, 0x01236e, - 0x012400, 0x012462, - 0x012470, 0x012473, + 0x0118a0, 0x0118f2, + 0x0118ff, 0x0118ff, + 0x011ac0, 0x011af8, + 0x012000, 0x012398, + 0x012400, 0x012474, 0x013000, 0x01342e, 0x016800, 0x016a38, + 0x016a40, 0x016a69, + 0x016a6e, 0x016a6f, + 0x016ad0, 0x016aed, + 0x016af0, 0x016af5, + 0x016b00, 0x016b45, + 0x016b50, 0x016b77, + 0x016b7d, 0x016b8f, 0x016f00, 0x016f44, 0x016f50, 0x016f7e, 0x016f8f, 0x016f9f, 0x01b000, 0x01b001, + 0x01bc00, 0x01bc6a, + 0x01bc70, 0x01bc7c, + 0x01bc80, 0x01bc88, + 0x01bc90, 0x01bc99, + 0x01bc9c, 0x01bc9f, 0x01d000, 0x01d0f5, 0x01d100, 0x01d126, 0x01d129, 0x01d172, @@ -458,6 +509,8 @@ var isPrint32 = []uint32{ 0x01d54a, 0x01d6a5, 0x01d6a8, 0x01d7cb, 0x01d7ce, 0x01d7ff, + 0x01e800, 0x01e8c4, + 0x01e8c7, 0x01e8d6, 0x01ee00, 0x01ee24, 0x01ee27, 0x01ee3b, 0x01ee42, 0x01ee42, @@ -469,28 +522,30 @@ var isPrint32 = []uint32{ 0x01f000, 0x01f02b, 0x01f030, 0x01f093, 0x01f0a0, 0x01f0ae, - 0x01f0b1, 0x01f0be, - 0x01f0c1, 0x01f0df, - 0x01f100, 0x01f10a, + 0x01f0b1, 0x01f0f5, + 0x01f100, 0x01f10c, 0x01f110, 0x01f16b, 0x01f170, 0x01f19a, 0x01f1e6, 0x01f202, 0x01f210, 0x01f23a, 0x01f240, 0x01f248, 0x01f250, 0x01f251, - 0x01f300, 0x01f320, - 0x01f330, 0x01f37c, - 0x01f380, 0x01f393, - 0x01f3a0, 0x01f3ca, - 0x01f3e0, 0x01f3f0, - 0x01f400, 0x01f4fc, - 0x01f500, 0x01f53d, - 0x01f540, 0x01f543, - 0x01f550, 0x01f567, - 0x01f5fb, 0x01f640, - 0x01f645, 0x01f64f, - 0x01f680, 0x01f6c5, + 0x01f300, 0x01f32c, + 0x01f330, 0x01f37d, + 0x01f380, 0x01f3ce, + 0x01f3d4, 0x01f3f7, + 0x01f400, 0x01f54a, + 0x01f550, 0x01f642, + 0x01f645, 0x01f6cf, + 0x01f6e0, 0x01f6ec, + 0x01f6f0, 0x01f6f3, 0x01f700, 0x01f773, + 0x01f780, 0x01f7d4, + 0x01f800, 0x01f80b, + 0x01f810, 0x01f847, + 0x01f850, 0x01f859, + 0x01f860, 0x01f887, + 0x01f890, 0x01f8ad, 0x020000, 0x02a6d6, 0x02a700, 0x02b734, 0x02b740, 0x02b81d, @@ -503,7 +558,6 @@ var isNotPrint32 = []uint16{ // add 0x10000 to each entry 0x0027, 0x003b, 0x003e, - 0x031f, 0x039e, 0x0809, 0x0836, @@ -513,6 +567,15 @@ var isNotPrint32 = []uint16{ // add 0x10000 to each entry 0x0a18, 0x10bd, 0x1135, + 0x1212, + 0x1304, + 0x1329, + 0x1331, + 0x1334, + 0x246f, + 0x6a5f, + 0x6b5a, + 0x6b62, 0xd455, 0xd49d, 0xd4ad, @@ -552,11 +615,10 @@ var isNotPrint32 = []uint16{ // add 0x10000 to each entry 0xee8a, 0xeea4, 0xeeaa, + 0xf0c0, 0xf0d0, 0xf12f, - 0xf336, - 0xf3c5, - 0xf43f, - 0xf441, - 0xf4f8, + 0xf4ff, + 0xf57a, + 0xf5a4, } diff --git a/libgo/go/strconv/makeisprint.go b/libgo/go/strconv/makeisprint.go index 216159cc020..588d0a00b53 100644 --- a/libgo/go/strconv/makeisprint.go +++ b/libgo/go/strconv/makeisprint.go @@ -4,15 +4,26 @@ // +build ignore -// makeisprint generates the tables for strconv's compact isPrint. 
+// +// usage: +// +// go run makeisprint.go -output isprint.go +// + package main import ( + "bytes" + "flag" "fmt" - "os" + "go/format" + "io/ioutil" + "log" "unicode" ) +var filename = flag.String("output", "isprint.go", "output file name") + var ( range16 []uint16 except16 []uint16 @@ -110,6 +121,8 @@ func to16(x []uint32) []uint16 { } func main() { + flag.Parse() + rang, except := scan(0, 0xFFFF) range16 = to16(rang) except16 = to16(except) @@ -117,49 +130,58 @@ func main() { for i := rune(0); i <= unicode.MaxRune; i++ { if isPrint(i) != unicode.IsPrint(i) { - fmt.Fprintf(os.Stderr, "%U: isPrint=%v, want %v\n", i, isPrint(i), unicode.IsPrint(i)) - return + log.Fatalf("%U: isPrint=%v, want %v\n", i, isPrint(i), unicode.IsPrint(i)) } } - fmt.Printf(`// Copyright 2013 The Go Authors. All rights reserved. + var buf bytes.Buffer + + fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.` + "\n\n") - fmt.Printf("// DO NOT EDIT. GENERATED BY\n") - fmt.Printf("// go run makeisprint.go >x && mv x isprint.go\n\n") - fmt.Printf("package strconv\n\n") +// license that can be found in the LICENSE file.`+"\n\n") + fmt.Fprintf(&buf, "// DO NOT EDIT. GENERATED BY\n") + fmt.Fprintf(&buf, "// go run makeisprint.go -output isprint.go\n\n") + fmt.Fprintf(&buf, "package strconv\n\n") - fmt.Printf("// (%d+%d+%d)*2 + (%d)*4 = %d bytes\n\n", + fmt.Fprintf(&buf, "// (%d+%d+%d)*2 + (%d)*4 = %d bytes\n\n", len(range16), len(except16), len(except32), len(range32), (len(range16)+len(except16)+len(except32))*2+ (len(range32))*4) - fmt.Printf("var isPrint16 = []uint16{\n") + fmt.Fprintf(&buf, "var isPrint16 = []uint16{\n") for i := 0; i < len(range16); i += 2 { - fmt.Printf("\t%#04x, %#04x,\n", range16[i], range16[i+1]) + fmt.Fprintf(&buf, "\t%#04x, %#04x,\n", range16[i], range16[i+1]) } - fmt.Printf("}\n\n") + fmt.Fprintf(&buf, "}\n\n") - fmt.Printf("var isNotPrint16 = []uint16{\n") + fmt.Fprintf(&buf, "var isNotPrint16 = []uint16{\n") for _, r := range except16 { - fmt.Printf("\t%#04x,\n", r) + fmt.Fprintf(&buf, "\t%#04x,\n", r) } - fmt.Printf("}\n\n") + fmt.Fprintf(&buf, "}\n\n") - fmt.Printf("var isPrint32 = []uint32{\n") + fmt.Fprintf(&buf, "var isPrint32 = []uint32{\n") for i := 0; i < len(range32); i += 2 { - fmt.Printf("\t%#06x, %#06x,\n", range32[i], range32[i+1]) + fmt.Fprintf(&buf, "\t%#06x, %#06x,\n", range32[i], range32[i+1]) } - fmt.Printf("}\n\n") + fmt.Fprintf(&buf, "}\n\n") - fmt.Printf("var isNotPrint32 = []uint16{ // add 0x10000 to each entry\n") + fmt.Fprintf(&buf, "var isNotPrint32 = []uint16{ // add 0x10000 to each entry\n") for _, r := range except32 { if r >= 0x20000 { - fmt.Fprintf(os.Stderr, "%U too big for isNotPrint32\n", r) - return + log.Fatalf("%U too big for isNotPrint32\n", r) } - fmt.Printf("\t%#04x,\n", r-0x10000) + fmt.Fprintf(&buf, "\t%#04x,\n", r-0x10000) + } + fmt.Fprintf(&buf, "}\n") + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { + log.Fatal(err) } - fmt.Printf("}\n") } diff --git a/libgo/go/strconv/quote.go b/libgo/go/strconv/quote.go index aded7e5930c..53d51b5a46a 100644 --- a/libgo/go/strconv/quote.go +++ b/libgo/go/strconv/quote.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
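The makeisprint.go changes above move the generator to the now-common pattern of rendering into a buffer, running the result through go/format, and writing the file named by an -output flag, driven by the go:generate directive added to quote.go below. A minimal generator of that shape, with placeholder package name and contents, might look like this:

// +build ignore

package main

import (
    "bytes"
    "flag"
    "fmt"
    "go/format"
    "io/ioutil"
    "log"
)

var output = flag.String("output", "tables.go", "output file name")

func main() {
    flag.Parse()

    var buf bytes.Buffer
    fmt.Fprintf(&buf, "// DO NOT EDIT. GENERATED.\n\npackage example\n\n")
    fmt.Fprintf(&buf, "var answer = %d\n", 42)

    src, err := format.Source(buf.Bytes()) // gofmt the generated source
    if err != nil {
        log.Fatal(err)
    }
    if err := ioutil.WriteFile(*output, src, 0644); err != nil {
        log.Fatal(err)
    }
}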
+//go:generate go run makeisprint.go -output isprint.go + package strconv import ( @@ -141,11 +143,21 @@ func AppendQuoteRuneToASCII(dst []byte, r rune) []byte { // CanBackquote reports whether the string s can be represented // unchanged as a single-line backquoted string without control -// characters other than space and tab. +// characters other than tab. func CanBackquote(s string) bool { - for i := 0; i < len(s); i++ { - c := s[i] - if (c < ' ' && c != '\t') || c == '`' || c == '\u007F' { + for len(s) > 0 { + r, wid := utf8.DecodeRuneInString(s) + s = s[wid:] + if wid > 1 { + if r == '\ufeff' { + return false // BOMs are invisible and should not be quoted. + } + continue // All other multibyte runes are correctly encoded and assumed printable. + } + if r == utf8.RuneError { + return false + } + if (r < ' ' && r != '\t') || r == '`' || r == '\u007F' { return false } } diff --git a/libgo/go/strconv/quote_test.go b/libgo/go/strconv/quote_test.go index e4b5b6b9fd2..3bf162f987e 100644 --- a/libgo/go/strconv/quote_test.go +++ b/libgo/go/strconv/quote_test.go @@ -146,6 +146,10 @@ var canbackquotetests = []canBackquoteTest{ {`ABCDEFGHIJKLMNOPQRSTUVWXYZ`, true}, {`abcdefghijklmnopqrstuvwxyz`, true}, {`☺`, true}, + {"\x80", false}, + {"a\xe0\xa0z", false}, + {"\ufeffabc", false}, + {"a\ufeffz", false}, } func TestCanBackquote(t *testing.T) { diff --git a/libgo/go/strings/replace.go b/libgo/go/strings/replace.go index 3e05d2057be..4752641be0c 100644 --- a/libgo/go/strings/replace.go +++ b/libgo/go/strings/replace.go @@ -6,7 +6,8 @@ package strings import "io" -// A Replacer replaces a list of strings with replacements. +// Replacer replaces a list of strings with replacements. +// It is safe for concurrent use by multiple goroutines. type Replacer struct { r replacer } @@ -17,15 +18,6 @@ type replacer interface { WriteString(w io.Writer, s string) (n int, err error) } -// byteBitmap represents bytes which are sought for replacement. -// byteBitmap is 256 bits wide, with a bit set for each old byte to be -// replaced. -type byteBitmap [256 / 32]uint32 - -func (m *byteBitmap) set(b byte) { - m[b>>5] |= uint32(1 << (b & 31)) -} - // NewReplacer returns a new Replacer from a list of old, new string pairs. // Replacements are performed in order, without overlapping matches. func NewReplacer(oldnew ...string) *Replacer { @@ -48,30 +40,29 @@ func NewReplacer(oldnew ...string) *Replacer { } if allNewBytes { - bb := &byteReplacer{} - for i := 0; i < len(oldnew); i += 2 { - o, n := oldnew[i][0], oldnew[i+1][0] - if bb.old[o>>5]&uint32(1<<(o&31)) != 0 { - // Later old->new maps do not override previous ones with the same old string. - continue - } - bb.old.set(o) - bb.new[o] = n + r := byteReplacer{} + for i := range r { + r[i] = byte(i) } - return &Replacer{r: bb} + // The first occurrence of old->new map takes precedence + // over the others with the same old string. + for i := len(oldnew) - 2; i >= 0; i -= 2 { + o := oldnew[i][0] + n := oldnew[i+1][0] + r[o] = n + } + return &Replacer{r: &r} } - bs := &byteStringReplacer{} - for i := 0; i < len(oldnew); i += 2 { - o, new := oldnew[i][0], oldnew[i+1] - if bs.old[o>>5]&uint32(1<<(o&31)) != 0 { - // Later old->new maps do not override previous ones with the same old string. - continue - } - bs.old.set(o) - bs.new[o] = []byte(new) + r := byteStringReplacer{} + // The first occurrence of old->new map takes precedence + // over the others with the same old string. 
+ for i := len(oldnew) - 2; i >= 0; i -= 2 { + o := oldnew[i][0] + n := oldnew[i+1] + r[o] = []byte(n) } - return &Replacer{r: bs} + return &Replacer{r: &r} } // Replace returns a copy of s with all replacements performed. @@ -323,6 +314,15 @@ func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) var last, wn int var prevMatchEmpty bool for i := 0; i <= len(s); { + // Fast path: s[i] is not a prefix of any pattern. + if i != len(s) && r.root.priority == 0 { + index := int(r.mapping[s[i]]) + if index == r.tableSize || r.root.table[index] == nil { + i++ + continue + } + } + // Ignore the empty match iff the previous loop found the empty match. val, keylen, match := r.lookup(s[i:], prevMatchEmpty) prevMatchEmpty = match && keylen == 0 @@ -409,24 +409,18 @@ func (r *singleStringReplacer) WriteString(w io.Writer, s string) (n int, err er // byteReplacer is the implementation that's used when all the "old" // and "new" values are single ASCII bytes. -type byteReplacer struct { - // old has a bit set for each old byte that should be replaced. - old byteBitmap - - // replacement byte, indexed by old byte. only valid if - // corresponding old bit is set. - new [256]byte -} +// The array contains replacement bytes indexed by old byte. +type byteReplacer [256]byte func (r *byteReplacer) Replace(s string) string { var buf []byte // lazily allocated for i := 0; i < len(s); i++ { b := s[i] - if r.old[b>>5]&uint32(1<<(b&31)) != 0 { + if r[b] != b { if buf == nil { buf = []byte(s) } - buf[i] = r.new[b] + buf[i] = r[b] } } if buf == nil { @@ -447,9 +441,7 @@ func (r *byteReplacer) WriteString(w io.Writer, s string) (n int, err error) { ncopy := copy(buf, s[:]) s = s[ncopy:] for i, b := range buf[:ncopy] { - if r.old[b>>5]&uint32(1<<(b&31)) != 0 { - buf[i] = r.new[b] - } + buf[i] = r[b] } wn, err := w.Write(buf[:ncopy]) n += wn @@ -461,27 +453,20 @@ func (r *byteReplacer) WriteString(w io.Writer, s string) (n int, err error) { } // byteStringReplacer is the implementation that's used when all the -// "old" values are single ASCII bytes but the "new" values vary in -// size. -type byteStringReplacer struct { - // old has a bit set for each old byte that should be replaced. - old byteBitmap - - // replacement string, indexed by old byte. only valid if - // corresponding old bit is set. - new [256][]byte -} +// "old" values are single ASCII bytes but the "new" values vary in size. +// The array contains replacement byte slices indexed by old byte. +// A nil []byte means that the old byte should not be replaced. +type byteStringReplacer [256][]byte func (r *byteStringReplacer) Replace(s string) string { - newSize := 0 + newSize := len(s) anyChanges := false for i := 0; i < len(s); i++ { b := s[i] - if r.old[b>>5]&uint32(1<<(b&31)) != 0 { + if r[b] != nil { anyChanges = true - newSize += len(r.new[b]) - } else { - newSize++ + // The -1 is because we are replacing 1 byte with len(r[b]) bytes. + newSize += len(r[b]) - 1 } } if !anyChanges { @@ -491,8 +476,8 @@ func (r *byteStringReplacer) Replace(s string) string { bi := buf for i := 0; i < len(s); i++ { b := s[i] - if r.old[b>>5]&uint32(1<<(b&31)) != 0 { - n := copy(bi, r.new[b]) + if r[b] != nil { + n := copy(bi, r[b]) bi = bi[n:] } else { bi[0] = b @@ -502,48 +487,32 @@ func (r *byteStringReplacer) Replace(s string) string { return string(buf) } -// WriteString maintains one buffer that's at most 32KB. The bytes in -// s are enumerated and the buffer is filled. 
If it reaches its -// capacity or a byte has a replacement, the buffer is flushed to w. func (r *byteStringReplacer) WriteString(w io.Writer, s string) (n int, err error) { - // TODO(bradfitz): use io.WriteString with slices of s instead. - bufsize := 32 << 10 - if len(s) < bufsize { - bufsize = len(s) - } - buf := make([]byte, bufsize) - bi := buf[:0] - + sw := getStringWriter(w) + last := 0 for i := 0; i < len(s); i++ { b := s[i] - var new []byte - if r.old[b>>5]&uint32(1<<(b&31)) != 0 { - new = r.new[b] - } else { - bi = append(bi, b) - } - if len(bi) == cap(bi) || (len(bi) > 0 && len(new) > 0) { - nw, err := w.Write(bi) - n += nw - if err != nil { - return n, err - } - bi = buf[:0] + if r[b] == nil { + continue } - if len(new) > 0 { - nw, err := w.Write(new) + if last != i { + nw, err := sw.WriteString(s[last:i]) n += nw if err != nil { return n, err } } - } - if len(bi) > 0 { - nw, err := w.Write(bi) + last = i + 1 + nw, err := w.Write(r[b]) n += nw if err != nil { return n, err } } - return n, nil + if last != len(s) { + var nw int + nw, err = sw.WriteString(s[last:]) + n += nw + } + return } diff --git a/libgo/go/strings/replace_test.go b/libgo/go/strings/replace_test.go index 82e4b6ef08e..77e48b988bc 100644 --- a/libgo/go/strings/replace_test.go +++ b/libgo/go/strings/replace_test.go @@ -308,20 +308,21 @@ func TestReplacer(t *testing.T) { } } +var algorithmTestCases = []struct { + r *Replacer + want string +}{ + {capitalLetters, "*strings.byteReplacer"}, + {htmlEscaper, "*strings.byteStringReplacer"}, + {NewReplacer("12", "123"), "*strings.singleStringReplacer"}, + {NewReplacer("1", "12"), "*strings.byteStringReplacer"}, + {NewReplacer("", "X"), "*strings.genericReplacer"}, + {NewReplacer("a", "1", "b", "12", "cde", "123"), "*strings.genericReplacer"}, +} + // TestPickAlgorithm tests that NewReplacer picks the correct algorithm. func TestPickAlgorithm(t *testing.T) { - testCases := []struct { - r *Replacer - want string - }{ - {capitalLetters, "*strings.byteReplacer"}, - {htmlEscaper, "*strings.byteStringReplacer"}, - {NewReplacer("12", "123"), "*strings.singleStringReplacer"}, - {NewReplacer("1", "12"), "*strings.byteStringReplacer"}, - {NewReplacer("", "X"), "*strings.genericReplacer"}, - {NewReplacer("a", "1", "b", "12", "cde", "123"), "*strings.genericReplacer"}, - } - for i, tc := range testCases { + for i, tc := range algorithmTestCases { got := fmt.Sprintf("%T", tc.r.Replacer()) if got != tc.want { t.Errorf("%d. algorithm = %s, want %s", i, got, tc.want) @@ -329,6 +330,23 @@ func TestPickAlgorithm(t *testing.T) { } } +type errWriter struct{} + +func (errWriter) Write(p []byte) (n int, err error) { + return 0, fmt.Errorf("unwritable") +} + +// TestWriteStringError tests that WriteString returns an error +// received from the underlying io.Writer. +func TestWriteStringError(t *testing.T) { + for i, tc := range algorithmTestCases { + n, err := tc.r.WriteString(errWriter{}, "abc") + if n != 0 || err == nil || err.Error() != "unwritable" { + t.Errorf("%d. WriteStringError = %d, %v, want 0, unwritable", i, n, err) + } + } +} + // TestGenericTrieBuilding verifies the structure of the generated trie. There // is one node per line, and the key ending with the current line is in the // trie if it ends with a "+". 
@@ -480,6 +498,24 @@ func BenchmarkHTMLEscapeOld(b *testing.B) { } } +func BenchmarkByteStringReplacerWriteString(b *testing.B) { + str := Repeat("I <3 to escape HTML & other text too.", 100) + buf := new(bytes.Buffer) + for i := 0; i < b.N; i++ { + htmlEscaper.WriteString(buf, str) + buf.Reset() + } +} + +func BenchmarkByteReplacerWriteString(b *testing.B) { + str := Repeat("abcdefghijklmnopqrstuvwxyz", 100) + buf := new(bytes.Buffer) + for i := 0; i < b.N; i++ { + capitalLetters.WriteString(buf, str) + buf.Reset() + } +} + // BenchmarkByteByteReplaces compares byteByteImpl against multiple Replaces. func BenchmarkByteByteReplaces(b *testing.B) { str := Repeat("a", 100) + Repeat("b", 100) diff --git a/libgo/go/strings/strings.go b/libgo/go/strings/strings.go index 5d46211d84e..27d384983ef 100644 --- a/libgo/go/strings/strings.go +++ b/libgo/go/strings/strings.go @@ -43,13 +43,29 @@ func explode(s string, n int) []string { // primeRK is the prime base used in Rabin-Karp algorithm. const primeRK = 16777619 -// hashstr returns the hash and the appropriate multiplicative +// hashStr returns the hash and the appropriate multiplicative // factor for use in Rabin-Karp algorithm. -func hashstr(sep string) (uint32, uint32) { +func hashStr(sep string) (uint32, uint32) { hash := uint32(0) for i := 0; i < len(sep); i++ { hash = hash*primeRK + uint32(sep[i]) + } + var pow, sq uint32 = 1, primeRK + for i := len(sep); i > 0; i >>= 1 { + if i&1 != 0 { + pow *= sq + } + sq *= sq + } + return hash, pow +} +// hashStrRev returns the hash of the reverse of sep and the +// appropriate multiplicative factor for use in Rabin-Karp algorithm. +func hashStrRev(sep string) (uint32, uint32) { + hash := uint32(0) + for i := len(sep) - 1; i >= 0; i-- { + hash = hash*primeRK + uint32(sep[i]) } var pow, sq uint32 = 1, primeRK for i := len(sep); i > 0; i >>= 1 { @@ -85,7 +101,8 @@ func Count(s, sep string) int { } return 0 } - hashsep, pow := hashstr(sep) + // Rabin-Karp search + hashsep, pow := hashStr(sep) h := uint32(0) for i := 0; i < len(sep); i++ { h = h*primeRK + uint32(s[i]) @@ -139,8 +156,8 @@ func Index(s, sep string) int { case n > len(s): return -1 } - // Hash sep. - hashsep, pow := hashstr(sep) + // Rabin-Karp search + hashsep, pow := hashStr(sep) var h uint32 for i := 0; i < n; i++ { h = h*primeRK + uint32(s[i]) @@ -163,22 +180,41 @@ func Index(s, sep string) int { // LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s. func LastIndex(s, sep string) int { n := len(sep) - if n == 0 { + switch { + case n == 0: return len(s) - } - c := sep[0] - if n == 1 { + case n == 1: // special case worth making fast + c := sep[0] for i := len(s) - 1; i >= 0; i-- { if s[i] == c { return i } } return -1 + case n == len(s): + if sep == s { + return 0 + } + return -1 + case n > len(s): + return -1 + } + // Rabin-Karp search from the end of the string + hashsep, pow := hashStrRev(sep) + last := len(s) - n + var h uint32 + for i := len(s) - 1; i >= last; i-- { + h = h*primeRK + uint32(s[i]) + } + if h == hashsep && s[last:] == sep { + return last } - // n > 1 - for i := len(s) - n; i >= 0; i-- { - if s[i] == c && s[i:i+n] == sep { + for i := last - 1; i >= 0; i-- { + h *= primeRK + h += uint32(s[i]) + h -= pow * uint32(s[i+n]) + if h == hashsep && s[i:i+n] == sep { return i } } @@ -189,13 +225,8 @@ func LastIndex(s, sep string) int { // r, or -1 if rune is not present in s. 
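Index, LastIndex, and Count above all lean on the same Rabin-Karp idea: hash the pattern once, keep a rolling hash of the current window, and compare bytes only when the hashes match. A self-contained sketch of the forward search, reusing the primeRK constant from the source (rkIndex is an illustrative name):

package main

import "fmt"

const primeRK = 16777619

// rkIndex is a compact Rabin-Karp substring search, the same scheme
// Index uses above: roll the window hash with
// h = h*primeRK + incoming byte - pow*outgoing byte, where pow = primeRK^len(sep).
func rkIndex(s, sep string) int {
    n := len(sep)
    if n == 0 {
        return 0
    }
    if n > len(s) {
        return -1
    }
    var hashsep, h uint32
    pow := uint32(1)
    sq := uint32(primeRK)
    for i := 0; i < n; i++ {
        hashsep = hashsep*primeRK + uint32(sep[i])
        h = h*primeRK + uint32(s[i])
    }
    for i := n; i > 0; i >>= 1 {
        if i&1 != 0 {
            pow *= sq
        }
        sq *= sq
    }
    if h == hashsep && s[:n] == sep {
        return 0
    }
    for i := n; i < len(s); i++ {
        h = h*primeRK + uint32(s[i]) - pow*uint32(s[i-n])
        if h == hashsep && s[i-n+1:i+1] == sep {
            return i - n + 1
        }
    }
    return -1
}

func main() {
    fmt.Println(rkIndex("hello gccgo world", "gccgo")) // 6
}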
func IndexRune(s string, r rune) int { switch { - case r < 0x80: - b := byte(r) - for i := 0; i < len(s); i++ { - if s[i] == b { - return i - } - } + case r < utf8.RuneSelf: + return IndexByte(s, byte(r)) default: for i, c := range s { if c == r { @@ -311,6 +342,8 @@ func Fields(s string) []string { // FieldsFunc splits the string s at each run of Unicode code points c satisfying f(c) // and returns an array of slices of s. If all code points in s satisfy f(c) or the // string is empty, an empty slice is returned. +// FieldsFunc makes no guarantees about the order in which it calls f(c). +// If f does not return consistent results for a given c, FieldsFunc may crash. func FieldsFunc(s string, f func(rune) bool) []string { // First count the fields. n := 0 @@ -423,9 +456,10 @@ func Map(mapping func(rune) rune, s string) string { // Repeat returns a new string consisting of count copies of the string s. func Repeat(s string, count int) string { b := make([]byte, len(s)*count) - bp := 0 - for i := 0; i < count; i++ { - bp += copy(b[bp:], s) + bp := copy(b, s) + for bp < len(b) { + copy(b[bp:], b[:bp]) + bp *= 2 } return string(b) } @@ -634,6 +668,9 @@ func TrimSuffix(s, suffix string) string { // Replace returns a copy of the string s with the first n // non-overlapping instances of old replaced by new. +// If old is empty, it matches at the beginning of the string +// and after each UTF-8 sequence, yielding up to k+1 replacements +// for a k-rune string. // If n < 0, there is no limit on the number of replacements. func Replace(s, old, new string, n int) string { if old == new || n == 0 { diff --git a/libgo/go/strings/strings_test.go b/libgo/go/strings/strings_test.go index e40a18015e2..7bb81ef3ca1 100644 --- a/libgo/go/strings/strings_test.go +++ b/libgo/go/strings/strings_test.go @@ -168,6 +168,15 @@ func BenchmarkIndex(b *testing.B) { } } +func BenchmarkLastIndex(b *testing.B) { + if got := Index(benchmarkString, "v"); got != 17 { + b.Fatalf("wrong index: expected 17, got=%d", got) + } + for i := 0; i < b.N; i++ { + LastIndex(benchmarkString, "v") + } +} + func BenchmarkIndexByte(b *testing.B) { if got := IndexByte(benchmarkString, 'v'); got != 17 { b.Fatalf("wrong index: expected 17, got=%d", got) @@ -1069,8 +1078,11 @@ func makeBenchInputHard() string { "hello", "world", } x := make([]byte, 0, 1<<20) - for len(x) < 1<<20 { + for { i := rand.Intn(len(tokens)) + if len(x)+len(tokens[i]) >= 1<<20 { + break + } x = append(x, tokens[i]...) 
} return string(x) @@ -1084,6 +1096,12 @@ func benchmarkIndexHard(b *testing.B, sep string) { } } +func benchmarkLastIndexHard(b *testing.B, sep string) { + for i := 0; i < b.N; i++ { + LastIndex(benchInputHard, sep) + } +} + func benchmarkCountHard(b *testing.B, sep string) { for i := 0; i < b.N; i++ { Count(benchInputHard, sep) @@ -1094,6 +1112,10 @@ func BenchmarkIndexHard1(b *testing.B) { benchmarkIndexHard(b, "<>") } func BenchmarkIndexHard2(b *testing.B) { benchmarkIndexHard(b, "</pre>") } func BenchmarkIndexHard3(b *testing.B) { benchmarkIndexHard(b, "<b>hello world</b>") } +func BenchmarkLastIndexHard1(b *testing.B) { benchmarkLastIndexHard(b, "<>") } +func BenchmarkLastIndexHard2(b *testing.B) { benchmarkLastIndexHard(b, "</pre>") } +func BenchmarkLastIndexHard3(b *testing.B) { benchmarkLastIndexHard(b, "<b>hello world</b>") } + func BenchmarkCountHard1(b *testing.B) { benchmarkCountHard(b, "<>") } func BenchmarkCountHard2(b *testing.B) { benchmarkCountHard(b, "</pre>") } func BenchmarkCountHard3(b *testing.B) { benchmarkCountHard(b, "<b>hello world</b>") } @@ -1174,3 +1196,9 @@ func BenchmarkSplit3(b *testing.B) { Split(benchInputHard, "hello") } } + +func BenchmarkRepeat(b *testing.B) { + for i := 0; i < b.N; i++ { + Repeat("-", 80) + } +} diff --git a/libgo/go/sync/atomic/64bit_arm.go b/libgo/go/sync/atomic/64bit_arm.go index c08f214c7ef..b98e60827e4 100644 --- a/libgo/go/sync/atomic/64bit_arm.go +++ b/libgo/go/sync/atomic/64bit_arm.go @@ -44,3 +44,15 @@ func swapUint64(addr *uint64, new uint64) (old uint64) { } return } + +// Additional ARM-specific assembly routines. +// Declaration here to give assembly routines correct stack maps for arguments. +func armCompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool) +func armCompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool) +func generalCAS64(addr *uint64, old, new uint64) (swapped bool) +func armAddUint32(addr *uint32, delta uint32) (new uint32) +func armAddUint64(addr *uint64, delta uint64) (new uint64) +func armSwapUint32(addr *uint32, new uint32) (old uint32) +func armSwapUint64(addr *uint64, new uint64) (old uint64) +func armLoadUint64(addr *uint64) (val uint64) +func armStoreUint64(addr *uint64, val uint64) diff --git a/libgo/go/sync/atomic/doc.go b/libgo/go/sync/atomic/doc.go index 17ba72fa171..10fb8c9177c 100644 --- a/libgo/go/sync/atomic/doc.go +++ b/libgo/go/sync/atomic/doc.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !race - // Package atomic provides low-level atomic memory primitives // useful for implementing synchronization algorithms. // diff --git a/libgo/go/sync/atomic/race.go b/libgo/go/sync/atomic/race.go deleted file mode 100644 index 6cbbf12cb64..00000000000 --- a/libgo/go/sync/atomic/race.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build race - -package atomic - -import ( - "runtime" - "unsafe" -) - -// We use runtime.RaceRead() inside of atomic operations to catch races -// between atomic and non-atomic operations. It will also catch races -// between Mutex.Lock() and mutex overwrite (mu = Mutex{}). Since we use -// only RaceRead() we won't catch races with non-atomic loads. -// Otherwise (if we use RaceWrite()) we will report races -// between atomic operations (false positives). 
- -var mtx uint32 = 1 // same for all - -func SwapInt32(addr *int32, new int32) (old int32) { - return int32(SwapUint32((*uint32)(unsafe.Pointer(addr)), uint32(new))) -} - -func SwapUint32(addr *uint32, new uint32) (old uint32) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - old = *addr - *addr = new - runtime.RaceReleaseMerge(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) - return -} - -func SwapInt64(addr *int64, new int64) (old int64) { - return int64(SwapUint64((*uint64)(unsafe.Pointer(addr)), uint64(new))) -} - -func SwapUint64(addr *uint64, new uint64) (old uint64) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - old = *addr - *addr = new - runtime.RaceReleaseMerge(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) - return -} - -func SwapUintptr(addr *uintptr, new uintptr) (old uintptr) { - return uintptr(SwapPointer((*unsafe.Pointer)(unsafe.Pointer(addr)), unsafe.Pointer(new))) -} - -func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - old = *addr - *addr = new - runtime.RaceReleaseMerge(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) - return -} - -func CompareAndSwapInt32(val *int32, old, new int32) bool { - return CompareAndSwapUint32((*uint32)(unsafe.Pointer(val)), uint32(old), uint32(new)) -} - -func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool) { - _ = *val - swapped = false - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - if *val == old { - *val = new - swapped = true - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - } - runtime.RaceSemrelease(&mtx) - return -} - -func CompareAndSwapInt64(val *int64, old, new int64) bool { - return CompareAndSwapUint64((*uint64)(unsafe.Pointer(val)), uint64(old), uint64(new)) -} - -func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool) { - _ = *val - swapped = false - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - if *val == old { - *val = new - swapped = true - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - } - runtime.RaceSemrelease(&mtx) - return -} - -func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool) { - _ = *val - swapped = false - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - if *val == old { - *val = new - swapped = true - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - } - runtime.RaceSemrelease(&mtx) - return -} - -func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool) { - _ = *val - swapped = false - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - if *val == old { - *val = new - swapped = true - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - } - runtime.RaceSemrelease(&mtx) - return -} - -func AddInt32(val *int32, delta int32) int32 { - return int32(AddUint32((*uint32)(unsafe.Pointer(val)), uint32(delta))) -} - -func AddUint32(val *uint32, delta uint32) (new uint32) { - _ = *val - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - *val = *val + delta - new = *val - 
runtime.RaceReleaseMerge(unsafe.Pointer(val)) - runtime.RaceSemrelease(&mtx) - - return -} - -func AddInt64(val *int64, delta int64) int64 { - return int64(AddUint64((*uint64)(unsafe.Pointer(val)), uint64(delta))) -} - -func AddUint64(val *uint64, delta uint64) (new uint64) { - _ = *val - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - *val = *val + delta - new = *val - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - runtime.RaceSemrelease(&mtx) - - return -} - -func AddUintptr(val *uintptr, delta uintptr) (new uintptr) { - _ = *val - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - *val = *val + delta - new = *val - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - runtime.RaceSemrelease(&mtx) - - return -} - -func LoadInt32(addr *int32) int32 { - return int32(LoadUint32((*uint32)(unsafe.Pointer(addr)))) -} - -func LoadUint32(addr *uint32) (val uint32) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - val = *addr - runtime.RaceSemrelease(&mtx) - return -} - -func LoadInt64(addr *int64) int64 { - return int64(LoadUint64((*uint64)(unsafe.Pointer(addr)))) -} - -func LoadUint64(addr *uint64) (val uint64) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - val = *addr - runtime.RaceSemrelease(&mtx) - return -} - -func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - val = *addr - runtime.RaceSemrelease(&mtx) - return -} - -func LoadUintptr(addr *uintptr) (val uintptr) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - val = *addr - runtime.RaceSemrelease(&mtx) - return -} - -func StoreInt32(addr *int32, val int32) { - StoreUint32((*uint32)(unsafe.Pointer(addr)), uint32(val)) -} - -func StoreUint32(addr *uint32, val uint32) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - *addr = val - runtime.RaceRelease(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) -} - -func StoreInt64(addr *int64, val int64) { - StoreUint64((*uint64)(unsafe.Pointer(addr)), uint64(val)) -} - -func StoreUint64(addr *uint64, val uint64) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - *addr = val - runtime.RaceRelease(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) -} - -func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - *addr = val - runtime.RaceRelease(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) -} - -func StoreUintptr(addr *uintptr, val uintptr) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - *addr = val - runtime.RaceRelease(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) -} diff --git a/libgo/go/sync/atomic/value.go b/libgo/go/sync/atomic/value.go new file mode 100644 index 00000000000..ab3aa112854 --- /dev/null +++ b/libgo/go/sync/atomic/value.go @@ -0,0 +1,85 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package atomic + +import ( + "unsafe" +) + +// A Value provides an atomic load and store of a consistently typed value. +// Values can be created as part of other data structures. +// The zero value for a Value returns nil from Load. +// Once Store has been called, a Value must not be copied. +type Value struct { + v interface{} +} + +// ifaceWords is interface{} internal representation. +type ifaceWords struct { + typ unsafe.Pointer + data unsafe.Pointer +} + +// Load returns the value set by the most recent Store. +// It returns nil if there has been no call to Store for this Value. +func (v *Value) Load() (x interface{}) { + vp := (*ifaceWords)(unsafe.Pointer(v)) + typ := LoadPointer(&vp.typ) + if typ == nil || uintptr(typ) == ^uintptr(0) { + // First store not yet completed. + return nil + } + data := LoadPointer(&vp.data) + xp := (*ifaceWords)(unsafe.Pointer(&x)) + xp.typ = typ + xp.data = data + return +} + +// Store sets the value of the Value to x. +// All calls to Store for a given Value must use values of the same concrete type. +// Store of an inconsistent type panics, as does Store(nil). +func (v *Value) Store(x interface{}) { + if x == nil { + panic("sync/atomic: store of nil value into Value") + } + vp := (*ifaceWords)(unsafe.Pointer(v)) + xp := (*ifaceWords)(unsafe.Pointer(&x)) + for { + typ := LoadPointer(&vp.typ) + if typ == nil { + // Attempt to start first store. + // Disable preemption so that other goroutines can use + // active spin wait to wait for completion; and so that + // GC does not see the fake type accidentally. + runtime_procPin() + if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(^uintptr(0))) { + runtime_procUnpin() + continue + } + // Complete first store. + StorePointer(&vp.data, xp.data) + StorePointer(&vp.typ, xp.typ) + runtime_procUnpin() + return + } + if uintptr(typ) == ^uintptr(0) { + // First store in progress. Wait. + // Since we disable preemption around the first store, + // we can wait with active spinning. + continue + } + // First store completed. Check type and overwrite data. + if typ != xp.typ { + panic("sync/atomic: store of inconsistently typed value into Value") + } + StorePointer(&vp.data, xp.data) + return + } +} + +// Disable/enable preemption, implemented in runtime. +func runtime_procPin() +func runtime_procUnpin() diff --git a/libgo/go/sync/atomic/value_test.go b/libgo/go/sync/atomic/value_test.go new file mode 100644 index 00000000000..382dc6854d1 --- /dev/null +++ b/libgo/go/sync/atomic/value_test.go @@ -0,0 +1,195 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic_test + +import ( + "math/rand" + "runtime" + "sync" + . 
"sync/atomic" + "testing" + "time" +) + +func TestValue(t *testing.T) { + var v Value + if v.Load() != nil { + t.Fatal("initial Value is not nil") + } + v.Store(42) + x := v.Load() + if xx, ok := x.(int); !ok || xx != 42 { + t.Fatalf("wrong value: got %+v, want 42", x) + } + v.Store(84) + x = v.Load() + if xx, ok := x.(int); !ok || xx != 84 { + t.Fatalf("wrong value: got %+v, want 84", x) + } +} + +func TestValueLarge(t *testing.T) { + var v Value + v.Store("foo") + x := v.Load() + if xx, ok := x.(string); !ok || xx != "foo" { + t.Fatalf("wrong value: got %+v, want foo", x) + } + v.Store("barbaz") + x = v.Load() + if xx, ok := x.(string); !ok || xx != "barbaz" { + t.Fatalf("wrong value: got %+v, want barbaz", x) + } +} + +func TestValuePanic(t *testing.T) { + const nilErr = "sync/atomic: store of nil value into Value" + const badErr = "sync/atomic: store of inconsistently typed value into Value" + var v Value + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() + v.Store(42) + func() { + defer func() { + err := recover() + if err != badErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, badErr) + } + }() + v.Store("foo") + }() + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() +} + +func TestValueConcurrent(t *testing.T) { + tests := [][]interface{}{ + {uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)}, + {uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)}, + {uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)}, + {complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)}, + } + p := 4 * runtime.GOMAXPROCS(0) + for _, test := range tests { + var v Value + done := make(chan bool) + for i := 0; i < p; i++ { + go func() { + r := rand.New(rand.NewSource(rand.Int63())) + loop: + for j := 0; j < 1e5; j++ { + x := test[r.Intn(len(test))] + v.Store(x) + x = v.Load() + for _, x1 := range test { + if x == x1 { + continue loop + } + } + t.Logf("loaded unexpected value %+v, want %+v", x, test) + done <- false + } + done <- true + }() + } + for i := 0; i < p; i++ { + if !<-done { + t.FailNow() + } + } + } +} + +func BenchmarkValueRead(b *testing.B) { + var v Value + v.Store(new(int)) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + x := v.Load().(*int) + if *x != 0 { + b.Fatalf("wrong value: got %v, want 0", *x) + } + } + }) +} + +// The following example shows how to use Value for periodic program config updates +// and propagation of the changes to worker goroutines. +func ExampleValue_config() { + var config Value // holds current server configuration + // Create initial config value and store into config. + config.Store(loadConfig()) + go func() { + // Reload config every 10 seconds + // and update config value with the new version. + for { + time.Sleep(10 * time.Second) + config.Store(loadConfig()) + } + }() + // Create worker goroutines that handle incoming requests + // using the latest config value. + for i := 0; i < 10; i++ { + go func() { + for r := range requests() { + c := config.Load() + // Handle request r using config c. 
+ _, _ = r, c + } + }() + } +} + +func loadConfig() map[string]string { + return make(map[string]string) +} + +func requests() chan int { + return make(chan int) +} + +// The following example shows how to maintain a scalable frequently read, +// but infrequently updated data structure using copy-on-write idiom. +func ExampleValue_readMostly() { + type Map map[string]string + var m Value + m.Store(make(Map)) + var mu sync.Mutex // used only by writers + // read function can be used to read the data without further synchronization + read := func(key string) (val string) { + m1 := m.Load().(Map) + return m1[key] + } + // insert function can be used to update the data without further synchronization + insert := func(key, val string) { + mu.Lock() // synchronize with other potential writers + defer mu.Unlock() + m1 := m.Load().(Map) // load current value of the data structure + m2 := make(Map) // create a new value + for k, v := range m1 { + m2[k] = v // copy all data from the current object to the new one + } + m2[key] = val // do the update that we need + m.Store(m2) // atomically replace the current object with the new one + // At this point all new readers start working with the new version. + // The old version will be garbage collected once the existing readers + // (if any) are done with it. + } + _, _ = read, insert +} diff --git a/libgo/go/sync/once.go b/libgo/go/sync/once.go index 161ae3b3e96..10b42fddc2f 100644 --- a/libgo/go/sync/once.go +++ b/libgo/go/sync/once.go @@ -15,7 +15,7 @@ type Once struct { } // Do calls the function f if and only if Do is being called for the -// first time for this instance of Once. In other words, given +// first time for this instance of Once. In other words, given // var once Once // if once.Do(f) is called multiple times, only the first call will invoke f, // even if f has a different value in each invocation. A new instance of @@ -29,6 +29,9 @@ type Once struct { // Because no call to Do returns until the one call to f returns, if f causes // Do to be called, it will deadlock. // +// If f panics, Do considers it to have returned; future calls of Do return +// without calling f. 
+// func (o *Once) Do(f func()) { if atomic.LoadUint32(&o.done) == 1 { return @@ -37,7 +40,7 @@ func (o *Once) Do(f func()) { o.m.Lock() defer o.m.Unlock() if o.done == 0 { + defer atomic.StoreUint32(&o.done, 1) f() - atomic.StoreUint32(&o.done, 1) } } diff --git a/libgo/go/sync/once_test.go b/libgo/go/sync/once_test.go index 8afda82f3e1..1eec8d18ea5 100644 --- a/libgo/go/sync/once_test.go +++ b/libgo/go/sync/once_test.go @@ -40,22 +40,20 @@ func TestOnce(t *testing.T) { } func TestOncePanic(t *testing.T) { - once := new(Once) - for i := 0; i < 2; i++ { - func() { - defer func() { - if recover() == nil { - t.Fatalf("Once.Do() has not panic'ed") - } - }() - once.Do(func() { - panic("failed") - }) + var once Once + func() { + defer func() { + if r := recover(); r == nil { + t.Fatalf("Once.Do did not panic") + } }() - } - once.Do(func() {}) + once.Do(func() { + panic("failed") + }) + }() + once.Do(func() { - t.Fatalf("Once called twice") + t.Fatalf("Once.Do called twice") }) } diff --git a/libgo/go/sync/pool.go b/libgo/go/sync/pool.go index 1f08707cd42..0cf06370244 100644 --- a/libgo/go/sync/pool.go +++ b/libgo/go/sync/pool.go @@ -200,6 +200,8 @@ func poolCleanup() { } l.shared = nil } + p.local = nil + p.localSize = 0 } allPools = []*Pool{} } diff --git a/libgo/go/sync/pool_test.go b/libgo/go/sync/pool_test.go index c13477de904..051bb175338 100644 --- a/libgo/go/sync/pool_test.go +++ b/libgo/go/sync/pool_test.go @@ -69,37 +69,45 @@ func TestPoolNew(t *testing.T) { } } -// Test that Pool does not hold pointers to previously cached -// resources +// Test that Pool does not hold pointers to previously cached resources. func TestPoolGC(t *testing.T) { + testPool(t, true) +} + +// Test that Pool releases resources on GC. +func TestPoolRelease(t *testing.T) { + testPool(t, false) +} + +func testPool(t *testing.T, drain bool) { + t.Skip("gccgo imprecise GC breaks this test") var p Pool - var fin uint32 const N = 100 - for i := 0; i < N; i++ { - v := new(string) - runtime.SetFinalizer(v, func(vv *string) { - atomic.AddUint32(&fin, 1) - }) - p.Put(v) - } - for i := 0; i < N; i++ { - p.Get() - } - for i := 0; i < 5; i++ { - runtime.GC() - time.Sleep(time.Duration(i*100+10) * time.Millisecond) - // 1 pointer can remain on stack or elsewhere - if atomic.LoadUint32(&fin) >= N-1 { - return +loop: + for try := 0; try < 3; try++ { + var fin, fin1 uint32 + for i := 0; i < N; i++ { + v := new(string) + runtime.SetFinalizer(v, func(vv *string) { + atomic.AddUint32(&fin, 1) + }) + p.Put(v) } - - // gccgo has a less precise heap. 
- if runtime.Compiler == "gccgo" && atomic.LoadUint32(&fin) >= N-5 { - return + if drain { + for i := 0; i < N; i++ { + p.Get() + } + } + for i := 0; i < 5; i++ { + runtime.GC() + time.Sleep(time.Duration(i*100+10) * time.Millisecond) + // 1 pointer can remain on stack or elsewhere + if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 { + continue loop + } } + t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try) } - t.Fatalf("only %v out of %v resources are finalized", - atomic.LoadUint32(&fin), N) } func TestPoolStress(t *testing.T) { @@ -141,7 +149,7 @@ func BenchmarkPool(b *testing.B) { }) } -func BenchmarkPoolOverlflow(b *testing.B) { +func BenchmarkPoolOverflow(b *testing.B) { var p Pool b.RunParallel(func(pb *testing.PB) { for pb.Next() { diff --git a/libgo/go/sync/runtime.go b/libgo/go/sync/runtime.go index 3bf47ea52aa..3b866303a96 100644 --- a/libgo/go/sync/runtime.go +++ b/libgo/go/sync/runtime.go @@ -19,8 +19,12 @@ func runtime_Semacquire(s *uint32) // library and should not be used directly. func runtime_Semrelease(s *uint32) -// Opaque representation of SyncSema in runtime/sema.goc. -type syncSema [3]uintptr +// Approximation of syncSema in runtime/sema.go. +type syncSema struct { + lock uintptr + head unsafe.Pointer + tail unsafe.Pointer +} // Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s. func runtime_Syncsemacquire(s *syncSema) diff --git a/libgo/go/sync/rwmutex.go b/libgo/go/sync/rwmutex.go index 3db54199576..0e8a58e5f03 100644 --- a/libgo/go/sync/rwmutex.go +++ b/libgo/go/sync/rwmutex.go @@ -51,7 +51,11 @@ func (rw *RWMutex) RUnlock() { raceReleaseMerge(unsafe.Pointer(&rw.writerSem)) raceDisable() } - if atomic.AddInt32(&rw.readerCount, -1) < 0 { + if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 { + if r+1 == 0 || r+1 == -rwmutexMaxReaders { + raceEnable() + panic("sync: RUnlock of unlocked RWMutex") + } // A writer is pending. if atomic.AddInt32(&rw.readerWait, -1) == 0 { // The last reader unblocks the writer. @@ -105,6 +109,10 @@ func (rw *RWMutex) Unlock() { // Announce to readers there is no active writer. r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders) + if r >= rwmutexMaxReaders { + raceEnable() + panic("sync: Unlock of unlocked RWMutex") + } // Unblock blocked readers, if any. 
for i := 0; i < int(r); i++ { runtime_Semrelease(&rw.readerSem) diff --git a/libgo/go/sync/rwmutex_test.go b/libgo/go/sync/rwmutex_test.go index 0436f97239c..f625bc3a585 100644 --- a/libgo/go/sync/rwmutex_test.go +++ b/libgo/go/sync/rwmutex_test.go @@ -155,6 +155,48 @@ func TestRLocker(t *testing.T) { } } +func TestUnlockPanic(t *testing.T) { + defer func() { + if recover() == nil { + t.Fatalf("unlock of unlocked RWMutex did not panic") + } + }() + var mu RWMutex + mu.Unlock() +} + +func TestUnlockPanic2(t *testing.T) { + defer func() { + if recover() == nil { + t.Fatalf("unlock of unlocked RWMutex did not panic") + } + }() + var mu RWMutex + mu.RLock() + mu.Unlock() +} + +func TestRUnlockPanic(t *testing.T) { + defer func() { + if recover() == nil { + t.Fatalf("read unlock of unlocked RWMutex did not panic") + } + }() + var mu RWMutex + mu.RUnlock() +} + +func TestRUnlockPanic2(t *testing.T) { + defer func() { + if recover() == nil { + t.Fatalf("read unlock of unlocked RWMutex did not panic") + } + }() + var mu RWMutex + mu.Lock() + mu.RUnlock() +} + func BenchmarkRWMutexUncontended(b *testing.B) { type PaddedRWMutex struct { RWMutex diff --git a/libgo/go/sync/waitgroup.go b/libgo/go/sync/waitgroup.go index 4c64dca393f..92cc57d2cc8 100644 --- a/libgo/go/sync/waitgroup.go +++ b/libgo/go/sync/waitgroup.go @@ -37,10 +37,13 @@ type WaitGroup struct { // If the counter becomes zero, all goroutines blocked on Wait are released. // If the counter goes negative, Add panics. // -// Note that calls with positive delta must happen before the call to Wait, -// or else Wait may wait for too small a group. Typically this means the calls -// to Add should execute before the statement creating the goroutine or -// other event to be waited for. See the WaitGroup example. +// Note that calls with a positive delta that occur when the counter is zero +// must happen before a Wait. Calls with a negative delta, or calls with a +// positive delta that start when the counter is greater than zero, may happen +// at any time. +// Typically this means the calls to Add should execute before the statement +// creating the goroutine or other event to be waited for. +// See the WaitGroup example. func (wg *WaitGroup) Add(delta int) { if raceenabled { _ = wg.m.state // trigger nil deref early diff --git a/libgo/go/syscall/env_plan9.go b/libgo/go/syscall/env_plan9.go index 9587ab5af9d..9ea36c886ab 100644 --- a/libgo/go/syscall/env_plan9.go +++ b/libgo/go/syscall/env_plan9.go @@ -8,22 +8,9 @@ package syscall import ( "errors" - "sync" ) var ( - // envOnce guards copyenv, which populates env. - envOnce sync.Once - - // envLock guards env and envs. - envLock sync.RWMutex - - // env maps from an environment variable to its value. - env = make(map[string]string) - - // envs contains elements of env in the form "key=value". 
- envs []string - errZeroLengthKey = errors.New("zero length key") errShortWrite = errors.New("i/o count too small") ) @@ -64,46 +51,14 @@ func writeenv(key, value string) error { return nil } -func copyenv() { - fd, err := Open("/env", O_RDONLY) - if err != nil { - return - } - defer Close(fd) - files, err := readdirnames(fd) - if err != nil { - return - } - envs = make([]string, len(files)) - i := 0 - for _, key := range files { - v, err := readenv(key) - if err != nil { - continue - } - env[key] = v - envs[i] = key + "=" + v - i++ - } -} - func Getenv(key string) (value string, found bool) { if len(key) == 0 { return "", false } - - envLock.RLock() - defer envLock.RUnlock() - - if v, ok := env[key]; ok { - return v, true - } v, err := readenv(key) if err != nil { return "", false } - env[key] = v - envs = append(envs, key+"="+v) return v, true } @@ -111,32 +66,43 @@ func Setenv(key, value string) error { if len(key) == 0 { return errZeroLengthKey } - - envLock.Lock() - defer envLock.Unlock() - err := writeenv(key, value) if err != nil { return err } - env[key] = value - envs = append(envs, key+"="+value) return nil } func Clearenv() { - envLock.Lock() - defer envLock.Unlock() - - env = make(map[string]string) - envs = []string{} RawSyscall(SYS_RFORK, RFCENVG, 0, 0) } +func Unsetenv(key string) error { + if len(key) == 0 { + return errZeroLengthKey + } + Remove("/env/" + key) + return nil +} + func Environ() []string { - envLock.RLock() - defer envLock.RUnlock() + fd, err := Open("/env", O_RDONLY) + if err != nil { + return nil + } + defer Close(fd) + files, err := readdirnames(fd) + if err != nil { + return nil + } + ret := make([]string, 0, len(files)) - envOnce.Do(copyenv) - return append([]string(nil), envs...) + for _, key := range files { + v, err := readenv(key) + if err != nil { + continue + } + ret = append(ret, key+"="+v) + } + return ret } diff --git a/libgo/go/syscall/env_unix.go b/libgo/go/syscall/env_unix.go index 7f39958437c..b5ded9c763c 100644 --- a/libgo/go/syscall/env_unix.go +++ b/libgo/go/syscall/env_unix.go @@ -20,23 +20,33 @@ var ( // env maps from an environment variable to its first occurrence in envs. env map[string]int - // envs is provided by the runtime. elements are expected to be - // of the form "key=value". - Envs []string + // envs is provided by the runtime. elements are expected to + // be of the form "key=value". An empty string means deleted + // (or a duplicate to be ignored). + envs []string = runtime_envs() ) -// setenv_c is provided by the runtime, but is a no-op if cgo isn't -// loaded. +func runtime_envs() []string // in package runtime + +// setenv_c and unsetenv_c are provided by the runtime but are no-ops +// if cgo isn't loaded. func setenv_c(k, v string) +func unsetenv_c(k string) func copyenv() { env = make(map[string]int) - for i, s := range Envs { + for i, s := range envs { for j := 0; j < len(s); j++ { if s[j] == '=' { key := s[:j] if _, ok := env[key]; !ok { - env[key] = i + env[key] = i // first mention of key + } else { + // Clear duplicate keys. This permits Unsetenv to + // safely delete only the first item without + // worrying about unshadowing a later one, + // which might be a security problem. 
+ envs[i] = "" } break } @@ -44,6 +54,20 @@ func copyenv() { } } +func Unsetenv(key string) error { + envOnce.Do(copyenv) + + envLock.Lock() + defer envLock.Unlock() + + if i, ok := env[key]; ok { + envs[i] = "" + delete(env, key) + } + unsetenv_c(key) + return nil +} + func Getenv(key string) (value string, found bool) { envOnce.Do(copyenv) if len(key) == 0 { @@ -57,7 +81,7 @@ func Getenv(key string) (value string, found bool) { if !ok { return "", false } - s := Envs[i] + s := envs[i] for i := 0; i < len(s); i++ { if s[i] == '=' { return s[i+1:], true @@ -88,10 +112,10 @@ func Setenv(key, value string) error { i, ok := env[key] kv := key + "=" + value if ok { - Envs[i] = kv + envs[i] = kv } else { - i = len(Envs) - Envs = append(Envs, kv) + i = len(envs) + envs = append(envs, kv) } env[key] = i setenv_c(key, value) @@ -104,16 +128,22 @@ func Clearenv() { envLock.Lock() defer envLock.Unlock() + for k := range env { + unsetenv_c(k) + } env = make(map[string]int) - Envs = []string{} - // TODO(bradfitz): pass through to C + envs = []string{} } func Environ() []string { envOnce.Do(copyenv) envLock.RLock() defer envLock.RUnlock() - a := make([]string, len(Envs)) - copy(a, Envs) + a := make([]string, 0, len(envs)) + for _, env := range envs { + if env != "" { + a = append(a, env) + } + } return a } diff --git a/libgo/go/syscall/env_windows.go b/libgo/go/syscall/env_windows.go index 420b3872464..bc21690d9fd 100644 --- a/libgo/go/syscall/env_windows.go +++ b/libgo/go/syscall/env_windows.go @@ -47,6 +47,14 @@ func Setenv(key, value string) error { return nil } +func Unsetenv(key string) error { + keyp, err := UTF16PtrFromString(key) + if err != nil { + return err + } + return SetEnvironmentVariable(keyp, nil) +} + func Clearenv() { for _, s := range Environ() { // Environment variables can begin with = diff --git a/libgo/go/syscall/exec_linux.go b/libgo/go/syscall/exec_linux.go index 2371902cbaa..97bde0c4f52 100644 --- a/libgo/go/syscall/exec_linux.go +++ b/libgo/go/syscall/exec_linux.go @@ -14,17 +14,27 @@ import ( //sysnb raw_prctl(option int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err Errno) //prctl(option _C_int, arg2 _C_long, arg3 _C_long, arg4 _C_long, arg5 _C_long) _C_int +// SysProcIDMap holds Container ID to Host ID mappings used for User Namespaces in Linux. +// See user_namespaces(7). +type SysProcIDMap struct { + ContainerID int // Container ID. + HostID int // Host ID. + Size int // Size. +} + type SysProcAttr struct { - Chroot string // Chroot. - Credential *Credential // Credential. - Ptrace bool // Enable tracing. - Setsid bool // Create session. - Setpgid bool // Set process group ID to new pid (SYSV setpgrp) - Setctty bool // Set controlling terminal to fd Ctty (only meaningful if Setsid is set) - Noctty bool // Detach fd 0 from controlling terminal - Ctty int // Controlling TTY fd (Linux only) - Pdeathsig Signal // Signal that the process will get when its parent dies (Linux only) - Cloneflags uintptr // Flags for clone calls (Linux only) + Chroot string // Chroot. + Credential *Credential // Credential. + Ptrace bool // Enable tracing. + Setsid bool // Create session. 
+ Setpgid bool // Set process group ID to new pid (SYSV setpgrp) + Setctty bool // Set controlling terminal to fd Ctty (only meaningful if Setsid is set) + Noctty bool // Detach fd 0 from controlling terminal + Ctty int // Controlling TTY fd (Linux only) + Pdeathsig Signal // Signal that the process will get when its parent dies (Linux only) + Cloneflags uintptr // Flags for clone calls (Linux only) + UidMappings []SysProcIDMap // User ID mappings for user namespaces. + GidMappings []SysProcIDMap // Group ID mappings for user namespaces. } // Implemented in runtime package. @@ -46,8 +56,10 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr var ( r1 uintptr err1 Errno + err2 Errno nextfd int i int + p [2]int ) // Guard against side effects of shuffling fds below. @@ -63,6 +75,14 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr } nextfd++ + // Allocate another pipe for parent to child communication for + // synchronizing writing of User ID/Group ID mappings. + if sys.UidMappings != nil || sys.GidMappings != nil { + if err := forkExecPipe(p[:]); err != nil { + return 0, err.(Errno) + } + } + // About to call fork. // No more allocation or calls of non-assembly functions. runtime_BeforeFork() @@ -79,11 +99,42 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr if r1 != 0 { // parent; return PID runtime_AfterFork() - return int(r1), 0 + pid = int(r1) + + if sys.UidMappings != nil || sys.GidMappings != nil { + Close(p[0]) + err := writeUidGidMappings(pid, sys) + if err != nil { + err2 = err.(Errno) + } + RawSyscall(SYS_WRITE, uintptr(p[1]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2)) + Close(p[1]) + } + + return pid, 0 } // Fork succeeded, now in child. + // Wait for User ID/Group ID mappings to be written. + if sys.UidMappings != nil || sys.GidMappings != nil { + if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(p[1]), 0, 0); err1 != 0 { + goto childerror + } + r1, _, err1 = RawSyscall(SYS_READ, uintptr(p[0]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2)) + if err1 != 0 { + goto childerror + } + if r1 != unsafe.Sizeof(err2) { + err1 = EINVAL + goto childerror + } + if err2 != 0 { + err1 = err2 + goto childerror + } + } + // Parent death signal if sys.Pdeathsig != 0 { _, err1 = raw_prctl(PR_SET_PDEATHSIG, int(sys.Pdeathsig), 0, 0, 0) @@ -282,3 +333,53 @@ func forkExecPipe(p []int) (err error) { } return } + +// writeIDMappings writes the user namespace User ID or Group ID mappings to the specified path. +func writeIDMappings(path string, idMap []SysProcIDMap) error { + fd, err := Open(path, O_RDWR, 0) + if err != nil { + return err + } + + data := "" + for _, im := range idMap { + data = data + itoa(im.ContainerID) + " " + itoa(im.HostID) + " " + itoa(im.Size) + "\n" + } + + bytes, err := ByteSliceFromString(data) + if err != nil { + Close(fd) + return err + } + + if _, err := Write(fd, bytes); err != nil { + Close(fd) + return err + } + + if err := Close(fd); err != nil { + return err + } + + return nil +} + +// writeUidGidMappings writes User ID and Group ID mappings for user namespaces +// for a process and it is called from the parent process. 
+func writeUidGidMappings(pid int, sys *SysProcAttr) error { + if sys.UidMappings != nil { + uidf := "/proc/" + itoa(pid) + "/uid_map" + if err := writeIDMappings(uidf, sys.UidMappings); err != nil { + return err + } + } + + if sys.GidMappings != nil { + gidf := "/proc/" + itoa(pid) + "/gid_map" + if err := writeIDMappings(gidf, sys.GidMappings); err != nil { + return err + } + } + + return nil +} diff --git a/libgo/go/syscall/exec_windows.go b/libgo/go/syscall/exec_windows.go index 82abc0715e5..936aeb577bc 100644 --- a/libgo/go/syscall/exec_windows.go +++ b/libgo/go/syscall/exec_windows.go @@ -129,9 +129,8 @@ func SetNonblock(fd Handle, nonblocking bool) (err error) { return nil } -// getFullPath retrieves the full path of the specified file. -// Just a wrapper for Windows GetFullPathName api. -func getFullPath(name string) (path string, err error) { +// FullPath retrieves the full path of the specified file. +func FullPath(name string) (path string, err error) { p, err := UTF16PtrFromString(name) if err != nil { return "", err @@ -160,7 +159,7 @@ func isSlash(c uint8) bool { } func normalizeDir(dir string) (name string, err error) { - ndir, err := getFullPath(dir) + ndir, err := FullPath(dir) if err != nil { return "", err } @@ -199,9 +198,9 @@ func joinExeDirAndFName(dir, p string) (name string, err error) { return "", err } if volToUpper(int(p[0])) == volToUpper(int(d[0])) { - return getFullPath(d + "\\" + p[2:]) + return FullPath(d + "\\" + p[2:]) } else { - return getFullPath(p) + return FullPath(p) } } } else { @@ -211,9 +210,9 @@ func joinExeDirAndFName(dir, p string) (name string, err error) { return "", err } if isSlash(p[0]) { - return getFullPath(d[:2] + p) + return FullPath(d[:2] + p) } else { - return getFullPath(d + "\\" + p) + return FullPath(d + "\\" + p) } } // we shouldn't be here diff --git a/libgo/go/syscall/export_test.go b/libgo/go/syscall/export_test.go new file mode 100644 index 00000000000..c9774622c87 --- /dev/null +++ b/libgo/go/syscall/export_test.go @@ -0,0 +1,7 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syscall + +var Itoa = itoa diff --git a/libgo/go/syscall/netlink_linux.go b/libgo/go/syscall/netlink_linux.go index 49550ea2f02..1b73dce8274 100644 --- a/libgo/go/syscall/netlink_linux.go +++ b/libgo/go/syscall/netlink_linux.go @@ -64,9 +64,10 @@ func NetlinkRIB(proto, family int) ([]byte, error) { return nil, err } var tab []byte + rbNew := make([]byte, Getpagesize()) done: for { - rb := make([]byte, Getpagesize()) + rb := rbNew nr, _, err := Recvfrom(s, rb, 0) if err != nil { return nil, err diff --git a/libgo/go/syscall/route_bsd.go b/libgo/go/syscall/route_bsd.go index 48af587450b..1dabe42531b 100644 --- a/libgo/go/syscall/route_bsd.go +++ b/libgo/go/syscall/route_bsd.go @@ -153,7 +153,7 @@ func (m *InterfaceAddrMessage) sockaddr() (sas []Sockaddr) { // RTAX_NETMASK socket address on the FreeBSD kernel. 
preferredFamily := uint8(AF_UNSPEC) for i := uint(0); i < RTAX_MAX; i++ { - if m.Header.Addrs&rtaIfaMask&(1<<i) == 0 { + if m.Header.Addrs&(1<<i) == 0 { continue } rsa := (*RawSockaddr)(unsafe.Pointer(&b[0])) diff --git a/libgo/go/syscall/str.go b/libgo/go/syscall/str.go index 0fce842e8c1..2ddf04b2275 100644 --- a/libgo/go/syscall/str.go +++ b/libgo/go/syscall/str.go @@ -6,8 +6,12 @@ package syscall func itoa(val int) string { // do it here rather than with fmt to avoid dependency if val < 0 { - return "-" + itoa(-val) + return "-" + uitoa(uint(-val)) } + return uitoa(uint(val)) +} + +func uitoa(val uint) string { var buf [32]byte // big enough for int64 i := len(buf) - 1 for val >= 10 { diff --git a/libgo/go/syscall/syscall.go b/libgo/go/syscall/syscall.go index c4f2125140e..ef9d7d65973 100644 --- a/libgo/go/syscall/syscall.go +++ b/libgo/go/syscall/syscall.go @@ -17,6 +17,13 @@ // These calls return err == nil to indicate success; otherwise // err is an operating system error describing the failure. // On most systems, that error has type syscall.Errno. +// +// NOTE: This package is locked down. Code outside the standard +// Go repository should be migrated to use the corresponding +// package in the go.sys subrepository. That is also where updates +// required by new systems or versions should be applied. +// See https://golang.org/s/go1.4-syscall for more information. +// package syscall import "unsafe" @@ -85,3 +92,8 @@ func (ts *Timespec) Nano() int64 { func (tv *Timeval) Nano() int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 } + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +//go:noescape +func use(p unsafe.Pointer) diff --git a/libgo/go/syscall/syscall_errno.go b/libgo/go/syscall/syscall_errno.go index 810572f58a9..01618d173a1 100644 --- a/libgo/go/syscall/syscall_errno.go +++ b/libgo/go/syscall/syscall_errno.go @@ -18,7 +18,7 @@ func (e Errno) Error() string { } func (e Errno) Temporary() bool { - return e == EINTR || e == EMFILE || e.Timeout() + return e == EINTR || e == EMFILE || e == ECONNRESET || e == ECONNABORTED || e.Timeout() } func (e Errno) Timeout() bool { diff --git a/libgo/go/syscall/syscall_test.go b/libgo/go/syscall/syscall_test.go index 2a39b54f1b2..846c4873d28 100644 --- a/libgo/go/syscall/syscall_test.go +++ b/libgo/go/syscall/syscall_test.go @@ -5,6 +5,7 @@ package syscall_test import ( + "fmt" "syscall" "testing" ) @@ -28,3 +29,19 @@ func TestEnv(t *testing.T) { // make sure TESTENV gets set to "", not deleted testSetGetenv(t, "TESTENV", "") } + +func TestItoa(t *testing.T) { + // Make most negative integer: 0x8000... + i := 1 + for i<<1 != 0 { + i <<= 1 + } + if i >= 0 { + t.Fatal("bad math") + } + s := syscall.Itoa(i) + f := fmt.Sprint(i) + if s != f { + t.Fatalf("itoa(%d) = %s, want %s", i, s, f) + } +} diff --git a/libgo/go/syscall/syscall_unix.go b/libgo/go/syscall/syscall_unix.go index a64b05fb5cc..74f10c29da5 100644 --- a/libgo/go/syscall/syscall_unix.go +++ b/libgo/go/syscall/syscall_unix.go @@ -125,7 +125,7 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d cap int }{addr, length, length} - // Use unsafeto turn sl into a []byte. + // Use unsafe to turn sl into a []byte. b := *(*[]byte)(unsafe.Pointer(&sl)) // Register mapping in m and return it. 
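The itoa/uitoa split in syscall/str.go above exists so that the most negative int formats correctly: negating it wraps back to the same negative value, but converting that wrapped value to uint yields the right magnitude, which is exactly what the new TestItoa checks. A self-contained sketch of that reasoning follows; the digit loop is reconstructed here and may differ cosmetically from the actual source.

package main

import "fmt"

// uitoa formats an unsigned integer without fmt, mirroring the new helper.
func uitoa(val uint) string {
	var buf [32]byte // big enough for int64
	i := len(buf) - 1
	for val >= 10 {
		buf[i] = byte(val%10 + '0')
		i--
		val /= 10
	}
	buf[i] = byte(val + '0')
	return string(buf[i:])
}

// itoa handles the sign and defers to uitoa. For the most negative int,
// -val wraps back to the same negative value, but uint(-val) is the correct
// magnitude, so the old "-" + itoa(-val) form can no longer recurse on a
// still-negative value.
func itoa(val int) string {
	if val < 0 {
		return "-" + uitoa(uint(-val))
	}
	return uitoa(uint(val))
}

func main() {
	// Build the most negative int the same way TestItoa does: 0x8000...0.
	i := 1
	for i<<1 != 0 {
		i <<= 1
	}
	fmt.Println(itoa(i))                  // -9223372036854775808 on 64-bit
	fmt.Println(itoa(i) == fmt.Sprint(i)) // true
}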
diff --git a/libgo/go/testing/allocs_test.go b/libgo/go/testing/allocs_test.go new file mode 100644 index 00000000000..ec17daa2b1d --- /dev/null +++ b/libgo/go/testing/allocs_test.go @@ -0,0 +1,29 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing_test + +import "testing" + +var global interface{} + +var allocsPerRunTests = []struct { + name string + fn func() + allocs float64 +}{ + {"alloc *byte", func() { global = new(*byte) }, 1}, + {"alloc complex128", func() { global = new(complex128) }, 1}, + {"alloc float64", func() { global = new(float64) }, 1}, + {"alloc int32", func() { global = new(int32) }, 1}, + {"alloc byte", func() { global = new(byte) }, 1}, +} + +func TestAllocsPerRun(t *testing.T) { + for _, tt := range allocsPerRunTests { + if allocs := testing.AllocsPerRun(100, tt.fn); allocs != tt.allocs { + t.Errorf("AllocsPerRun(100, %s) = %v, want %v", tt.name, allocs, tt.allocs) + } + } +} diff --git a/libgo/go/testing/benchmark.go b/libgo/go/testing/benchmark.go index 1fbf5c8615f..ffd5376844a 100644 --- a/libgo/go/testing/benchmark.go +++ b/libgo/go/testing/benchmark.go @@ -157,7 +157,7 @@ func roundDown10(n int) int { return result } -// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. +// roundUp rounds x up to a number of the form [1eX, 2eX, 3eX, 5eX]. func roundUp(n int) int { base := roundDown10(n) switch { @@ -165,6 +165,8 @@ func roundUp(n int) int { return base case n <= (2 * base): return 2 * base + case n <= (3 * base): + return 3 * base case n <= (5 * base): return 5 * base default: @@ -180,10 +182,10 @@ func (b *B) run() BenchmarkResult { } // launch launches the benchmark function. It gradually increases the number -// of benchmark iterations until the benchmark runs for a second in order -// to get a reasonable measurement. It prints timing information in this form +// of benchmark iterations until the benchmark runs for the requested benchtime. +// It prints timing information in this form // testing.BenchmarkHello 100000 19 ns/op -// launch is run by the fun function as a separate goroutine. +// launch is run by the run function as a separate goroutine. func (b *B) launch() { // Run the benchmark for a single iteration in case it's expensive. n := 1 @@ -199,16 +201,16 @@ func (b *B) launch() { d := *benchTime for !b.failed && b.duration < d && n < 1e9 { last := n - // Predict iterations/sec. + // Predict required iterations. if b.nsPerOp() == 0 { n = 1e9 } else { n = int(d.Nanoseconds() / b.nsPerOp()) } - // Run more iterations than we think we'll need for a second (1.5x). + // Run more iterations than we think we'll need (1.2x). // Don't grow too fast in case we had timing errors previously. // Be sure to run at least one more than last time. - n = max(min(n+n/2, 100*last), last+1) + n = max(min(n+n/5, 100*last), last+1) // Round up to something easy to read. 
n = roundUp(n) b.runN(n) diff --git a/libgo/go/testing/benchmark_test.go b/libgo/go/testing/benchmark_test.go index f7ea64e7f1c..431bb537bd5 100644 --- a/libgo/go/testing/benchmark_test.go +++ b/libgo/go/testing/benchmark_test.go @@ -41,12 +41,14 @@ var roundUpTests = []struct { {0, 1}, {1, 1}, {2, 2}, + {3, 3}, {5, 5}, {9, 10}, {999, 1000}, {1000, 1000}, {1400, 2000}, {1700, 2000}, + {2700, 3000}, {4999, 5000}, {5000, 5000}, {5001, 10000}, diff --git a/libgo/go/testing/cover.go b/libgo/go/testing/cover.go index dd29364d87e..a4ce37f7c2d 100644 --- a/libgo/go/testing/cover.go +++ b/libgo/go/testing/cover.go @@ -9,6 +9,7 @@ package testing import ( "fmt" "os" + "sync/atomic" ) // CoverBlock records the coverage data for a single basic block. @@ -34,6 +35,29 @@ type Cover struct { CoveredPackages string } +// Coverage reports the current code coverage as a fraction in the range [0, 1]. +// If coverage is not enabled, Coverage returns 0. +// +// When running a large set of sequential test cases, checking Coverage after each one +// can be useful for identifying which test cases exercise new code paths. +// It is not a replacement for the reports generated by 'go test -cover' and +// 'go tool cover'. +func Coverage() float64 { + var n, d int64 + for _, counters := range cover.Counters { + for i := range counters { + if atomic.LoadUint32(&counters[i]) > 0 { + n++ + } + d++ + } + } + if d == 0 { + return 0 + } + return float64(n) / float64(d) +} + // RegisterCover records the coverage data accumulators for the tests. // NOTE: This function is internal to the testing infrastructure and may change. // It is not covered (yet) by the Go 1 compatibility guidelines. @@ -61,11 +85,13 @@ func coverReport() { } var active, total int64 + var count uint32 for name, counts := range cover.Counters { blocks := cover.Blocks[name] - for i, count := range counts { + for i := range counts { stmts := int64(blocks[i].Stmts) total += stmts + count = atomic.LoadUint32(&counts[i]) // For -mode=atomic. if count > 0 { active += stmts } diff --git a/libgo/go/testing/example.go b/libgo/go/testing/example.go index 828c2d3eda8..f5762e4db4a 100644 --- a/libgo/go/testing/example.go +++ b/libgo/go/testing/example.go @@ -71,7 +71,7 @@ func runExample(eg InternalExample) (ok bool) { // Clean up in a deferred call so we can recover if the example panics. defer func() { - d := time.Now().Sub(start) + dstr := fmtDuration(time.Now().Sub(start)) // Close pipe, restore stdout, get output. 
w.Close() @@ -84,10 +84,10 @@ func runExample(eg InternalExample) (ok bool) { fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", g, e) } if fail != "" || err != nil { - fmt.Printf("--- FAIL: %s (%v)\n%s", eg.Name, d, fail) + fmt.Printf("--- FAIL: %s (%s)\n%s", eg.Name, dstr, fail) ok = false } else if *chatty { - fmt.Printf("--- PASS: %s (%v)\n", eg.Name, d) + fmt.Printf("--- PASS: %s (%s)\n", eg.Name, dstr) } if err != nil { panic(err) diff --git a/libgo/go/testing/quick/quick.go b/libgo/go/testing/quick/quick.go index bc79cc32922..909c65f788b 100644 --- a/libgo/go/testing/quick/quick.go +++ b/libgo/go/testing/quick/quick.go @@ -225,12 +225,12 @@ func (s *CheckEqualError) Error() string { // t.Error(err) // } // } -func Check(function interface{}, config *Config) (err error) { +func Check(f interface{}, config *Config) (err error) { if config == nil { config = &defaultConfig } - f, fType, ok := functionAndType(function) + fVal, fType, ok := functionAndType(f) if !ok { err = SetupError("argument is not a function") return @@ -255,7 +255,7 @@ func Check(function interface{}, config *Config) (err error) { return } - if !f.Call(arguments)[0].Bool() { + if !fVal.Call(arguments)[0].Bool() { err = &CheckError{i + 1, toInterfaces(arguments)} return } diff --git a/libgo/go/testing/testing.go b/libgo/go/testing/testing.go index 1b7360a177e..e54a3b8ce4d 100644 --- a/libgo/go/testing/testing.go +++ b/libgo/go/testing/testing.go @@ -44,7 +44,7 @@ // } // // The benchmark function must run the target code b.N times. -// The benchmark package will vary b.N until the benchmark function lasts +// During benchark execution, b.N is adjusted until the benchmark function lasts // long enough to be timed reliably. The output // BenchmarkHello 10000000 282 ns/op // means that the loop ran 10000000 times at a speed of 282 ns per loop. @@ -243,6 +243,11 @@ func decorate(s string) string { return buf.String() } +// fmtDuration returns a string representing d in the form "87.00s". +func fmtDuration(d time.Duration) string { + return fmt.Sprintf("%.2fs", d.Seconds()) +} + // TB is the interface common to T and B. type TB interface { Error(args ...interface{}) @@ -492,15 +497,15 @@ func (m *M) Run() int { } func (t *T) report() { - tstr := fmt.Sprintf("(%.2f seconds)", t.duration.Seconds()) - format := "--- %s: %s %s\n%s" + dstr := fmtDuration(t.duration) + format := "--- %s: %s (%s)\n%s" if t.Failed() { - fmt.Printf(format, "FAIL", t.name, tstr, t.output) + fmt.Printf(format, "FAIL", t.name, dstr, t.output) } else if *chatty { if t.Skipped() { - fmt.Printf(format, "SKIP", t.name, tstr, t.output) + fmt.Printf(format, "SKIP", t.name, dstr, t.output) } else { - fmt.Printf(format, "PASS", t.name, tstr, t.output) + fmt.Printf(format, "PASS", t.name, dstr, t.output) } } } @@ -615,6 +620,7 @@ func after() { fmt.Fprintf(os.Stderr, "testing: %s\n", err) os.Exit(2) } + runtime.GC() // materialize all statistics if err = pprof.WriteHeapProfile(f); err != nil { fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *memProfile, err) os.Exit(2) diff --git a/libgo/go/testing/testing_test.go b/libgo/go/testing/testing_test.go new file mode 100644 index 00000000000..87a5c16d6ed --- /dev/null +++ b/libgo/go/testing/testing_test.go @@ -0,0 +1,18 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing_test + +import ( + "os" + "testing" +) + +// This is exactly what a test would do without a TestMain. 
+// It's here only so that there is at least one package in the +// standard library with a TestMain, so that code is executed. + +func TestMain(m *testing.M) { + os.Exit(m.Run()) +} diff --git a/libgo/go/text/scanner/scanner.go b/libgo/go/text/scanner/scanner.go index db7ca73c68d..5199ee4fc7d 100644 --- a/libgo/go/text/scanner/scanner.go +++ b/libgo/go/text/scanner/scanner.go @@ -11,7 +11,7 @@ // By default, a Scanner skips white space and Go comments and recognizes all // literals as defined by the Go language specification. It may be // customized to recognize only a subset of those literals and to recognize -// different white space characters. +// different identifier and white space characters. // // Basic usage pattern: // @@ -34,8 +34,6 @@ import ( "unicode/utf8" ) -// TODO(gri): Consider changing this to use the new (token) Position package. - // A source position is represented by a Position value. // A position is valid if Line > 0. type Position struct { @@ -68,6 +66,12 @@ func (pos Position) String() string { // // ScanIdents | ScanInts | SkipComments // +// With the exceptions of comments, which are skipped if SkipComments is +// set, unrecognized tokens are not ignored. Instead, the scanner simply +// returns the respective individual characters (or possibly sub-tokens). +// For instance, if the mode is ScanIdents (not ScanStrings), the string +// "foo" is scanned as the token sequence '"' Ident '"'. +// const ( ScanIdents = 1 << -Ident ScanInts = 1 << -Int @@ -164,6 +168,13 @@ type Scanner struct { // for values ch > ' '). The field may be changed at any time. Whitespace uint64 + // IsIdentRune is a predicate controlling the characters accepted + // as the ith rune in an identifier. The set of valid characters + // must not intersect with the set of white space characters. + // If no IsIdentRune function is set, regular Go identifiers are + // accepted instead. The field may be changed at any time. + IsIdentRune func(ch rune, i int) bool + // Start position of most recently scanned token; set by Scan. // Calling Init or Next invalidates the position (Line == 0). // The Filename field is always left untouched by the Scanner. @@ -334,9 +345,17 @@ func (s *Scanner) error(msg string) { fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) } +func (s *Scanner) isIdentRune(ch rune, i int) bool { + if s.IsIdentRune != nil { + return s.IsIdentRune(ch, i) + } + return ch == '_' || unicode.IsLetter(ch) || unicode.IsDigit(ch) && i > 0 +} + func (s *Scanner) scanIdentifier() rune { - ch := s.next() // read character after first '_' or letter - for ch == '_' || unicode.IsLetter(ch) || unicode.IsDigit(ch) { + // we know the zero'th rune is OK; start scanning at the next one + ch := s.next() + for i := 1; s.isIdentRune(ch, i); i++ { ch = s.next() } return ch @@ -563,7 +582,7 @@ redo: // determine token value tok := ch switch { - case unicode.IsLetter(ch) || ch == '_': + case s.isIdentRune(ch, 0): if s.Mode&ScanIdents != 0 { tok = Ident ch = s.scanIdentifier() diff --git a/libgo/go/text/scanner/scanner_test.go b/libgo/go/text/scanner/scanner_test.go index 7d3f597eb9a..702fac2b1ad 100644 --- a/libgo/go/text/scanner/scanner_test.go +++ b/libgo/go/text/scanner/scanner_test.go @@ -357,6 +357,28 @@ func TestScanSelectedMask(t *testing.T) { testScanSelectedMode(t, ScanComments, Comment) } +func TestScanCustomIdent(t *testing.T) { + const src = "faab12345 a12b123 a12 3b" + s := new(Scanner).Init(strings.NewReader(src)) + // ident = ( 'a' | 'b' ) { digit } . + // digit = '0' .. '3' . 
+ // with a maximum length of 4 + s.IsIdentRune = func(ch rune, i int) bool { + return i == 0 && (ch == 'a' || ch == 'b') || 0 < i && i < 4 && '0' <= ch && ch <= '3' + } + checkTok(t, s, 1, s.Scan(), 'f', "f") + checkTok(t, s, 1, s.Scan(), Ident, "a") + checkTok(t, s, 1, s.Scan(), Ident, "a") + checkTok(t, s, 1, s.Scan(), Ident, "b123") + checkTok(t, s, 1, s.Scan(), Int, "45") + checkTok(t, s, 1, s.Scan(), Ident, "a12") + checkTok(t, s, 1, s.Scan(), Ident, "b123") + checkTok(t, s, 1, s.Scan(), Ident, "a12") + checkTok(t, s, 1, s.Scan(), Int, "3") + checkTok(t, s, 1, s.Scan(), Ident, "b") + checkTok(t, s, 1, s.Scan(), EOF, "") +} + func TestScanNext(t *testing.T) { const BOM = '\uFEFF' BOMs := string(BOM) diff --git a/libgo/go/text/template/doc.go b/libgo/go/text/template/doc.go index 7c6efd59cde..223c595c25d 100644 --- a/libgo/go/text/template/doc.go +++ b/libgo/go/text/template/doc.go @@ -338,10 +338,11 @@ arguments will be evaluated.) The comparison functions work on basic types only (or named basic types, such as "type Celsius float32"). They implement the Go rules for comparison of values, except that size and exact type are -ignored, so any integer value may be compared with any other integer -value, any unsigned integer value may be compared with any other -unsigned integer value, and so on. However, as usual, one may not -compare an int with a float32 and so on. +ignored, so any integer value, signed or unsigned, may be compared +with any other integer value. (The arithmetic value is compared, +not the bit pattern, so all negative integers are less than all +unsigned integers.) However, as usual, one may not compare an int +with a float32 and so on. Associated templates diff --git a/libgo/go/text/template/exec.go b/libgo/go/text/template/exec.go index 2f323126453..b00e10c7e41 100644 --- a/libgo/go/text/template/exec.go +++ b/libgo/go/text/template/exec.go @@ -393,7 +393,7 @@ func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { switch { case constant.IsComplex: return reflect.ValueOf(constant.Complex128) // incontrovertible. - case constant.IsFloat && strings.IndexAny(constant.Text, ".eE") >= 0: + case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: return reflect.ValueOf(constant.Float64) case constant.IsInt: n := int(constant.Int64) @@ -407,6 +407,10 @@ func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { return zero } +func isHexConstant(s string) bool { + return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') +} + func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { s.at(field) return s.evalFieldChain(dot, dot, field, field.Ident, args, final) @@ -542,7 +546,7 @@ func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, a argv := make([]reflect.Value, numIn) // Args must be evaluated. Fixed args first. i := 0 - for ; i < numFixed; i++ { + for ; i < numFixed && i < len(args); i++ { argv[i] = s.evalArg(dot, typ.In(i), args[i]) } // Now the ... args. 
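For illustration only (this snippet is not part of the patch), the idealConstant/isHexConstant change above can be exercised from user code: with the guard in place, a hexadecimal literal such as 0xef, which happens to contain the letter 'e', is treated as an integer constant rather than a float64 (Issue 8622).

package main

import (
	"os"
	"text/template"
)

func main() {
	// %T reports the Go type chosen for the ideal constant 0xef.
	// With the isHexConstant guard this prints "int"; before the fix it printed "float64".
	t := template.Must(template.New("hex").Parse("{{printf `%T` 0xef}}"))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
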
@@ -632,6 +636,8 @@ func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) refle return s.validateType(s.evalPipeline(dot, arg), typ) case *parse.IdentifierNode: return s.evalFunction(dot, arg, arg, nil, zero) + case *parse.ChainNode: + return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) } switch typ.Kind() { case reflect.Bool: diff --git a/libgo/go/text/template/exec_test.go b/libgo/go/text/template/exec_test.go index 868f2cb94c3..69c213ed245 100644 --- a/libgo/go/text/template/exec_test.go +++ b/libgo/go/text/template/exec_test.go @@ -176,6 +176,12 @@ func (t *T) Method3(v interface{}) string { return fmt.Sprintf("Method3: %v", v) } +func (t *T) Copy() *T { + n := new(T) + *n = *t + return n +} + func (t *T) MAdd(a int, b []int) []int { v := make([]int, len(b)) for i, x := range b { @@ -514,6 +520,13 @@ var execTests = []execTest{ {"bug10", "{{mapOfThree.three}}-{{(mapOfThree).three}}", "3-3", 0, true}, // Dereferencing nil pointer while evaluating function arguments should not panic. Issue 7333. {"bug11", "{{valueString .PS}}", "", T{}, false}, + // 0xef gave constant type float64. Issue 8622. + {"bug12xe", "{{printf `%T` 0xef}}", "int", T{}, true}, + {"bug12xE", "{{printf `%T` 0xEE}}", "int", T{}, true}, + {"bug12Xe", "{{printf `%T` 0Xef}}", "int", T{}, true}, + {"bug12XE", "{{printf `%T` 0XEE}}", "int", T{}, true}, + // Chained nodes did not work as arguments. Issue 8473. + {"bug13", "{{print (.Copy).I}}", "17", tVal, true}, } func zeroArgs() string { @@ -880,6 +893,18 @@ func TestMessageForExecuteEmpty(t *testing.T) { } } +func TestFinalForPrintf(t *testing.T) { + tmpl, err := New("").Parse(`{{"x" | printf}}`) + if err != nil { + t.Fatal(err) + } + var b bytes.Buffer + err = tmpl.Execute(&b, 0) + if err != nil { + t.Fatal(err) + } +} + type cmpTest struct { expr string truth string @@ -897,8 +922,8 @@ var cmpTests = []cmpTest{ {"eq 1 2", "false", true}, {"eq `xy` `xy`", "true", true}, {"eq `xy` `xyz`", "false", true}, - {"eq .Xuint .Xuint", "true", true}, - {"eq .Xuint .Yuint", "false", true}, + {"eq .Uthree .Uthree", "true", true}, + {"eq .Uthree .Ufour", "false", true}, {"eq 3 4 5 6 3", "true", true}, {"eq 3 4 5 6 7", "false", true}, {"ne true true", "false", true}, @@ -911,16 +936,16 @@ var cmpTests = []cmpTest{ {"ne 1 2", "true", true}, {"ne `xy` `xy`", "false", true}, {"ne `xy` `xyz`", "true", true}, - {"ne .Xuint .Xuint", "false", true}, - {"ne .Xuint .Yuint", "true", true}, + {"ne .Uthree .Uthree", "false", true}, + {"ne .Uthree .Ufour", "true", true}, {"lt 1.5 1.5", "false", true}, {"lt 1.5 2.5", "true", true}, {"lt 1 1", "false", true}, {"lt 1 2", "true", true}, {"lt `xy` `xy`", "false", true}, {"lt `xy` `xyz`", "true", true}, - {"lt .Xuint .Xuint", "false", true}, - {"lt .Xuint .Yuint", "true", true}, + {"lt .Uthree .Uthree", "false", true}, + {"lt .Uthree .Ufour", "true", true}, {"le 1.5 1.5", "true", true}, {"le 1.5 2.5", "true", true}, {"le 2.5 1.5", "false", true}, @@ -930,9 +955,9 @@ var cmpTests = []cmpTest{ {"le `xy` `xy`", "true", true}, {"le `xy` `xyz`", "true", true}, {"le `xyz` `xy`", "false", true}, - {"le .Xuint .Xuint", "true", true}, - {"le .Xuint .Yuint", "true", true}, - {"le .Yuint .Xuint", "false", true}, + {"le .Uthree .Uthree", "true", true}, + {"le .Uthree .Ufour", "true", true}, + {"le .Ufour .Uthree", "false", true}, {"gt 1.5 1.5", "false", true}, {"gt 1.5 2.5", "false", true}, {"gt 1 1", "false", true}, @@ -940,9 +965,9 @@ var cmpTests = []cmpTest{ {"gt 1 2", "false", true}, {"gt `xy` `xy`", "false", true}, {"gt `xy` 
`xyz`", "false", true}, - {"gt .Xuint .Xuint", "false", true}, - {"gt .Xuint .Yuint", "false", true}, - {"gt .Yuint .Xuint", "true", true}, + {"gt .Uthree .Uthree", "false", true}, + {"gt .Uthree .Ufour", "false", true}, + {"gt .Ufour .Uthree", "true", true}, {"ge 1.5 1.5", "true", true}, {"ge 1.5 2.5", "false", true}, {"ge 2.5 1.5", "true", true}, @@ -952,11 +977,40 @@ var cmpTests = []cmpTest{ {"ge `xy` `xy`", "true", true}, {"ge `xy` `xyz`", "false", true}, {"ge `xyz` `xy`", "true", true}, - {"ge .Xuint .Xuint", "true", true}, - {"ge .Xuint .Yuint", "false", true}, - {"ge .Yuint .Xuint", "true", true}, + {"ge .Uthree .Uthree", "true", true}, + {"ge .Uthree .Ufour", "false", true}, + {"ge .Ufour .Uthree", "true", true}, + // Mixing signed and unsigned integers. + {"eq .Uthree .Three", "true", true}, + {"eq .Three .Uthree", "true", true}, + {"le .Uthree .Three", "true", true}, + {"le .Three .Uthree", "true", true}, + {"ge .Uthree .Three", "true", true}, + {"ge .Three .Uthree", "true", true}, + {"lt .Uthree .Three", "false", true}, + {"lt .Three .Uthree", "false", true}, + {"gt .Uthree .Three", "false", true}, + {"gt .Three .Uthree", "false", true}, + {"eq .Ufour .Three", "false", true}, + {"lt .Ufour .Three", "false", true}, + {"gt .Ufour .Three", "true", true}, + {"eq .NegOne .Uthree", "false", true}, + {"eq .Uthree .NegOne", "false", true}, + {"ne .NegOne .Uthree", "true", true}, + {"ne .Uthree .NegOne", "true", true}, + {"lt .NegOne .Uthree", "true", true}, + {"lt .Uthree .NegOne", "false", true}, + {"le .NegOne .Uthree", "true", true}, + {"le .Uthree .NegOne", "false", true}, + {"gt .NegOne .Uthree", "false", true}, + {"gt .Uthree .NegOne", "true", true}, + {"ge .NegOne .Uthree", "false", true}, + {"ge .Uthree .NegOne", "true", true}, + {"eq (index `x` 0) 'x'", "true", true}, // The example that triggered this rule. + {"eq (index `x` 0) 'y'", "false", true}, // Errors {"eq `xy` 1", "", false}, // Different types. + {"eq 2 2.0", "", false}, // Different types. {"lt true true", "", false}, // Unordered types. {"lt 1+0i 1+0i", "", false}, // Unordered types. } @@ -964,13 +1018,14 @@ var cmpTests = []cmpTest{ func TestComparison(t *testing.T) { b := new(bytes.Buffer) var cmpStruct = struct { - Xuint, Yuint uint - }{3, 4} + Uthree, Ufour uint + NegOne, Three int + }{3, 4, -1, 3} for _, test := range cmpTests { text := fmt.Sprintf("{{if %s}}true{{else}}false{{end}}", test.expr) tmpl, err := New("empty").Parse(text) if err != nil { - t.Fatal(err) + t.Fatalf("%q: %s", test.expr, err) } b.Reset() err = tmpl.Execute(b, &cmpStruct) diff --git a/libgo/go/text/template/funcs.go b/libgo/go/text/template/funcs.go index e854122624e..39ee5ed68fb 100644 --- a/libgo/go/text/template/funcs.go +++ b/libgo/go/text/template/funcs.go @@ -314,25 +314,34 @@ func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { if err != nil { return false, err } - if k1 != k2 { - return false, errBadComparison - } truth := false - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. 
+ switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind: + truth = v1.Bool() == v2.Bool() + case complexKind: + truth = v1.Complex() == v2.Complex() + case floatKind: + truth = v1.Float() == v2.Float() + case intKind: + truth = v1.Int() == v2.Int() + case stringKind: + truth = v1.String() == v2.String() + case uintKind: + truth = v1.Uint() == v2.Uint() + default: + panic("invalid kind") + } } if truth { return true, nil @@ -360,23 +369,32 @@ func lt(arg1, arg2 interface{}) (bool, error) { if err != nil { return false, err } - if k1 != k2 { - return false, errBadComparison - } truth := false - switch k1 { - case boolKind, complexKind: - return false, errBadComparisonType - case floatKind: - truth = v1.Float() < v2.Float() - case intKind: - truth = v1.Int() < v2.Int() - case stringKind: - truth = v1.String() < v2.String() - case uintKind: - truth = v1.Uint() < v2.Uint() - default: - panic("invalid kind") + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. + switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind, complexKind: + return false, errBadComparisonType + case floatKind: + truth = v1.Float() < v2.Float() + case intKind: + truth = v1.Int() < v2.Int() + case stringKind: + truth = v1.String() < v2.String() + case uintKind: + truth = v1.Uint() < v2.Uint() + default: + panic("invalid kind") + } } return truth, nil } diff --git a/libgo/go/text/template/parse/node.go b/libgo/go/text/template/parse/node.go index dc6a3bb929c..55c37f6dbac 100644 --- a/libgo/go/text/template/parse/node.go +++ b/libgo/go/text/template/parse/node.go @@ -26,8 +26,9 @@ type Node interface { // CopyXxx methods that return *XxxNode. Copy() Node Position() Pos // byte position of start of node in full original input string - // Make sure only functions in this package can create Nodes. - unexported() + // tree returns the containing *Tree. + // It is unexported so all implementations of Node are in this package. + tree() *Tree } // NodeType identifies the type of a parse tree node. @@ -41,11 +42,6 @@ func (p Pos) Position() Pos { return p } -// unexported keeps Node implementations local to the package. -// All implementations embed Pos, so this takes care of it. -func (Pos) unexported() { -} - // Type returns itself and provides an easy default implementation // for embedding in a Node. Embedded in all non-trivial Nodes. func (t NodeType) Type() NodeType { @@ -81,17 +77,22 @@ const ( type ListNode struct { NodeType Pos + tr *Tree Nodes []Node // The element nodes in lexical order. 
} -func newList(pos Pos) *ListNode { - return &ListNode{NodeType: NodeList, Pos: pos} +func (t *Tree) newList(pos Pos) *ListNode { + return &ListNode{tr: t, NodeType: NodeList, Pos: pos} } func (l *ListNode) append(n Node) { l.Nodes = append(l.Nodes, n) } +func (l *ListNode) tree() *Tree { + return l.tr +} + func (l *ListNode) String() string { b := new(bytes.Buffer) for _, n := range l.Nodes { @@ -104,7 +105,7 @@ func (l *ListNode) CopyList() *ListNode { if l == nil { return l } - n := newList(l.Pos) + n := l.tr.newList(l.Pos) for _, elem := range l.Nodes { n.append(elem.Copy()) } @@ -119,32 +120,38 @@ func (l *ListNode) Copy() Node { type TextNode struct { NodeType Pos + tr *Tree Text []byte // The text; may span newlines. } -func newText(pos Pos, text string) *TextNode { - return &TextNode{NodeType: NodeText, Pos: pos, Text: []byte(text)} +func (t *Tree) newText(pos Pos, text string) *TextNode { + return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} } func (t *TextNode) String() string { return fmt.Sprintf(textFormat, t.Text) } +func (t *TextNode) tree() *Tree { + return t.tr +} + func (t *TextNode) Copy() Node { - return &TextNode{NodeType: NodeText, Text: append([]byte{}, t.Text...)} + return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} } // PipeNode holds a pipeline with optional declaration type PipeNode struct { NodeType Pos + tr *Tree Line int // The line number in the input (deprecated; kept for compatibility) Decl []*VariableNode // Variable declarations in lexical order. Cmds []*CommandNode // The commands in lexical order. } -func newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { - return &PipeNode{NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} +func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { + return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} } func (p *PipeNode) append(command *CommandNode) { @@ -171,6 +178,10 @@ func (p *PipeNode) String() string { return s } +func (p *PipeNode) tree() *Tree { + return p.tr +} + func (p *PipeNode) CopyPipe() *PipeNode { if p == nil { return p @@ -179,7 +190,7 @@ func (p *PipeNode) CopyPipe() *PipeNode { for _, d := range p.Decl { decl = append(decl, d.Copy().(*VariableNode)) } - n := newPipeline(p.Pos, p.Line, decl) + n := p.tr.newPipeline(p.Pos, p.Line, decl) for _, c := range p.Cmds { n.append(c.Copy().(*CommandNode)) } @@ -196,12 +207,13 @@ func (p *PipeNode) Copy() Node { type ActionNode struct { NodeType Pos + tr *Tree Line int // The line number in the input (deprecated; kept for compatibility) Pipe *PipeNode // The pipeline in the action. } -func newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { - return &ActionNode{NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} +func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { + return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} } func (a *ActionNode) String() string { @@ -209,8 +221,12 @@ func (a *ActionNode) String() string { } +func (a *ActionNode) tree() *Tree { + return a.tr +} + func (a *ActionNode) Copy() Node { - return newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) + return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) } @@ -218,11 +234,12 @@ func (a *ActionNode) Copy() Node { type CommandNode struct { NodeType Pos + tr *Tree Args []Node // Arguments in lexical order: Identifier, field, or constant. 
} -func newCommand(pos Pos) *CommandNode { - return &CommandNode{NodeType: NodeCommand, Pos: pos} +func (t *Tree) newCommand(pos Pos) *CommandNode { + return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} } func (c *CommandNode) append(arg Node) { @@ -244,11 +261,15 @@ func (c *CommandNode) String() string { return s } +func (c *CommandNode) tree() *Tree { + return c.tr +} + func (c *CommandNode) Copy() Node { if c == nil { return c } - n := newCommand(c.Pos) + n := c.tr.newCommand(c.Pos) for _, c := range c.Args { n.append(c.Copy()) } @@ -259,6 +280,7 @@ func (c *CommandNode) Copy() Node { type IdentifierNode struct { NodeType Pos + tr *Tree Ident string // The identifier's name. } @@ -275,12 +297,24 @@ func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { return i } +// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { + i.tr = t + return i +} + func (i *IdentifierNode) String() string { return i.Ident } +func (i *IdentifierNode) tree() *Tree { + return i.tr +} + func (i *IdentifierNode) Copy() Node { - return NewIdentifier(i.Ident).SetPos(i.Pos) + return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) } // VariableNode holds a list of variable names, possibly with chained field @@ -288,11 +322,12 @@ func (i *IdentifierNode) Copy() Node { type VariableNode struct { NodeType Pos + tr *Tree Ident []string // Variable name and fields in lexical order. } -func newVariable(pos Pos, ident string) *VariableNode { - return &VariableNode{NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} +func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { + return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} } func (v *VariableNode) String() string { @@ -306,20 +341,29 @@ func (v *VariableNode) String() string { return s } +func (v *VariableNode) tree() *Tree { + return v.tr +} + func (v *VariableNode) Copy() Node { - return &VariableNode{NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} + return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} } // DotNode holds the special identifier '.'. type DotNode struct { + NodeType Pos + tr *Tree } -func newDot(pos Pos) *DotNode { - return &DotNode{Pos: pos} +func (t *Tree) newDot(pos Pos) *DotNode { + return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} } func (d *DotNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. return NodeDot } @@ -327,20 +371,29 @@ func (d *DotNode) String() string { return "." } +func (d *DotNode) tree() *Tree { + return d.tr +} + func (d *DotNode) Copy() Node { - return newDot(d.Pos) + return d.tr.newDot(d.Pos) } // NilNode holds the special identifier 'nil' representing an untyped nil constant. type NilNode struct { + NodeType Pos + tr *Tree } -func newNil(pos Pos) *NilNode { - return &NilNode{Pos: pos} +func (t *Tree) newNil(pos Pos) *NilNode { + return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} } func (n *NilNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. 
return NodeNil } @@ -348,8 +401,12 @@ func (n *NilNode) String() string { return "nil" } +func (n *NilNode) tree() *Tree { + return n.tr +} + func (n *NilNode) Copy() Node { - return newNil(n.Pos) + return n.tr.newNil(n.Pos) } // FieldNode holds a field (identifier starting with '.'). @@ -358,11 +415,12 @@ func (n *NilNode) Copy() Node { type FieldNode struct { NodeType Pos + tr *Tree Ident []string // The identifiers in lexical order. } -func newField(pos Pos, ident string) *FieldNode { - return &FieldNode{NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period +func (t *Tree) newField(pos Pos, ident string) *FieldNode { + return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period } func (f *FieldNode) String() string { @@ -373,8 +431,12 @@ func (f *FieldNode) String() string { return s } +func (f *FieldNode) tree() *Tree { + return f.tr +} + func (f *FieldNode) Copy() Node { - return &FieldNode{NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} + return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} } // ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). @@ -383,12 +445,13 @@ func (f *FieldNode) Copy() Node { type ChainNode struct { NodeType Pos + tr *Tree Node Node Field []string // The identifiers in lexical order. } -func newChain(pos Pos, node Node) *ChainNode { - return &ChainNode{NodeType: NodeChain, Pos: pos, Node: node} +func (t *Tree) newChain(pos Pos, node Node) *ChainNode { + return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} } // Add adds the named field (which should start with a period) to the end of the chain. @@ -414,19 +477,24 @@ func (c *ChainNode) String() string { return s } +func (c *ChainNode) tree() *Tree { + return c.tr +} + func (c *ChainNode) Copy() Node { - return &ChainNode{NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} + return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} } // BoolNode holds a boolean constant. type BoolNode struct { NodeType Pos + tr *Tree True bool // The value of the boolean constant. } -func newBool(pos Pos, true bool) *BoolNode { - return &BoolNode{NodeType: NodeBool, Pos: pos, True: true} +func (t *Tree) newBool(pos Pos, true bool) *BoolNode { + return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} } func (b *BoolNode) String() string { @@ -436,8 +504,12 @@ func (b *BoolNode) String() string { return "false" } +func (b *BoolNode) tree() *Tree { + return b.tr +} + func (b *BoolNode) Copy() Node { - return newBool(b.Pos, b.True) + return b.tr.newBool(b.Pos, b.True) } // NumberNode holds a number: signed or unsigned integer, float, or complex. @@ -446,6 +518,7 @@ func (b *BoolNode) Copy() Node { type NumberNode struct { NodeType Pos + tr *Tree IsInt bool // Number has an integral value. IsUint bool // Number has an unsigned integral value. IsFloat bool // Number has a floating-point value. @@ -457,8 +530,8 @@ type NumberNode struct { Text string // The original textual representation from the input. 
} -func newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { - n := &NumberNode{NodeType: NodeNumber, Pos: pos, Text: text} +func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { + n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} switch typ { case itemCharConstant: rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) @@ -559,6 +632,10 @@ func (n *NumberNode) String() string { return n.Text } +func (n *NumberNode) tree() *Tree { + return n.tr +} + func (n *NumberNode) Copy() Node { nn := new(NumberNode) *nn = *n // Easy, fast, correct. @@ -569,53 +646,61 @@ func (n *NumberNode) Copy() Node { type StringNode struct { NodeType Pos + tr *Tree Quoted string // The original text of the string, with quotes. Text string // The string, after quote processing. } -func newString(pos Pos, orig, text string) *StringNode { - return &StringNode{NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} +func (t *Tree) newString(pos Pos, orig, text string) *StringNode { + return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} } func (s *StringNode) String() string { return s.Quoted } +func (s *StringNode) tree() *Tree { + return s.tr +} + func (s *StringNode) Copy() Node { - return newString(s.Pos, s.Quoted, s.Text) + return s.tr.newString(s.Pos, s.Quoted, s.Text) } // endNode represents an {{end}} action. // It does not appear in the final parse tree. type endNode struct { + NodeType Pos + tr *Tree } -func newEnd(pos Pos) *endNode { - return &endNode{Pos: pos} -} - -func (e *endNode) Type() NodeType { - return nodeEnd +func (t *Tree) newEnd(pos Pos) *endNode { + return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} } func (e *endNode) String() string { return "{{end}}" } +func (e *endNode) tree() *Tree { + return e.tr +} + func (e *endNode) Copy() Node { - return newEnd(e.Pos) + return e.tr.newEnd(e.Pos) } // elseNode represents an {{else}} action. Does not appear in the final tree. type elseNode struct { NodeType Pos + tr *Tree Line int // The line number in the input (deprecated; kept for compatibility) } -func newElse(pos Pos, line int) *elseNode { - return &elseNode{NodeType: nodeElse, Pos: pos, Line: line} +func (t *Tree) newElse(pos Pos, line int) *elseNode { + return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} } func (e *elseNode) Type() NodeType { @@ -626,14 +711,19 @@ func (e *elseNode) String() string { return "{{else}}" } +func (e *elseNode) tree() *Tree { + return e.tr +} + func (e *elseNode) Copy() Node { - return newElse(e.Pos, e.Line) + return e.tr.newElse(e.Pos, e.Line) } // BranchNode is the common representation of if, range, and with. type BranchNode struct { NodeType Pos + tr *Tree Line int // The line number in the input (deprecated; kept for compatibility) Pipe *PipeNode // The pipeline to be evaluated. List *ListNode // What to execute if the value is non-empty. @@ -658,17 +748,34 @@ func (b *BranchNode) String() string { return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) } +func (b *BranchNode) tree() *Tree { + return b.tr +} + +func (b *BranchNode) Copy() Node { + switch b.NodeType { + case NodeIf: + return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeRange: + return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeWith: + return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + default: + panic("unknown branch type") + } +} + // IfNode represents an {{if}} action and its commands. 
type IfNode struct { BranchNode } -func newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { - return &IfNode{BranchNode{NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { + return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} } func (i *IfNode) Copy() Node { - return newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) + return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) } // RangeNode represents a {{range}} action and its commands. @@ -676,12 +783,12 @@ type RangeNode struct { BranchNode } -func newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { - return &RangeNode{BranchNode{NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { + return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} } func (r *RangeNode) Copy() Node { - return newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) + return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) } // WithNode represents a {{with}} action and its commands. @@ -689,25 +796,26 @@ type WithNode struct { BranchNode } -func newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { - return &WithNode{BranchNode{NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { + return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} } func (w *WithNode) Copy() Node { - return newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) + return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) } // TemplateNode represents a {{template}} action. type TemplateNode struct { NodeType Pos + tr *Tree Line int // The line number in the input (deprecated; kept for compatibility) Name string // The name of the template (unquoted). Pipe *PipeNode // The command to evaluate as dot for the template. 
} -func newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { - return &TemplateNode{NodeType: NodeTemplate, Line: line, Pos: pos, Name: name, Pipe: pipe} +func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { + return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} } func (t *TemplateNode) String() string { @@ -717,6 +825,10 @@ func (t *TemplateNode) String() string { return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) } +func (t *TemplateNode) tree() *Tree { + return t.tr +} + func (t *TemplateNode) Copy() Node { - return newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) + return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) } diff --git a/libgo/go/text/template/parse/parse.go b/libgo/go/text/template/parse/parse.go index 34112fb7b35..af33880c15a 100644 --- a/libgo/go/text/template/parse/parse.go +++ b/libgo/go/text/template/parse/parse.go @@ -129,9 +129,15 @@ func New(name string, funcs ...map[string]interface{}) *Tree { } // ErrorContext returns a textual representation of the location of the node in the input text. +// The receiver is only used when the node does not have a pointer to the tree inside, +// which can occur in old code. func (t *Tree) ErrorContext(n Node) (location, context string) { pos := int(n.Position()) - text := t.text[:pos] + tree := n.tree() + if tree == nil { + tree = t + } + text := tree.text[:pos] byteNum := strings.LastIndex(text, "\n") if byteNum == -1 { byteNum = pos // On first line. @@ -144,7 +150,7 @@ func (t *Tree) ErrorContext(n Node) (location, context string) { if len(context) > 20 { context = fmt.Sprintf("%.20s...", context) } - return fmt.Sprintf("%s:%d:%d", t.ParseName, lineNum, byteNum), context + return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context } // errorf formats the error and terminates processing. @@ -268,7 +274,7 @@ func IsEmptyTree(n Node) bool { // as itemList except it also parses {{define}} actions. // It runs to EOF. func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { - t.Root = newList(t.peek().pos) + t.Root = t.newList(t.peek().pos) for t.peek().typ != itemEOF { if t.peek().typ == itemLeftDelim { delim := t.next() @@ -316,7 +322,7 @@ func (t *Tree) parseDefinition(treeSet map[string]*Tree) { // textOrAction* // Terminates at {{end}} or {{else}}, returned separately. func (t *Tree) itemList() (list *ListNode, next Node) { - list = newList(t.peekNonSpace().pos) + list = t.newList(t.peekNonSpace().pos) for t.peekNonSpace().typ != itemEOF { n := t.textOrAction() switch n.Type() { @@ -334,7 +340,7 @@ func (t *Tree) itemList() (list *ListNode, next Node) { func (t *Tree) textOrAction() Node { switch token := t.nextNonSpace(); token.typ { case itemText: - return newText(token.pos, token.val) + return t.newText(token.pos, token.val) case itemLeftDelim: return t.action() default: @@ -365,7 +371,7 @@ func (t *Tree) action() (n Node) { } t.backup() // Do not pop variables; they persist until "end". 
- return newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) + return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) } // Pipeline: @@ -384,7 +390,7 @@ func (t *Tree) pipeline(context string) (pipe *PipeNode) { tokenAfterVariable := t.peek() if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") { t.nextNonSpace() - variable := newVariable(v.pos, v.val) + variable := t.newVariable(v.pos, v.val) decl = append(decl, variable) t.vars = append(t.vars, v.val) if next.typ == itemChar && next.val == "," { @@ -401,7 +407,7 @@ func (t *Tree) pipeline(context string) (pipe *PipeNode) { } break } - pipe = newPipeline(pos, t.lex.lineNumber(), decl) + pipe = t.newPipeline(pos, t.lex.lineNumber(), decl) for { switch token := t.nextNonSpace(); token.typ { case itemRightDelim, itemRightParen: @@ -442,7 +448,7 @@ func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int // TODO: Should we allow else-if in with and range? if t.peek().typ == itemIf { t.next() // Consume the "if" token. - elseList = newList(next.Position()) + elseList = t.newList(next.Position()) elseList.append(t.ifControl()) // Do not consume the next item - only one {{end}} required. break @@ -461,7 +467,7 @@ func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int // {{if pipeline}} itemList {{else}} itemList {{end}} // If keyword is past. func (t *Tree) ifControl() Node { - return newIf(t.parseControl(true, "if")) + return t.newIf(t.parseControl(true, "if")) } // Range: @@ -469,7 +475,7 @@ func (t *Tree) ifControl() Node { // {{range pipeline}} itemList {{else}} itemList {{end}} // Range keyword is past. func (t *Tree) rangeControl() Node { - return newRange(t.parseControl(false, "range")) + return t.newRange(t.parseControl(false, "range")) } // With: @@ -477,14 +483,14 @@ func (t *Tree) rangeControl() Node { // {{with pipeline}} itemList {{else}} itemList {{end}} // If keyword is past. func (t *Tree) withControl() Node { - return newWith(t.parseControl(false, "with")) + return t.newWith(t.parseControl(false, "with")) } // End: // {{end}} // End keyword is past. func (t *Tree) endControl() Node { - return newEnd(t.expect(itemRightDelim, "end").pos) + return t.newEnd(t.expect(itemRightDelim, "end").pos) } // Else: @@ -495,9 +501,9 @@ func (t *Tree) elseControl() Node { peek := t.peekNonSpace() if peek.typ == itemIf { // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". - return newElse(peek.pos, t.lex.lineNumber()) + return t.newElse(peek.pos, t.lex.lineNumber()) } - return newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) + return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) } // Template: @@ -523,7 +529,7 @@ func (t *Tree) templateControl() Node { // Do not pop variables; they persist until "end". pipe = t.pipeline("template") } - return newTemplate(token.pos, t.lex.lineNumber(), name, pipe) + return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) } // command: @@ -531,7 +537,7 @@ func (t *Tree) templateControl() Node { // space-separated arguments up to a pipeline character or right delimiter. // we consume the pipe character but leave the right delim to terminate the action. func (t *Tree) command() *CommandNode { - cmd := newCommand(t.peekNonSpace().pos) + cmd := t.newCommand(t.peekNonSpace().pos) for { t.peekNonSpace() // skip leading spaces. 
operand := t.operand() @@ -568,7 +574,7 @@ func (t *Tree) operand() Node { return nil } if t.peek().typ == itemField { - chain := newChain(t.peek().pos, node) + chain := t.newChain(t.peek().pos, node) for t.peek().typ == itemField { chain.Add(t.next().val) } @@ -578,9 +584,9 @@ func (t *Tree) operand() Node { // TODO: Switch to Chains always when we can. switch node.Type() { case NodeField: - node = newField(chain.Position(), chain.String()) + node = t.newField(chain.Position(), chain.String()) case NodeVariable: - node = newVariable(chain.Position(), chain.String()) + node = t.newVariable(chain.Position(), chain.String()) default: node = chain } @@ -605,19 +611,19 @@ func (t *Tree) term() Node { if !t.hasFunction(token.val) { t.errorf("function %q not defined", token.val) } - return NewIdentifier(token.val).SetPos(token.pos) + return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) case itemDot: - return newDot(token.pos) + return t.newDot(token.pos) case itemNil: - return newNil(token.pos) + return t.newNil(token.pos) case itemVariable: return t.useVar(token.pos, token.val) case itemField: - return newField(token.pos, token.val) + return t.newField(token.pos, token.val) case itemBool: - return newBool(token.pos, token.val == "true") + return t.newBool(token.pos, token.val == "true") case itemCharConstant, itemComplex, itemNumber: - number, err := newNumber(token.pos, token.val, token.typ) + number, err := t.newNumber(token.pos, token.val, token.typ) if err != nil { t.error(err) } @@ -633,7 +639,7 @@ func (t *Tree) term() Node { if err != nil { t.error(err) } - return newString(token.pos, token.val, s) + return t.newString(token.pos, token.val, s) } t.backup() return nil @@ -660,7 +666,7 @@ func (t *Tree) popVars(n int) { // useVar returns a node for a variable reference. It errors if the // variable is not defined. func (t *Tree) useVar(pos Pos, name string) Node { - v := newVariable(pos, name) + v := t.newVariable(pos, name) for _, varName := range t.vars { if varName == v.Ident[0] { return v diff --git a/libgo/go/text/template/parse/parse_test.go b/libgo/go/text/template/parse/parse_test.go index ba1a18ec542..4a504fa7c83 100644 --- a/libgo/go/text/template/parse/parse_test.go +++ b/libgo/go/text/template/parse/parse_test.go @@ -69,6 +69,8 @@ var numberTests = []numberTest{ {text: "1+2."}, {text: "'x"}, {text: "'xx'"}, + // Issue 8622 - 0xe parsed as floating point. Very embarrassing. + {"0xef", true, true, true, false, 0xef, 0xef, 0xef, 0}, } func TestNumberParse(t *testing.T) { @@ -77,6 +79,7 @@ func TestNumberParse(t *testing.T) { // because imaginary comes out as a number. 
var c complex128 typ := itemNumber + var tree *Tree if test.text[0] == '\'' { typ = itemCharConstant } else { @@ -85,7 +88,7 @@ func TestNumberParse(t *testing.T) { typ = itemComplex } } - n, err := newNumber(0, test.text, typ) + n, err := tree.newNumber(0, test.text, typ) ok := test.isInt || test.isUint || test.isFloat || test.isComplex if ok && err != nil { t.Errorf("unexpected error for %q: %s", test.text, err) diff --git a/libgo/go/time/example_test.go b/libgo/go/time/example_test.go index cfa5b38c5f1..a37e8b86ddc 100644 --- a/libgo/go/time/example_test.go +++ b/libgo/go/time/example_test.go @@ -122,7 +122,7 @@ func ExampleTime_Round() { } // Output: // t.Round( 1ns) = 12:15:30.918273645 - // t.Round( 1us) = 12:15:30.918274 + // t.Round( 1µs) = 12:15:30.918274 // t.Round( 1ms) = 12:15:30.918 // t.Round( 1s) = 12:15:31 // t.Round( 2s) = 12:15:30 @@ -150,7 +150,7 @@ func ExampleTime_Truncate() { // Output: // t.Truncate( 1ns) = 12:15:30.918273645 - // t.Truncate( 1us) = 12:15:30.918273 + // t.Truncate( 1µs) = 12:15:30.918273 // t.Truncate( 1ms) = 12:15:30.918 // t.Truncate( 1s) = 12:15:30 // t.Truncate( 2s) = 12:15:30 diff --git a/libgo/go/time/format.go b/libgo/go/time/format.go index 9f210ea27df..04e79f32dcd 100644 --- a/libgo/go/time/format.go +++ b/libgo/go/time/format.go @@ -7,7 +7,7 @@ package time import "errors" // These are predefined layouts for use in Time.Format and Time.Parse. -// The reference time used in the layouts is: +// The reference time used in the layouts is the specific time: // Mon Jan 2 15:04:05 MST 2006 // which is Unix time 1136239445. Since MST is GMT-0700, // the reference time can be thought of as @@ -402,7 +402,7 @@ func (t Time) String() string { // Format returns a textual representation of the time value formatted // according to layout, which defines the format by showing how the reference -// time, +// time, defined to be // Mon Jan 2 15:04:05 -0700 MST 2006 // would be displayed if it were the value; it serves as an example of the // desired output. The same display rules will then be applied to the time @@ -556,7 +556,7 @@ func (t Time) Format(layout string) string { b = append(b, '+') } b = appendUint(b, uint(zone/60), '0') - if std == stdISO8601ColonTZ || std == stdNumColonTZ { + if std == stdISO8601ColonTZ || std == stdNumColonTZ || std == stdISO8601ColonSecondsTZ || std == stdNumColonSecondsTZ { b = append(b, ':') } b = appendUint(b, uint(zone%60), '0') @@ -676,6 +676,7 @@ func skip(value, prefix string) (string, error) { // Parse parses a formatted string and returns the time value it represents. // The layout defines the format by showing how the reference time, +// defined to be // Mon Jan 2 15:04:05 -0700 MST 2006 // would be interpreted if it were the value; it serves as an example of // the input format. The same interpretation will then be made to the @@ -704,7 +705,7 @@ func skip(value, prefix string) (string, error) { // The zone abbreviation "UTC" is recognized as UTC regardless of location. // If the zone abbreviation is unknown, Parse records the time as being // in a fabricated location with the given zone abbreviation and a zero offset. -// This choice means that such a time can be parse and reformatted with the +// This choice means that such a time can be parsed and reformatted with the // same layout losslessly, but the exact instant used in the representation will // differ by the actual zone offset. To avoid such problems, prefer time layouts // that use a numeric zone offset, or use ParseInLocation. 
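As a small illustration (not part of the patch; the layout and values are chosen for the demo), the reference-time wording clarified above means one layout string drives both Format and Parse, and a numeric zone offset in the layout sidesteps the zone-abbreviation caveat described in the Parse documentation:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The layout is written in terms of the reference time Mon Jan 2 15:04:05 -0700 MST 2006.
	const layout = "2006-01-02T15:04:05-07:00"

	t := time.Date(2015, time.January, 15, 0, 27, 56, 0, time.UTC)
	fmt.Println(t.Format(layout)) // 2015-01-15T00:27:56+00:00

	// A numeric offset parses back to the exact instant without guessing a zone name.
	parsed, err := time.Parse(layout, "2015-01-15T00:27:56+07:00")
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.UTC()) // 2015-01-14 17:27:56 +0000 UTC
}
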
diff --git a/libgo/go/time/format_test.go b/libgo/go/time/format_test.go index 01717cad3e0..75a08c74534 100644 --- a/libgo/go/time/format_test.go +++ b/libgo/go/time/format_test.go @@ -510,10 +510,11 @@ func TestParseSecondsInTimeZone(t *testing.T) { } func TestFormatSecondsInTimeZone(t *testing.T) { - d := Date(1871, 9, 17, 20, 4, 26, 0, FixedZone("LMT", -(34*60+8))) - timestr := d.Format("2006-01-02T15:04:05Z070000") - expected := "1871-09-17T20:04:26-003408" - if timestr != expected { - t.Errorf("Got %s, want %s", timestr, expected) + for _, test := range secondsTimeZoneOffsetTests { + d := Date(1871, 1, 1, 5, 33, 2, 0, FixedZone("LMT", test.expectedoffset)) + timestr := d.Format(test.format) + if timestr != test.value { + t.Errorf("Format = %s, want %s", timestr, test.value) + } } } diff --git a/libgo/go/time/genzabbrs.go b/libgo/go/time/genzabbrs.go index 7c637cb43a7..9eb0728a42e 100644 --- a/libgo/go/time/genzabbrs.go +++ b/libgo/go/time/genzabbrs.go @@ -7,22 +7,26 @@ // // usage: // -// go run genzabbrs.go | gofmt > $GOROOT/src/pkg/time/zoneinfo_abbrs_windows.go +// go run genzabbrs.go -output zoneinfo_abbrs_windows.go // package main import ( + "bytes" "encoding/xml" + "flag" + "go/format" "io/ioutil" "log" "net/http" - "os" "sort" "text/template" "time" ) +var filename = flag.String("output", "zoneinfo_abbrs_windows.go", "output file name") + // getAbbrs finds timezone abbreviations (standard and daylight saving time) // for location l. func getAbbrs(l *time.Location) (st, dt string) { @@ -105,6 +109,7 @@ func readWindowsZones() (zones, error) { } func main() { + flag.Parse() zs, err := readWindowsZones() if err != nil { log.Fatal(err) @@ -117,7 +122,16 @@ func main() { wzURL, zs, } - err = template.Must(template.New("prog").Parse(prog)).Execute(os.Stdout, v) + var buf bytes.Buffer + err = template.Must(template.New("prog").Parse(prog)).Execute(&buf, v) + if err != nil { + log.Fatal(err) + } + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) if err != nil { log.Fatal(err) } diff --git a/libgo/go/time/internal_test.go b/libgo/go/time/internal_test.go index 2243d3668de..edd523bc80c 100644 --- a/libgo/go/time/internal_test.go +++ b/libgo/go/time/internal_test.go @@ -4,11 +4,6 @@ package time -import ( - "errors" - "runtime" -) - func init() { // force US/Pacific for time zone tests ForceUSPacificForTesting() @@ -17,14 +12,14 @@ func init() { var Interrupt = interrupt var DaysIn = daysIn -func empty(now int64, arg interface{}) {} +func empty(arg interface{}, seq uintptr) {} // Test that a runtimeTimer with a duration so large it overflows // does not cause other timers to hang. // // This test has to be in internal_test.go since it fiddles with // unexported data structures. -func CheckRuntimeTimerOverflow() error { +func CheckRuntimeTimerOverflow() { // We manually create a runtimeTimer to bypass the overflow // detection logic in NewTimer: we're testing the underlying // runtime.addtimer function. @@ -35,17 +30,7 @@ func CheckRuntimeTimerOverflow() error { } startTimer(r) - timeout := 100 * Millisecond - switch runtime.GOOS { - // Allow more time for gobuilder to succeed. - case "windows": - timeout = Second - case "plan9": - // TODO(0intro): We don't know why it is needed. - timeout = 3 * Second - } - - // Start a goroutine that should send on t.C before the timeout. + // Start a goroutine that should send on t.C right away. 
t := NewTimer(1) defer func() { @@ -64,29 +49,11 @@ func CheckRuntimeTimerOverflow() error { startTimer(r) }() - // Try to receive from t.C before the timeout. It will succeed - // iff the previous sleep was able to finish. We're forced to - // spin and yield after trying to receive since we can't start - // any more timers (they might hang due to the same bug we're - // now testing). - stop := Now().Add(timeout) - for { - select { - case <-t.C: - return nil // It worked! - default: - if Now().After(stop) { - return errors.New("runtime timer stuck: overflow in addtimer") - } - // Issue 6874. This test previously called runtime.Gosched to try to yield - // to the goroutine servicing t, however the scheduler has a bias towards the - // previously running goroutine in an idle system. Combined with high load due - // to all CPUs busy running tests t's goroutine could be delayed beyond the - // timeout window. - // - // Calling runtime.GC() reduces the worst case lantency for scheduling t by 20x - // under the current Go 1.3 scheduler. - runtime.GC() - } - } + // If the test fails, we will hang here until the timeout in the testing package + // fires, which is 10 minutes. It would be nice to catch the problem sooner, + // but there is no reliable way to guarantee that timerproc schedules without + // doing something involving timerproc itself. Previous failed attempts have + // tried calling runtime.Gosched and runtime.GC, but neither is reliable. + // So we fall back to hope: We hope we don't hang here. + <-t.C } diff --git a/libgo/go/time/sleep.go b/libgo/go/time/sleep.go index 6a03f417bd0..e7a2ee20598 100644 --- a/libgo/go/time/sleep.go +++ b/libgo/go/time/sleep.go @@ -14,11 +14,12 @@ func runtimeNano() int64 // Interface to timers implemented in package runtime. // Must be in sync with ../runtime/runtime.h:/^struct.Timer$ type runtimeTimer struct { - i int32 + i int when int64 period int64 - f func(int64, interface{}) // NOTE: must not be closure + f func(interface{}, uintptr) // NOTE: must not be closure arg interface{} + seq uintptr } // when is a helper function for setting the 'when' field of a runtimeTimer. @@ -42,6 +43,7 @@ func stopTimer(*runtimeTimer) bool // The Timer type represents a single event. // When the Timer expires, the current time will be sent on C, // unless the Timer was created by AfterFunc. +// A Timer must be created with NewTimer or AfterFunc. type Timer struct { C <-chan Time r runtimeTimer @@ -53,6 +55,9 @@ type Timer struct { // Stop does not close the channel, to prevent a read from the channel succeeding // incorrectly. func (t *Timer) Stop() bool { + if t.r.f == nil { + panic("time: Stop called on uninitialized Timer") + } return stopTimer(&t.r) } @@ -76,6 +81,9 @@ func NewTimer(d Duration) *Timer { // It returns true if the timer had been active, false if the timer had // expired or been stopped. func (t *Timer) Reset(d Duration) bool { + if t.r.f == nil { + panic("time: Reset called on uninitialized Timer") + } w := when(d) active := stopTimer(&t.r) t.r.when = w @@ -83,7 +91,7 @@ func (t *Timer) Reset(d Duration) bool { return active } -func sendTime(now int64, c interface{}) { +func sendTime(c interface{}, seq uintptr) { // Non-blocking send of time on c. // Used in NewTimer, it cannot block anyway (buffer). 
// Used in NewTicker, dropping sends on the floor is @@ -117,6 +125,6 @@ func AfterFunc(d Duration, f func()) *Timer { return t } -func goFunc(now int64, arg interface{}) { +func goFunc(arg interface{}, seq uintptr) { go arg.(func())() } diff --git a/libgo/go/time/sleep_test.go b/libgo/go/time/sleep_test.go index 7c2dcaf5471..c21eb997dc4 100644 --- a/libgo/go/time/sleep_test.go +++ b/libgo/go/time/sleep_test.go @@ -9,12 +9,21 @@ import ( "fmt" "runtime" "sort" + "strings" "sync" "sync/atomic" "testing" . "time" ) +// Go runtime uses different Windows timers for time.Now and sleeping. +// These can tick at different frequencies and can arrive out of sync. +// The effect can be seen, for example, as time.Sleep(100ms) is actually +// shorter then 100ms when measured as difference between time.Now before and +// after time.Sleep call. This was observed on Windows XP SP3 (windows/386). +// windowsInaccuracy is to ignore such errors. +const windowsInaccuracy = 17 * Millisecond + func TestSleep(t *testing.T) { const delay = 100 * Millisecond go func() { @@ -23,8 +32,12 @@ func TestSleep(t *testing.T) { }() start := Now() Sleep(delay) + delayadj := delay + if runtime.GOOS == "windows" { + delayadj -= windowsInaccuracy + } duration := Now().Sub(start) - if duration < delay { + if duration < delayadj { t.Fatalf("Sleep(%s) slept for only %s", delay, duration) } } @@ -150,10 +163,14 @@ func TestAfter(t *testing.T) { const delay = 100 * Millisecond start := Now() end := <-After(delay) - if duration := Now().Sub(start); duration < delay { + delayadj := delay + if runtime.GOOS == "windows" { + delayadj -= windowsInaccuracy + } + if duration := Now().Sub(start); duration < delayadj { t.Fatalf("After(%s) slept for only %d ns", delay, duration) } - if min := start.Add(delay); end.Before(min) { + if min := start.Add(delayadj); end.Before(min) { t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end) } } @@ -388,7 +405,27 @@ func TestOverflowRuntimeTimer(t *testing.T) { if testing.Short() { t.Skip("skipping in short mode, see issue 6874") } - if err := CheckRuntimeTimerOverflow(); err != nil { - t.Fatalf(err.Error()) + // This may hang forever if timers are broken. See comment near + // the end of CheckRuntimeTimerOverflow in internal_test.go. + CheckRuntimeTimerOverflow() +} + +func checkZeroPanicString(t *testing.T) { + e := recover() + s, _ := e.(string) + if want := "called on uninitialized Timer"; !strings.Contains(s, want) { + t.Errorf("panic = %v; want substring %q", e, want) } } + +func TestZeroTimerResetPanics(t *testing.T) { + defer checkZeroPanicString(t) + var tr Timer + tr.Reset(1) +} + +func TestZeroTimerStopPanics(t *testing.T) { + defer checkZeroPanicString(t) + var tr Timer + tr.Stop() +} diff --git a/libgo/go/time/time.go b/libgo/go/time/time.go index 0a2b0914283..0300e846a4b 100644 --- a/libgo/go/time/time.go +++ b/libgo/go/time/time.go @@ -31,6 +31,11 @@ import "errors" // change the instant in time being denoted and therefore does not affect the // computations described in earlier paragraphs. // +// Note that the Go == operator compares not just the time instant but also the +// Location. Therefore, Time values should not be used as map or database keys +// without first guaranteeing that the identical Location has been set for all +// values, which can be achieved through use of the UTC or Local method. +// type Time struct { // sec gives the number of seconds elapsed since // January 1, year 1 00:00:00 UTC. 
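To make the new Time documentation above concrete (this example is not part of the patch; the zone name "Demo" is invented for the demo), == compares the Location along with the instant, whereas Equal compares only the instant:

package main

import (
	"fmt"
	"time"
)

func main() {
	utc := time.Date(2015, time.January, 15, 0, 27, 56, 0, time.UTC)
	other := utc.In(time.FixedZone("Demo", 3600)) // same instant, different Location

	fmt.Println(utc == other)     // false: the Locations differ
	fmt.Println(utc.Equal(other)) // true: the instants are equal
}
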
@@ -39,14 +44,7 @@ type Time struct { // nsec specifies a non-negative nanosecond // offset within the second named by Seconds. // It must be in the range [0, 999999999]. - // - // It is declared as uintptr instead of int32 or uint32 - // to avoid garbage collector aliasing in the case where - // on a 64-bit system the int32 or uint32 field is written - // over the low half of a pointer, creating another pointer. - // TODO(rsc): When the garbage collector is completely - // precise, change back to int32. - nsec uintptr + nsec int32 // loc specifies the Location that should be used to // determine the minute, hour, month, day, and year @@ -475,29 +473,28 @@ func (d Duration) String() string { if u < uint64(Second) { // Special case: if duration is smaller than a second, // use smaller units, like 1.2ms - var ( - prec int - unit byte - ) + var prec int + w-- + buf[w] = 's' + w-- switch { case u == 0: return "0" case u < uint64(Microsecond): // print nanoseconds prec = 0 - unit = 'n' + buf[w] = 'n' case u < uint64(Millisecond): // print microseconds prec = 3 - unit = 'u' + // U+00B5 'µ' micro sign == 0xC2 0xB5 + w-- // Need room for two bytes. + copy(buf[w:], "µ") default: // print milliseconds prec = 6 - unit = 'm' + buf[w] = 'm' } - w -= 2 - buf[w] = unit - buf[w+1] = 's' w, u = fmtFrac(buf[:w], u, prec) w = fmtInt(buf[:w], u) } else { @@ -620,7 +617,7 @@ func (t Time) Add(d Duration) Time { t.sec-- nsec += 1e9 } - t.nsec = uintptr(nsec) + t.nsec = nsec return t } @@ -783,7 +780,7 @@ func now() (sec int64, nsec int32) // Now returns the current local time. func Now() Time { sec, nsec := now() - return Time{sec + unixToInternal, uintptr(nsec), Local} + return Time{sec + unixToInternal, nsec, Local} } // UTC returns t with the location set to UTC. @@ -900,7 +897,7 @@ func (t *Time) UnmarshalBinary(data []byte) error { int64(buf[3])<<32 | int64(buf[2])<<40 | int64(buf[1])<<48 | int64(buf[0])<<56 buf = buf[8:] - t.nsec = uintptr(int32(buf[3]) | int32(buf[2])<<8 | int32(buf[1])<<16 | int32(buf[0])<<24) + t.nsec = int32(buf[3]) | int32(buf[2])<<8 | int32(buf[1])<<16 | int32(buf[0])<<24 buf = buf[4:] offset := int(int16(buf[1])|int16(buf[0])<<8) * 60 @@ -979,7 +976,7 @@ func Unix(sec int64, nsec int64) Time { sec-- } } - return Time{sec + unixToInternal, uintptr(nsec), Local} + return Time{sec + unixToInternal, int32(nsec), Local} } func isLeap(year int) bool { @@ -1088,7 +1085,7 @@ func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) T unix -= int64(offset) } - return Time{unix + unixToInternal, uintptr(nsec), loc} + return Time{unix + unixToInternal, int32(nsec), loc} } // Truncate returns the result of rounding t down to a multiple of d (since the zero time). diff --git a/libgo/go/time/time_test.go b/libgo/go/time/time_test.go index 4ae7da5a443..7e31dd78a92 100644 --- a/libgo/go/time/time_test.go +++ b/libgo/go/time/time_test.go @@ -535,7 +535,7 @@ var durationTests = []struct { }{ {"0", 0}, {"1ns", 1 * Nanosecond}, - {"1.1us", 1100 * Nanosecond}, + {"1.1µs", 1100 * Nanosecond}, {"2.2ms", 2200 * Microsecond}, {"3.3s", 3300 * Millisecond}, {"4m5s", 4*Minute + 5*Second}, diff --git a/libgo/go/time/zoneinfo_abbrs_windows.go b/libgo/go/time/zoneinfo_abbrs_windows.go index 80334371fe0..51a1a2f66d8 100644 --- a/libgo/go/time/zoneinfo_abbrs_windows.go +++ b/libgo/go/time/zoneinfo_abbrs_windows.go @@ -18,6 +18,7 @@ var abbrs = map[string]abbr{ "South Africa Standard Time": {"SAST", "SAST"}, // Africa/Johannesburg "W. 
Central Africa Standard Time": {"WAT", "WAT"}, // Africa/Lagos "E. Africa Standard Time": {"EAT", "EAT"}, // Africa/Nairobi + "Libya Standard Time": {"EET", "EET"}, // Africa/Tripoli "Namibia Standard Time": {"WAT", "WAST"}, // Africa/Windhoek "Alaskan Standard Time": {"AKST", "AKDT"}, // America/Anchorage "Paraguay Standard Time": {"PYT", "PYST"}, // America/Asuncion @@ -63,7 +64,6 @@ var abbrs = map[string]abbr{ "Nepal Standard Time": {"NPT", "NPT"}, // Asia/Katmandu "North Asia Standard Time": {"KRAT", "KRAT"}, // Asia/Krasnoyarsk "Magadan Standard Time": {"MAGT", "MAGT"}, // Asia/Magadan - "E. Europe Standard Time": {"EET", "EEST"}, // Asia/Nicosia "N. Central Asia Standard Time": {"NOVT", "NOVT"}, // Asia/Novosibirsk "Myanmar Standard Time": {"MMT", "MMT"}, // Asia/Rangoon "Arab Standard Time": {"AST", "AST"}, // Asia/Riyadh @@ -110,6 +110,7 @@ var abbrs = map[string]abbr{ "Fiji Standard Time": {"FJT", "FJT"}, // Pacific/Fiji "Central Pacific Standard Time": {"SBT", "SBT"}, // Pacific/Guadalcanal "Hawaiian Standard Time": {"HST", "HST"}, // Pacific/Honolulu + "Line Islands Standard Time": {"LINT", "LINT"}, // Pacific/Kiritimati "West Pacific Standard Time": {"PGT", "PGT"}, // Pacific/Port_Moresby "Tonga Standard Time": {"TOT", "TOT"}, // Pacific/Tongatapu } diff --git a/libgo/go/time/zoneinfo_windows.go b/libgo/go/time/zoneinfo_windows.go index 6046743e67d..02d8e0edcc8 100644 --- a/libgo/go/time/zoneinfo_windows.go +++ b/libgo/go/time/zoneinfo_windows.go @@ -11,6 +11,8 @@ import ( "unsafe" ) +//go:generate go run genzabbrs.go -output zoneinfo_abbrs_windows.go + // TODO(rsc): Fall back to copy of zoneinfo files. // BUG(brainman,rsc): On Windows, the operating system does not provide complete diff --git a/libgo/go/unicode/letter.go b/libgo/go/unicode/letter.go index 977bd2b3b05..7fe4241eddf 100644 --- a/libgo/go/unicode/letter.go +++ b/libgo/go/unicode/letter.go @@ -6,6 +6,9 @@ // Unicode code points. package unicode +// Tables are regenerated each time we update the Unicode version. +//go:generate go run maketables.go -tables=all -output tables.go + const ( MaxRune = '\U0010FFFF' // Maximum valid Unicode code point. ReplacementChar = '\uFFFD' // Represents invalid code points. diff --git a/libgo/go/unicode/script_test.go b/libgo/go/unicode/script_test.go index e2ba0011aca..795cb4e171b 100644 --- a/libgo/go/unicode/script_test.go +++ b/libgo/go/unicode/script_test.go @@ -14,14 +14,15 @@ type T struct { script string } -// Hand-chosen tests from Unicode 5.1.0, 6.0.0 and 6.2.0 mostly to discover when new -// scripts and categories arise. +// Hand-chosen tests from Unicode 5.1.0, 6.0.0, 6.2.0, 6.3.0 and 7.0.0 mostly to +// discover when new scripts and categories arise. 
var inTest = []T{ {0x06e2, "Arabic"}, {0x0567, "Armenian"}, {0x10b20, "Avestan"}, {0x1b37, "Balinese"}, {0xa6af, "Bamum"}, + {0x16ada, "Bassa_Vah"}, {0x1be1, "Batak"}, {0x09c2, "Bengali"}, {0x3115, "Bopomofo"}, @@ -31,6 +32,7 @@ var inTest = []T{ {0x11011, "Brahmi"}, {0x156d, "Canadian_Aboriginal"}, {0x102a9, "Carian"}, + {0x10563, "Caucasian_Albanian"}, {0x11111, "Chakma"}, {0xaa4d, "Cham"}, {0x13c2, "Cherokee"}, @@ -42,11 +44,14 @@ var inTest = []T{ {0xa663, "Cyrillic"}, {0x10430, "Deseret"}, {0x094a, "Devanagari"}, + {0x1BC00, "Duployan"}, {0x13001, "Egyptian_Hieroglyphs"}, + {0x10500, "Elbasan"}, {0x1271, "Ethiopic"}, {0x10fc, "Georgian"}, {0x2c40, "Glagolitic"}, {0x10347, "Gothic"}, + {0x11303, "Grantha"}, {0x03ae, "Greek"}, {0x0abf, "Gujarati"}, {0x0a24, "Gurmukhi"}, @@ -66,40 +71,56 @@ var inTest = []T{ {0xa928, "Kayah_Li"}, {0x10a11, "Kharoshthi"}, {0x17c6, "Khmer"}, + {0x11211, "Khojki"}, + {0x112df, "Khudawadi"}, {0x0eaa, "Lao"}, {0x1d79, "Latin"}, {0x1c10, "Lepcha"}, {0x1930, "Limbu"}, + {0x10755, "Linear_A"}, {0x1003c, "Linear_B"}, {0xa4e1, "Lisu"}, {0x10290, "Lycian"}, {0x10930, "Lydian"}, + {0x11173, "Mahajani"}, {0x0d42, "Malayalam"}, {0x0843, "Mandaic"}, + {0x10ac8, "Manichaean"}, {0xabd0, "Meetei_Mayek"}, + {0x1e800, "Mende_Kikakui"}, {0x1099f, "Meroitic_Hieroglyphs"}, {0x109a0, "Meroitic_Cursive"}, {0x16f00, "Miao"}, + {0x11611, "Modi"}, {0x1822, "Mongolian"}, + {0x16a60, "Mro"}, {0x104c, "Myanmar"}, + {0x10880, "Nabataean"}, {0x19c3, "New_Tai_Lue"}, {0x07f8, "Nko"}, {0x169b, "Ogham"}, {0x1c6a, "Ol_Chiki"}, {0x10310, "Old_Italic"}, + {0x10a80, "Old_North_Arabian"}, + {0x10350, "Old_Permic"}, {0x103c9, "Old_Persian"}, {0x10a6f, "Old_South_Arabian"}, {0x10c20, "Old_Turkic"}, {0x0b3e, "Oriya"}, {0x10491, "Osmanya"}, + {0x16b2b, "Pahawh_Hmong"}, + {0x10876, "Palmyrene"}, + {0x11ACE, "Pau_Cin_Hau"}, {0xa860, "Phags_Pa"}, {0x10918, "Phoenician"}, + {0x10baf, "Psalter_Pahlavi"}, {0xa949, "Rejang"}, {0x16c0, "Runic"}, {0x081d, "Samaritan"}, {0xa892, "Saurashtra"}, {0x111a0, "Sharada"}, {0x10463, "Shavian"}, + {0x115c1, "Siddham"}, {0x0dbd, "Sinhala"}, {0x110d0, "Sora_Sompeng"}, {0x1ba3, "Sundanese"}, @@ -117,8 +138,10 @@ var inTest = []T{ {0x0e46, "Thai"}, {0x0f36, "Tibetan"}, {0x2d55, "Tifinagh"}, + {0x114d9, "Tirhuta"}, {0x10388, "Ugaritic"}, {0xa60e, "Vai"}, + {0x118ff, "Warang_Citi"}, {0xa216, "Yi"}, } diff --git a/libgo/go/unicode/tables.go b/libgo/go/unicode/tables.go index 5670d1c5b17..8b77dd6036b 100644 --- a/libgo/go/unicode/tables.go +++ b/libgo/go/unicode/tables.go @@ -3,13 +3,13 @@ // license that can be found in the LICENSE file. // Generated by running -// maketables --tables=all --data=http://www.unicode.org/Public/6.3.0/ucd/UnicodeData.txt --casefolding=http://www.unicode.org/Public/6.3.0/ucd/CaseFolding.txt +// maketables --tables=all --data=http://www.unicode.org/Public/7.0.0/ucd/UnicodeData.txt --casefolding=http://www.unicode.org/Public/7.0.0/ucd/CaseFolding.txt // DO NOT EDIT package unicode // Version is the Unicode edition from which the tables are derived. -const Version = "6.3.0" +const Version = "7.0.0" // Categories is the set of Unicode category tables. 
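A note for reading the regenerated tables that follow: each RangeTable lists inclusive {Lo, Hi, Stride} ranges, with R16 rows for code points up to 0xFFFF and R32 rows above that, so a row such as {0x0601, 0x0605, 1} covers U+0601 through U+0605, and the widened rows simply admit the code points new in Unicode 7.0.0. A small sketch of how such a row is consulted (the table literal below mirrors one changed row and is illustrative, not one of the generated tables):

package main

import (
	"fmt"
	"unicode"
)

// arabicFormat mirrors the widened _Cf row {0x0601, 0x0605, 1} from the
// regenerated tables: an inclusive range walked with stride 1.
var arabicFormat = &unicode.RangeTable{
	R16: []unicode.Range16{
		{Lo: 0x0601, Hi: 0x0605, Stride: 1},
	},
}

func main() {
	fmt.Println(unicode.Is(arabicFormat, 0x0605)) // true: U+0605 is newly covered
	fmt.Println(unicode.Is(arabicFormat, 0x0606)) // false: outside the range
}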
var Categories = map[string]*RangeTable{ @@ -56,7 +56,7 @@ var _C = &RangeTable{ {0x0001, 0x001f, 1}, {0x007f, 0x009f, 1}, {0x00ad, 0x0600, 1363}, - {0x0601, 0x0604, 1}, + {0x0601, 0x0605, 1}, {0x061c, 0x06dd, 193}, {0x070f, 0x180e, 4351}, {0x200b, 0x200f, 1}, @@ -68,8 +68,9 @@ var _C = &RangeTable{ {0xfffa, 0xfffb, 1}, }, R32: []Range32{ - {0x110bd, 0x1d173, 49334}, - {0x1d174, 0x1d17a, 1}, + {0x110bd, 0x1bca0, 44003}, + {0x1bca1, 0x1bca3, 1}, + {0x1d173, 0x1d17a, 1}, {0xe0001, 0xe0020, 31}, {0xe0021, 0xe007f, 1}, {0xf0000, 0xffffd, 1}, @@ -89,7 +90,7 @@ var _Cc = &RangeTable{ var _Cf = &RangeTable{ R16: []Range16{ {0x00ad, 0x0600, 1363}, - {0x0601, 0x0604, 1}, + {0x0601, 0x0605, 1}, {0x061c, 0x06dd, 193}, {0x070f, 0x180e, 4351}, {0x200b, 0x200f, 1}, @@ -100,8 +101,9 @@ var _Cf = &RangeTable{ {0xfffa, 0xfffb, 1}, }, R32: []Range32{ - {0x110bd, 0x1d173, 49334}, - {0x1d174, 0x1d17a, 1}, + {0x110bd, 0x1bca0, 44003}, + {0x1bca1, 0x1bca3, 1}, + {0x1d173, 0x1d17a, 1}, {0xe0001, 0xe0020, 31}, {0xe0021, 0xe007f, 1}, }, @@ -138,13 +140,13 @@ var _L = &RangeTable{ {0x0370, 0x0374, 1}, {0x0376, 0x0377, 1}, {0x037a, 0x037d, 1}, - {0x0386, 0x0388, 2}, - {0x0389, 0x038a, 1}, + {0x037f, 0x0386, 7}, + {0x0388, 0x038a, 1}, {0x038c, 0x038e, 2}, {0x038f, 0x03a1, 1}, {0x03a3, 0x03f5, 1}, {0x03f7, 0x0481, 1}, - {0x048a, 0x0527, 1}, + {0x048a, 0x052f, 1}, {0x0531, 0x0556, 1}, {0x0559, 0x0561, 8}, {0x0562, 0x0587, 1}, @@ -168,13 +170,11 @@ var _L = &RangeTable{ {0x081a, 0x0824, 10}, {0x0828, 0x0840, 24}, {0x0841, 0x0858, 1}, - {0x08a0, 0x08a2, 2}, - {0x08a3, 0x08ac, 1}, + {0x08a0, 0x08b2, 1}, {0x0904, 0x0939, 1}, {0x093d, 0x0950, 19}, {0x0958, 0x0961, 1}, - {0x0971, 0x0977, 1}, - {0x0979, 0x097f, 1}, + {0x0971, 0x0980, 1}, {0x0985, 0x098c, 1}, {0x098f, 0x0990, 1}, {0x0993, 0x09a8, 1}, @@ -226,8 +226,7 @@ var _L = &RangeTable{ {0x0c06, 0x0c0c, 1}, {0x0c0e, 0x0c10, 1}, {0x0c12, 0x0c28, 1}, - {0x0c2a, 0x0c33, 1}, - {0x0c35, 0x0c39, 1}, + {0x0c2a, 0x0c39, 1}, {0x0c3d, 0x0c58, 27}, {0x0c59, 0x0c60, 7}, {0x0c61, 0x0c85, 36}, @@ -306,6 +305,7 @@ var _L = &RangeTable{ {0x166f, 0x167f, 1}, {0x1681, 0x169a, 1}, {0x16a0, 0x16ea, 1}, + {0x16f1, 0x16f8, 1}, {0x1700, 0x170c, 1}, {0x170e, 0x1711, 1}, {0x1720, 0x1731, 1}, @@ -318,7 +318,7 @@ var _L = &RangeTable{ {0x1880, 0x18a8, 1}, {0x18aa, 0x18b0, 6}, {0x18b1, 0x18f5, 1}, - {0x1900, 0x191c, 1}, + {0x1900, 0x191e, 1}, {0x1950, 0x196d, 1}, {0x1970, 0x1974, 1}, {0x1980, 0x19ab, 1}, @@ -406,14 +406,14 @@ var _L = &RangeTable{ {0xa610, 0xa61f, 1}, {0xa62a, 0xa62b, 1}, {0xa640, 0xa66e, 1}, - {0xa67f, 0xa697, 1}, + {0xa67f, 0xa69d, 1}, {0xa6a0, 0xa6e5, 1}, {0xa717, 0xa71f, 1}, {0xa722, 0xa788, 1}, {0xa78b, 0xa78e, 1}, - {0xa790, 0xa793, 1}, - {0xa7a0, 0xa7aa, 1}, - {0xa7f8, 0xa801, 1}, + {0xa790, 0xa7ad, 1}, + {0xa7b0, 0xa7b1, 1}, + {0xa7f7, 0xa801, 1}, {0xa803, 0xa805, 1}, {0xa807, 0xa80a, 1}, {0xa80c, 0xa822, 1}, @@ -425,13 +425,16 @@ var _L = &RangeTable{ {0xa930, 0xa946, 1}, {0xa960, 0xa97c, 1}, {0xa984, 0xa9b2, 1}, - {0xa9cf, 0xaa00, 49}, - {0xaa01, 0xaa28, 1}, + {0xa9cf, 0xa9e0, 17}, + {0xa9e1, 0xa9e4, 1}, + {0xa9e6, 0xa9ef, 1}, + {0xa9fa, 0xa9fe, 1}, + {0xaa00, 0xaa28, 1}, {0xaa40, 0xaa42, 1}, {0xaa44, 0xaa4b, 1}, {0xaa60, 0xaa76, 1}, - {0xaa7a, 0xaa80, 6}, - {0xaa81, 0xaaaf, 1}, + {0xaa7a, 0xaa7e, 4}, + {0xaa7f, 0xaaaf, 1}, {0xaab1, 0xaab5, 4}, {0xaab6, 0xaab9, 3}, {0xaaba, 0xaabd, 1}, @@ -444,6 +447,9 @@ var _L = &RangeTable{ {0xab11, 0xab16, 1}, {0xab20, 0xab26, 1}, {0xab28, 0xab2e, 1}, + {0xab30, 0xab5a, 1}, + {0xab5c, 0xab5f, 1}, + {0xab64, 0xab65, 1}, {0xabc0, 0xabe2, 
1}, {0xac00, 0xd7a3, 1}, {0xd7b0, 0xd7c6, 1}, @@ -484,19 +490,27 @@ var _L = &RangeTable{ {0x10080, 0x100fa, 1}, {0x10280, 0x1029c, 1}, {0x102a0, 0x102d0, 1}, - {0x10300, 0x1031e, 1}, + {0x10300, 0x1031f, 1}, {0x10330, 0x10340, 1}, {0x10342, 0x10349, 1}, + {0x10350, 0x10375, 1}, {0x10380, 0x1039d, 1}, {0x103a0, 0x103c3, 1}, {0x103c8, 0x103cf, 1}, {0x10400, 0x1049d, 1}, + {0x10500, 0x10527, 1}, + {0x10530, 0x10563, 1}, + {0x10600, 0x10736, 1}, + {0x10740, 0x10755, 1}, + {0x10760, 0x10767, 1}, {0x10800, 0x10805, 1}, {0x10808, 0x1080a, 2}, {0x1080b, 0x10835, 1}, {0x10837, 0x10838, 1}, {0x1083c, 0x1083f, 3}, {0x10840, 0x10855, 1}, + {0x10860, 0x10876, 1}, + {0x10880, 0x1089e, 1}, {0x10900, 0x10915, 1}, {0x10920, 0x10939, 1}, {0x10980, 0x109b7, 1}, @@ -506,24 +520,61 @@ var _L = &RangeTable{ {0x10a15, 0x10a17, 1}, {0x10a19, 0x10a33, 1}, {0x10a60, 0x10a7c, 1}, + {0x10a80, 0x10a9c, 1}, + {0x10ac0, 0x10ac7, 1}, + {0x10ac9, 0x10ae4, 1}, {0x10b00, 0x10b35, 1}, {0x10b40, 0x10b55, 1}, {0x10b60, 0x10b72, 1}, + {0x10b80, 0x10b91, 1}, {0x10c00, 0x10c48, 1}, {0x11003, 0x11037, 1}, {0x11083, 0x110af, 1}, {0x110d0, 0x110e8, 1}, {0x11103, 0x11126, 1}, - {0x11183, 0x111b2, 1}, + {0x11150, 0x11172, 1}, + {0x11176, 0x11183, 13}, + {0x11184, 0x111b2, 1}, {0x111c1, 0x111c4, 1}, - {0x11680, 0x116aa, 1}, - {0x12000, 0x1236e, 1}, + {0x111da, 0x11200, 38}, + {0x11201, 0x11211, 1}, + {0x11213, 0x1122b, 1}, + {0x112b0, 0x112de, 1}, + {0x11305, 0x1130c, 1}, + {0x1130f, 0x11310, 1}, + {0x11313, 0x11328, 1}, + {0x1132a, 0x11330, 1}, + {0x11332, 0x11333, 1}, + {0x11335, 0x11339, 1}, + {0x1133d, 0x1135d, 32}, + {0x1135e, 0x11361, 1}, + {0x11480, 0x114af, 1}, + {0x114c4, 0x114c5, 1}, + {0x114c7, 0x11580, 185}, + {0x11581, 0x115ae, 1}, + {0x11600, 0x1162f, 1}, + {0x11644, 0x11680, 60}, + {0x11681, 0x116aa, 1}, + {0x118a0, 0x118df, 1}, + {0x118ff, 0x11ac0, 449}, + {0x11ac1, 0x11af8, 1}, + {0x12000, 0x12398, 1}, {0x13000, 0x1342e, 1}, {0x16800, 0x16a38, 1}, + {0x16a40, 0x16a5e, 1}, + {0x16ad0, 0x16aed, 1}, + {0x16b00, 0x16b2f, 1}, + {0x16b40, 0x16b43, 1}, + {0x16b63, 0x16b77, 1}, + {0x16b7d, 0x16b8f, 1}, {0x16f00, 0x16f44, 1}, {0x16f50, 0x16f93, 67}, {0x16f94, 0x16f9f, 1}, {0x1b000, 0x1b001, 1}, + {0x1bc00, 0x1bc6a, 1}, + {0x1bc70, 0x1bc7c, 1}, + {0x1bc80, 0x1bc88, 1}, + {0x1bc90, 0x1bc99, 1}, {0x1d400, 0x1d454, 1}, {0x1d456, 0x1d49c, 1}, {0x1d49e, 0x1d49f, 1}, @@ -554,6 +605,7 @@ var _L = &RangeTable{ {0x1d78a, 0x1d7a8, 1}, {0x1d7aa, 0x1d7c2, 1}, {0x1d7c4, 0x1d7cb, 1}, + {0x1e800, 0x1e8c4, 1}, {0x1ee00, 0x1ee03, 1}, {0x1ee05, 0x1ee1f, 1}, {0x1ee21, 0x1ee22, 1}, @@ -637,7 +689,7 @@ var _Ll = &RangeTable{ {0x0461, 0x0481, 2}, {0x048b, 0x04bf, 2}, {0x04c2, 0x04ce, 2}, - {0x04cf, 0x0527, 2}, + {0x04cf, 0x052f, 2}, {0x0561, 0x0587, 1}, {0x1d00, 0x1d2b, 1}, {0x1d6b, 0x1d77, 1}, @@ -684,7 +736,7 @@ var _Ll = &RangeTable{ {0x2d00, 0x2d25, 1}, {0x2d27, 0x2d2d, 6}, {0xa641, 0xa66d, 2}, - {0xa681, 0xa697, 2}, + {0xa681, 0xa69b, 2}, {0xa723, 0xa72f, 2}, {0xa730, 0xa731, 1}, {0xa733, 0xa771, 2}, @@ -693,14 +745,18 @@ var _Ll = &RangeTable{ {0xa77f, 0xa787, 2}, {0xa78c, 0xa78e, 2}, {0xa791, 0xa793, 2}, - {0xa7a1, 0xa7a9, 2}, - {0xa7fa, 0xfb00, 21254}, - {0xfb01, 0xfb06, 1}, + {0xa794, 0xa795, 1}, + {0xa797, 0xa7a9, 2}, + {0xa7fa, 0xab30, 822}, + {0xab31, 0xab5a, 1}, + {0xab64, 0xab65, 1}, + {0xfb00, 0xfb06, 1}, {0xfb13, 0xfb17, 1}, {0xff41, 0xff5a, 1}, }, R32: []Range32{ {0x10428, 0x1044f, 1}, + {0x118c0, 0x118df, 1}, {0x1d41a, 0x1d433, 1}, {0x1d44e, 0x1d454, 1}, {0x1d456, 0x1d467, 1}, @@ -765,15 +821,20 @@ var _Lm = &RangeTable{ {0xa015, 
0xa4f8, 1251}, {0xa4f9, 0xa4fd, 1}, {0xa60c, 0xa67f, 115}, + {0xa69c, 0xa69d, 1}, {0xa717, 0xa71f, 1}, {0xa770, 0xa788, 24}, {0xa7f8, 0xa7f9, 1}, - {0xa9cf, 0xaa70, 161}, - {0xaadd, 0xaaf3, 22}, - {0xaaf4, 0xff70, 21628}, - {0xff9e, 0xff9f, 1}, + {0xa9cf, 0xa9e6, 23}, + {0xaa70, 0xaadd, 109}, + {0xaaf3, 0xaaf4, 1}, + {0xab5c, 0xab5f, 1}, + {0xff70, 0xff9e, 46}, + {0xff9f, 0xff9f, 1}, }, R32: []Range32{ + {0x16b40, 0x16b40, 1}, + {0x16b41, 0x16b43, 1}, {0x16f93, 0x16f9f, 1}, }, } @@ -800,13 +861,11 @@ var _Lo = &RangeTable{ {0x07cb, 0x07ea, 1}, {0x0800, 0x0815, 1}, {0x0840, 0x0858, 1}, - {0x08a0, 0x08a2, 2}, - {0x08a3, 0x08ac, 1}, + {0x08a0, 0x08b2, 1}, {0x0904, 0x0939, 1}, {0x093d, 0x0950, 19}, {0x0958, 0x0961, 1}, - {0x0972, 0x0977, 1}, - {0x0979, 0x097f, 1}, + {0x0972, 0x0980, 1}, {0x0985, 0x098c, 1}, {0x098f, 0x0990, 1}, {0x0993, 0x09a8, 1}, @@ -858,8 +917,7 @@ var _Lo = &RangeTable{ {0x0c06, 0x0c0c, 1}, {0x0c0e, 0x0c10, 1}, {0x0c12, 0x0c28, 1}, - {0x0c2a, 0x0c33, 1}, - {0x0c35, 0x0c39, 1}, + {0x0c2a, 0x0c39, 1}, {0x0c3d, 0x0c58, 27}, {0x0c59, 0x0c60, 7}, {0x0c61, 0x0c85, 36}, @@ -935,6 +993,7 @@ var _Lo = &RangeTable{ {0x166f, 0x167f, 1}, {0x1681, 0x169a, 1}, {0x16a0, 0x16ea, 1}, + {0x16f1, 0x16f8, 1}, {0x1700, 0x170c, 1}, {0x170e, 0x1711, 1}, {0x1720, 0x1731, 1}, @@ -948,7 +1007,7 @@ var _Lo = &RangeTable{ {0x1880, 0x18a8, 1}, {0x18aa, 0x18b0, 6}, {0x18b1, 0x18f5, 1}, - {0x1900, 0x191c, 1}, + {0x1900, 0x191e, 1}, {0x1950, 0x196d, 1}, {0x1970, 0x1974, 1}, {0x1980, 0x19ab, 1}, @@ -996,7 +1055,8 @@ var _Lo = &RangeTable{ {0xa62a, 0xa62b, 1}, {0xa66e, 0xa6a0, 50}, {0xa6a1, 0xa6e5, 1}, - {0xa7fb, 0xa801, 1}, + {0xa7f7, 0xa7fb, 4}, + {0xa7fc, 0xa801, 1}, {0xa803, 0xa805, 1}, {0xa807, 0xa80a, 1}, {0xa80c, 0xa822, 1}, @@ -1008,13 +1068,16 @@ var _Lo = &RangeTable{ {0xa930, 0xa946, 1}, {0xa960, 0xa97c, 1}, {0xa984, 0xa9b2, 1}, + {0xa9e0, 0xa9e4, 1}, + {0xa9e7, 0xa9ef, 1}, + {0xa9fa, 0xa9fe, 1}, {0xaa00, 0xaa28, 1}, {0xaa40, 0xaa42, 1}, {0xaa44, 0xaa4b, 1}, {0xaa60, 0xaa6f, 1}, {0xaa71, 0xaa76, 1}, - {0xaa7a, 0xaa80, 6}, - {0xaa81, 0xaaaf, 1}, + {0xaa7a, 0xaa7e, 4}, + {0xaa7f, 0xaaaf, 1}, {0xaab1, 0xaab5, 4}, {0xaab6, 0xaab9, 3}, {0xaaba, 0xaabd, 1}, @@ -1065,19 +1128,27 @@ var _Lo = &RangeTable{ {0x10080, 0x100fa, 1}, {0x10280, 0x1029c, 1}, {0x102a0, 0x102d0, 1}, - {0x10300, 0x1031e, 1}, + {0x10300, 0x1031f, 1}, {0x10330, 0x10340, 1}, {0x10342, 0x10349, 1}, + {0x10350, 0x10375, 1}, {0x10380, 0x1039d, 1}, {0x103a0, 0x103c3, 1}, {0x103c8, 0x103cf, 1}, {0x10450, 0x1049d, 1}, + {0x10500, 0x10527, 1}, + {0x10530, 0x10563, 1}, + {0x10600, 0x10736, 1}, + {0x10740, 0x10755, 1}, + {0x10760, 0x10767, 1}, {0x10800, 0x10805, 1}, {0x10808, 0x1080a, 2}, {0x1080b, 0x10835, 1}, {0x10837, 0x10838, 1}, {0x1083c, 0x1083f, 3}, {0x10840, 0x10855, 1}, + {0x10860, 0x10876, 1}, + {0x10880, 0x1089e, 1}, {0x10900, 0x10915, 1}, {0x10920, 0x10939, 1}, {0x10980, 0x109b7, 1}, @@ -1087,24 +1158,60 @@ var _Lo = &RangeTable{ {0x10a15, 0x10a17, 1}, {0x10a19, 0x10a33, 1}, {0x10a60, 0x10a7c, 1}, + {0x10a80, 0x10a9c, 1}, + {0x10ac0, 0x10ac7, 1}, + {0x10ac9, 0x10ae4, 1}, {0x10b00, 0x10b35, 1}, {0x10b40, 0x10b55, 1}, {0x10b60, 0x10b72, 1}, + {0x10b80, 0x10b91, 1}, {0x10c00, 0x10c48, 1}, {0x11003, 0x11037, 1}, {0x11083, 0x110af, 1}, {0x110d0, 0x110e8, 1}, {0x11103, 0x11126, 1}, - {0x11183, 0x111b2, 1}, + {0x11150, 0x11172, 1}, + {0x11176, 0x11183, 13}, + {0x11184, 0x111b2, 1}, {0x111c1, 0x111c4, 1}, - {0x11680, 0x116aa, 1}, - {0x12000, 0x1236e, 1}, + {0x111da, 0x11200, 38}, + {0x11201, 0x11211, 1}, + {0x11213, 0x1122b, 1}, + 
{0x112b0, 0x112de, 1}, + {0x11305, 0x1130c, 1}, + {0x1130f, 0x11310, 1}, + {0x11313, 0x11328, 1}, + {0x1132a, 0x11330, 1}, + {0x11332, 0x11333, 1}, + {0x11335, 0x11339, 1}, + {0x1133d, 0x1135d, 32}, + {0x1135e, 0x11361, 1}, + {0x11480, 0x114af, 1}, + {0x114c4, 0x114c5, 1}, + {0x114c7, 0x11580, 185}, + {0x11581, 0x115ae, 1}, + {0x11600, 0x1162f, 1}, + {0x11644, 0x11680, 60}, + {0x11681, 0x116aa, 1}, + {0x118ff, 0x11ac0, 449}, + {0x11ac1, 0x11af8, 1}, + {0x12000, 0x12398, 1}, {0x13000, 0x1342e, 1}, {0x16800, 0x16a38, 1}, + {0x16a40, 0x16a5e, 1}, + {0x16ad0, 0x16aed, 1}, + {0x16b00, 0x16b2f, 1}, + {0x16b63, 0x16b77, 1}, + {0x16b7d, 0x16b8f, 1}, {0x16f00, 0x16f44, 1}, {0x16f50, 0x1b000, 16560}, - {0x1b001, 0x1ee00, 15871}, - {0x1ee01, 0x1ee03, 1}, + {0x1b001, 0x1bc00, 3071}, + {0x1bc01, 0x1bc6a, 1}, + {0x1bc70, 0x1bc7c, 1}, + {0x1bc80, 0x1bc88, 1}, + {0x1bc90, 0x1bc99, 1}, + {0x1e800, 0x1e8c4, 1}, + {0x1ee00, 0x1ee03, 1}, {0x1ee05, 0x1ee1f, 1}, {0x1ee21, 0x1ee22, 1}, {0x1ee24, 0x1ee27, 3}, @@ -1185,8 +1292,9 @@ var _Lu = &RangeTable{ {0x0244, 0x0246, 1}, {0x0248, 0x024e, 2}, {0x0370, 0x0372, 2}, - {0x0376, 0x0386, 16}, - {0x0388, 0x038a, 1}, + {0x0376, 0x037f, 9}, + {0x0386, 0x0388, 2}, + {0x0389, 0x038a, 1}, {0x038c, 0x038e, 2}, {0x038f, 0x0391, 2}, {0x0392, 0x03a1, 1}, @@ -1200,7 +1308,7 @@ var _Lu = &RangeTable{ {0x0460, 0x0480, 2}, {0x048a, 0x04c0, 2}, {0x04c1, 0x04cd, 2}, - {0x04d0, 0x0526, 2}, + {0x04d0, 0x052e, 2}, {0x0531, 0x0556, 1}, {0x10a0, 0x10c5, 1}, {0x10c7, 0x10cd, 6}, @@ -1239,18 +1347,21 @@ var _Lu = &RangeTable{ {0x2ceb, 0x2ced, 2}, {0x2cf2, 0xa640, 31054}, {0xa642, 0xa66c, 2}, - {0xa680, 0xa696, 2}, + {0xa680, 0xa69a, 2}, {0xa722, 0xa72e, 2}, {0xa732, 0xa76e, 2}, {0xa779, 0xa77d, 2}, {0xa77e, 0xa786, 2}, {0xa78b, 0xa78d, 2}, {0xa790, 0xa792, 2}, - {0xa7a0, 0xa7aa, 2}, + {0xa796, 0xa7aa, 2}, + {0xa7ab, 0xa7ad, 1}, + {0xa7b0, 0xa7b1, 1}, {0xff21, 0xff3a, 1}, }, R32: []Range32{ {0x10400, 0x10427, 1}, + {0x118a0, 0x118bf, 1}, {0x1d400, 0x1d419, 1}, {0x1d434, 0x1d44d, 1}, {0x1d468, 0x1d481, 1}, @@ -1309,8 +1420,7 @@ var _M = &RangeTable{ {0x0825, 0x0827, 1}, {0x0829, 0x082d, 1}, {0x0859, 0x085b, 1}, - {0x08e4, 0x08fe, 1}, - {0x0900, 0x0903, 1}, + {0x08e4, 0x0903, 1}, {0x093a, 0x093c, 1}, {0x093e, 0x094f, 1}, {0x0951, 0x0957, 1}, @@ -1346,21 +1456,21 @@ var _M = &RangeTable{ {0x0bbf, 0x0bc2, 1}, {0x0bc6, 0x0bc8, 1}, {0x0bca, 0x0bcd, 1}, - {0x0bd7, 0x0c01, 42}, - {0x0c02, 0x0c03, 1}, + {0x0bd7, 0x0c00, 41}, + {0x0c01, 0x0c03, 1}, {0x0c3e, 0x0c44, 1}, {0x0c46, 0x0c48, 1}, {0x0c4a, 0x0c4d, 1}, {0x0c55, 0x0c56, 1}, {0x0c62, 0x0c63, 1}, - {0x0c82, 0x0c83, 1}, + {0x0c81, 0x0c83, 1}, {0x0cbc, 0x0cbe, 2}, {0x0cbf, 0x0cc4, 1}, {0x0cc6, 0x0cc8, 1}, {0x0cca, 0x0ccd, 1}, {0x0cd5, 0x0cd6, 1}, {0x0ce2, 0x0ce3, 1}, - {0x0d02, 0x0d03, 1}, + {0x0d01, 0x0d03, 1}, {0x0d3e, 0x0d44, 1}, {0x0d46, 0x0d48, 1}, {0x0d4a, 0x0d4d, 1}, @@ -1411,8 +1521,9 @@ var _M = &RangeTable{ {0x1a17, 0x1a1b, 1}, {0x1a55, 0x1a5e, 1}, {0x1a60, 0x1a7c, 1}, - {0x1a7f, 0x1b00, 129}, - {0x1b01, 0x1b04, 1}, + {0x1a7f, 0x1ab0, 49}, + {0x1ab1, 0x1abe, 1}, + {0x1b00, 0x1b04, 1}, {0x1b34, 0x1b44, 1}, {0x1b6b, 0x1b73, 1}, {0x1b80, 0x1b82, 1}, @@ -1423,7 +1534,8 @@ var _M = &RangeTable{ {0x1cd4, 0x1ce8, 1}, {0x1ced, 0x1cf2, 5}, {0x1cf3, 0x1cf4, 1}, - {0x1dc0, 0x1de6, 1}, + {0x1cf8, 0x1cf9, 1}, + {0x1dc0, 0x1df5, 1}, {0x1dfc, 0x1dff, 1}, {0x20d0, 0x20f0, 1}, {0x2cef, 0x2cf1, 1}, @@ -1444,9 +1556,11 @@ var _M = &RangeTable{ {0xa947, 0xa953, 1}, {0xa980, 0xa983, 1}, {0xa9b3, 0xa9c0, 1}, - {0xaa29, 0xaa36, 1}, + {0xa9e5, 0xaa29, 68}, + 
{0xaa2a, 0xaa36, 1}, {0xaa43, 0xaa4c, 9}, {0xaa4d, 0xaa7b, 46}, + {0xaa7c, 0xaa7d, 1}, {0xaab0, 0xaab2, 2}, {0xaab3, 0xaab4, 1}, {0xaab7, 0xaab8, 1}, @@ -1458,32 +1572,54 @@ var _M = &RangeTable{ {0xabec, 0xabed, 1}, {0xfb1e, 0xfe00, 738}, {0xfe01, 0xfe0f, 1}, - {0xfe20, 0xfe26, 1}, + {0xfe20, 0xfe2d, 1}, }, R32: []Range32{ - {0x101fd, 0x10a01, 2052}, - {0x10a02, 0x10a03, 1}, + {0x101fd, 0x102e0, 227}, + {0x10376, 0x1037a, 1}, + {0x10a01, 0x10a03, 1}, {0x10a05, 0x10a06, 1}, {0x10a0c, 0x10a0f, 1}, {0x10a38, 0x10a3a, 1}, - {0x10a3f, 0x11000, 1473}, + {0x10a3f, 0x10ae5, 166}, + {0x10ae6, 0x11000, 1306}, {0x11001, 0x11002, 1}, {0x11038, 0x11046, 1}, - {0x11080, 0x11082, 1}, + {0x1107f, 0x11082, 1}, {0x110b0, 0x110ba, 1}, {0x11100, 0x11102, 1}, {0x11127, 0x11134, 1}, - {0x11180, 0x11182, 1}, + {0x11173, 0x11180, 13}, + {0x11181, 0x11182, 1}, {0x111b3, 0x111c0, 1}, + {0x1122c, 0x11237, 1}, + {0x112df, 0x112ea, 1}, + {0x11301, 0x11303, 1}, + {0x1133c, 0x1133e, 2}, + {0x1133f, 0x11344, 1}, + {0x11347, 0x11348, 1}, + {0x1134b, 0x1134d, 1}, + {0x11357, 0x11362, 11}, + {0x11363, 0x11366, 3}, + {0x11367, 0x1136c, 1}, + {0x11370, 0x11374, 1}, + {0x114b0, 0x114c3, 1}, + {0x115af, 0x115b5, 1}, + {0x115b8, 0x115c0, 1}, + {0x11630, 0x11640, 1}, {0x116ab, 0x116b7, 1}, + {0x16af0, 0x16af4, 1}, + {0x16b30, 0x16b36, 1}, {0x16f51, 0x16f7e, 1}, {0x16f8f, 0x16f92, 1}, + {0x1bc9d, 0x1bc9e, 1}, {0x1d165, 0x1d169, 1}, {0x1d16d, 0x1d172, 1}, {0x1d17b, 0x1d182, 1}, {0x1d185, 0x1d18b, 1}, {0x1d1aa, 0x1d1ad, 1}, {0x1d242, 0x1d244, 1}, + {0x1e8d0, 0x1e8d6, 1}, {0xe0100, 0xe01ef, 1}, }, } @@ -1562,8 +1698,7 @@ var _Mc = &RangeTable{ {0x1b43, 0x1b44, 1}, {0x1b82, 0x1ba1, 31}, {0x1ba6, 0x1ba7, 1}, - {0x1baa, 0x1bac, 2}, - {0x1bad, 0x1be7, 58}, + {0x1baa, 0x1be7, 61}, {0x1bea, 0x1bec, 1}, {0x1bee, 0x1bf2, 4}, {0x1bf3, 0x1c24, 49}, @@ -1583,24 +1718,45 @@ var _Mc = &RangeTable{ {0xaa2f, 0xaa30, 1}, {0xaa33, 0xaa34, 1}, {0xaa4d, 0xaa7b, 46}, - {0xaaeb, 0xaaee, 3}, - {0xaaef, 0xaaf5, 6}, - {0xabe3, 0xabe4, 1}, - {0xabe6, 0xabe7, 1}, - {0xabe9, 0xabea, 1}, - {0xabec, 0xabec, 1}, + {0xaa7d, 0xaaeb, 110}, + {0xaaee, 0xaaef, 1}, + {0xaaf5, 0xabe3, 238}, + {0xabe4, 0xabe6, 2}, + {0xabe7, 0xabe9, 2}, + {0xabea, 0xabec, 2}, }, R32: []Range32{ - {0x11000, 0x11000, 1}, - {0x11002, 0x11082, 128}, - {0x110b0, 0x110b2, 1}, + {0x11000, 0x11002, 2}, + {0x11082, 0x110b0, 46}, + {0x110b1, 0x110b2, 1}, {0x110b7, 0x110b8, 1}, {0x1112c, 0x11182, 86}, {0x111b3, 0x111b5, 1}, {0x111bf, 0x111c0, 1}, - {0x116ac, 0x116ae, 2}, - {0x116af, 0x116b6, 7}, - {0x16f51, 0x16f7e, 1}, + {0x1122c, 0x1122e, 1}, + {0x11232, 0x11233, 1}, + {0x11235, 0x112e0, 171}, + {0x112e1, 0x112e2, 1}, + {0x11302, 0x11303, 1}, + {0x1133e, 0x1133f, 1}, + {0x11341, 0x11344, 1}, + {0x11347, 0x11348, 1}, + {0x1134b, 0x1134d, 1}, + {0x11357, 0x11362, 11}, + {0x11363, 0x114b0, 333}, + {0x114b1, 0x114b2, 1}, + {0x114b9, 0x114bb, 2}, + {0x114bc, 0x114be, 1}, + {0x114c1, 0x115af, 238}, + {0x115b0, 0x115b1, 1}, + {0x115b8, 0x115bb, 1}, + {0x115be, 0x11630, 114}, + {0x11631, 0x11632, 1}, + {0x1163b, 0x1163c, 1}, + {0x1163e, 0x116ac, 110}, + {0x116ae, 0x116af, 1}, + {0x116b6, 0x16f51, 22683}, + {0x16f52, 0x16f7e, 1}, {0x1d165, 0x1d166, 1}, {0x1d16d, 0x1d172, 1}, }, @@ -1609,7 +1765,8 @@ var _Mc = &RangeTable{ var _Me = &RangeTable{ R16: []Range16{ {0x0488, 0x0489, 1}, - {0x20dd, 0x20e0, 1}, + {0x1abe, 0x20dd, 1567}, + {0x20de, 0x20e0, 1}, {0x20e2, 0x20e4, 1}, {0xa670, 0xa672, 1}, }, @@ -1639,8 +1796,7 @@ var _Mn = &RangeTable{ {0x0825, 0x0827, 1}, {0x0829, 0x082d, 1}, {0x0859, 0x085b, 1}, - 
{0x08e4, 0x08fe, 1}, - {0x0900, 0x0902, 1}, + {0x08e4, 0x0902, 1}, {0x093a, 0x093c, 2}, {0x0941, 0x0948, 1}, {0x094d, 0x0951, 4}, @@ -1667,16 +1823,17 @@ var _Mn = &RangeTable{ {0x0b4d, 0x0b56, 9}, {0x0b62, 0x0b63, 1}, {0x0b82, 0x0bc0, 62}, - {0x0bcd, 0x0c3e, 113}, - {0x0c3f, 0x0c40, 1}, + {0x0bcd, 0x0c00, 51}, + {0x0c3e, 0x0c40, 1}, {0x0c46, 0x0c48, 1}, {0x0c4a, 0x0c4d, 1}, {0x0c55, 0x0c56, 1}, {0x0c62, 0x0c63, 1}, - {0x0cbc, 0x0cbf, 3}, - {0x0cc6, 0x0ccc, 6}, - {0x0ccd, 0x0ce2, 21}, - {0x0ce3, 0x0d41, 94}, + {0x0c81, 0x0cbc, 59}, + {0x0cbf, 0x0cc6, 7}, + {0x0ccc, 0x0ccd, 1}, + {0x0ce2, 0x0ce3, 1}, + {0x0d01, 0x0d41, 64}, {0x0d42, 0x0d44, 1}, {0x0d4d, 0x0d62, 21}, {0x0d63, 0x0dca, 103}, @@ -1728,8 +1885,9 @@ var _Mn = &RangeTable{ {0x1a60, 0x1a62, 2}, {0x1a65, 0x1a6c, 1}, {0x1a73, 0x1a7c, 1}, - {0x1a7f, 0x1b00, 129}, - {0x1b01, 0x1b03, 1}, + {0x1a7f, 0x1ab0, 49}, + {0x1ab1, 0x1abd, 1}, + {0x1b00, 0x1b03, 1}, {0x1b34, 0x1b36, 2}, {0x1b37, 0x1b3a, 1}, {0x1b3c, 0x1b42, 6}, @@ -1737,17 +1895,18 @@ var _Mn = &RangeTable{ {0x1b80, 0x1b81, 1}, {0x1ba2, 0x1ba5, 1}, {0x1ba8, 0x1ba9, 1}, - {0x1bab, 0x1be6, 59}, - {0x1be8, 0x1be9, 1}, - {0x1bed, 0x1bef, 2}, - {0x1bf0, 0x1bf1, 1}, + {0x1bab, 0x1bad, 1}, + {0x1be6, 0x1be8, 2}, + {0x1be9, 0x1bed, 4}, + {0x1bef, 0x1bf1, 1}, {0x1c2c, 0x1c33, 1}, {0x1c36, 0x1c37, 1}, {0x1cd0, 0x1cd2, 1}, {0x1cd4, 0x1ce0, 1}, {0x1ce2, 0x1ce8, 1}, {0x1ced, 0x1cf4, 7}, - {0x1dc0, 0x1de6, 1}, + {0x1cf8, 0x1cf9, 1}, + {0x1dc0, 0x1df5, 1}, {0x1dfc, 0x1dff, 1}, {0x20d0, 0x20dc, 1}, {0x20e1, 0x20e5, 4}, @@ -1770,13 +1929,13 @@ var _Mn = &RangeTable{ {0xa980, 0xa982, 1}, {0xa9b3, 0xa9b6, 3}, {0xa9b7, 0xa9b9, 1}, - {0xa9bc, 0xaa29, 109}, - {0xaa2a, 0xaa2e, 1}, + {0xa9bc, 0xa9e5, 41}, + {0xaa29, 0xaa2e, 1}, {0xaa31, 0xaa32, 1}, {0xaa35, 0xaa36, 1}, {0xaa43, 0xaa4c, 9}, - {0xaab0, 0xaab2, 2}, - {0xaab3, 0xaab4, 1}, + {0xaa7c, 0xaab0, 52}, + {0xaab2, 0xaab4, 1}, {0xaab7, 0xaab8, 1}, {0xaabe, 0xaabf, 1}, {0xaac1, 0xaaec, 43}, @@ -1784,33 +1943,58 @@ var _Mn = &RangeTable{ {0xabe5, 0xabe8, 3}, {0xabed, 0xfb1e, 20273}, {0xfe00, 0xfe0f, 1}, - {0xfe20, 0xfe26, 1}, + {0xfe20, 0xfe2d, 1}, }, R32: []Range32{ - {0x101fd, 0x10a01, 2052}, - {0x10a02, 0x10a03, 1}, + {0x101fd, 0x102e0, 227}, + {0x10376, 0x1037a, 1}, + {0x10a01, 0x10a03, 1}, {0x10a05, 0x10a06, 1}, {0x10a0c, 0x10a0f, 1}, {0x10a38, 0x10a3a, 1}, - {0x10a3f, 0x11001, 1474}, + {0x10a3f, 0x10ae5, 166}, + {0x10ae6, 0x11001, 1307}, {0x11038, 0x11046, 1}, - {0x11080, 0x11081, 1}, + {0x1107f, 0x11081, 1}, {0x110b3, 0x110b6, 1}, {0x110b9, 0x110ba, 1}, {0x11100, 0x11102, 1}, {0x11127, 0x1112b, 1}, {0x1112d, 0x11134, 1}, - {0x11180, 0x11181, 1}, - {0x111b6, 0x111be, 1}, - {0x116ab, 0x116ad, 2}, - {0x116b0, 0x116b5, 1}, - {0x116b7, 0x16f8f, 22744}, - {0x16f90, 0x16f92, 1}, + {0x11173, 0x11180, 13}, + {0x11181, 0x111b6, 53}, + {0x111b7, 0x111be, 1}, + {0x1122f, 0x11231, 1}, + {0x11234, 0x11236, 2}, + {0x11237, 0x112df, 168}, + {0x112e3, 0x112ea, 1}, + {0x11301, 0x1133c, 59}, + {0x11340, 0x11366, 38}, + {0x11367, 0x1136c, 1}, + {0x11370, 0x11374, 1}, + {0x114b3, 0x114b8, 1}, + {0x114ba, 0x114bf, 5}, + {0x114c0, 0x114c2, 2}, + {0x114c3, 0x115b2, 239}, + {0x115b3, 0x115b5, 1}, + {0x115bc, 0x115bd, 1}, + {0x115bf, 0x115c0, 1}, + {0x11633, 0x1163a, 1}, + {0x1163d, 0x1163f, 2}, + {0x11640, 0x116ab, 107}, + {0x116ad, 0x116b0, 3}, + {0x116b1, 0x116b5, 1}, + {0x116b7, 0x16af0, 21561}, + {0x16af1, 0x16af4, 1}, + {0x16b30, 0x16b36, 1}, + {0x16f8f, 0x16f92, 1}, + {0x1bc9d, 0x1bc9e, 1}, {0x1d167, 0x1d169, 1}, {0x1d17b, 0x1d182, 1}, {0x1d185, 0x1d18b, 1}, 
{0x1d1aa, 0x1d1ad, 1}, {0x1d242, 0x1d244, 1}, + {0x1e8d0, 0x1e8d6, 1}, {0xe0100, 0xe01ef, 1}, }, } @@ -1836,6 +2020,7 @@ var _N = &RangeTable{ {0x0c78, 0x0c7e, 1}, {0x0ce6, 0x0cef, 1}, {0x0d66, 0x0d75, 1}, + {0x0de6, 0x0def, 1}, {0x0e50, 0x0e59, 1}, {0x0ed0, 0x0ed9, 1}, {0x0f20, 0x0f33, 1}, @@ -1877,6 +2062,7 @@ var _N = &RangeTable{ {0xa8d0, 0xa8d9, 1}, {0xa900, 0xa909, 1}, {0xa9d0, 0xa9d9, 1}, + {0xa9f0, 0xa9f9, 1}, {0xaa50, 0xaa59, 1}, {0xabf0, 0xabf9, 1}, {0xff10, 0xff19, 1}, @@ -1884,27 +2070,42 @@ var _N = &RangeTable{ R32: []Range32{ {0x10107, 0x10133, 1}, {0x10140, 0x10178, 1}, - {0x1018a, 0x10320, 406}, - {0x10321, 0x10323, 1}, + {0x1018a, 0x1018b, 1}, + {0x102e1, 0x102fb, 1}, + {0x10320, 0x10323, 1}, {0x10341, 0x1034a, 9}, {0x103d1, 0x103d5, 1}, {0x104a0, 0x104a9, 1}, {0x10858, 0x1085f, 1}, + {0x10879, 0x1087f, 1}, + {0x108a7, 0x108af, 1}, {0x10916, 0x1091b, 1}, {0x10a40, 0x10a47, 1}, {0x10a7d, 0x10a7e, 1}, + {0x10a9d, 0x10a9f, 1}, + {0x10aeb, 0x10aef, 1}, {0x10b58, 0x10b5f, 1}, {0x10b78, 0x10b7f, 1}, + {0x10ba9, 0x10baf, 1}, {0x10e60, 0x10e7e, 1}, {0x11052, 0x1106f, 1}, {0x110f0, 0x110f9, 1}, {0x11136, 0x1113f, 1}, {0x111d0, 0x111d9, 1}, + {0x111e1, 0x111f4, 1}, + {0x112f0, 0x112f9, 1}, + {0x114d0, 0x114d9, 1}, + {0x11650, 0x11659, 1}, {0x116c0, 0x116c9, 1}, - {0x12400, 0x12462, 1}, + {0x118e0, 0x118f2, 1}, + {0x12400, 0x1246e, 1}, + {0x16a60, 0x16a69, 1}, + {0x16b50, 0x16b59, 1}, + {0x16b5b, 0x16b61, 1}, {0x1d360, 0x1d371, 1}, {0x1d7ce, 0x1d7ff, 1}, - {0x1f100, 0x1f10a, 1}, + {0x1e8c7, 0x1e8cf, 1}, + {0x1f100, 0x1f10c, 1}, }, LatinOffset: 4, } @@ -1924,6 +2125,7 @@ var _Nd = &RangeTable{ {0x0c66, 0x0c6f, 1}, {0x0ce6, 0x0cef, 1}, {0x0d66, 0x0d6f, 1}, + {0x0de6, 0x0def, 1}, {0x0e50, 0x0e59, 1}, {0x0ed0, 0x0ed9, 1}, {0x0f20, 0x0f29, 1}, @@ -1943,6 +2145,7 @@ var _Nd = &RangeTable{ {0xa8d0, 0xa8d9, 1}, {0xa900, 0xa909, 1}, {0xa9d0, 0xa9d9, 1}, + {0xa9f0, 0xa9f9, 1}, {0xaa50, 0xaa59, 1}, {0xabf0, 0xabf9, 1}, {0xff10, 0xff19, 1}, @@ -1953,7 +2156,13 @@ var _Nd = &RangeTable{ {0x110f0, 0x110f9, 1}, {0x11136, 0x1113f, 1}, {0x111d0, 0x111d9, 1}, + {0x112f0, 0x112f9, 1}, + {0x114d0, 0x114d9, 1}, + {0x11650, 0x11659, 1}, {0x116c0, 0x116c9, 1}, + {0x118e0, 0x118e9, 1}, + {0x16a60, 0x16a69, 1}, + {0x16b50, 0x16b59, 1}, {0x1d7ce, 0x1d7ff, 1}, }, LatinOffset: 1, @@ -1973,7 +2182,7 @@ var _Nl = &RangeTable{ {0x10140, 0x10174, 1}, {0x10341, 0x1034a, 9}, {0x103d1, 0x103d5, 1}, - {0x12400, 0x12462, 1}, + {0x12400, 0x1246e, 1}, }, } @@ -2010,18 +2219,28 @@ var _No = &RangeTable{ R32: []Range32{ {0x10107, 0x10133, 1}, {0x10175, 0x10178, 1}, - {0x1018a, 0x10320, 406}, - {0x10321, 0x10323, 1}, + {0x1018a, 0x1018b, 1}, + {0x102e1, 0x102fb, 1}, + {0x10320, 0x10323, 1}, {0x10858, 0x1085f, 1}, + {0x10879, 0x1087f, 1}, + {0x108a7, 0x108af, 1}, {0x10916, 0x1091b, 1}, {0x10a40, 0x10a47, 1}, {0x10a7d, 0x10a7e, 1}, + {0x10a9d, 0x10a9f, 1}, + {0x10aeb, 0x10aef, 1}, {0x10b58, 0x10b5f, 1}, {0x10b78, 0x10b7f, 1}, + {0x10ba9, 0x10baf, 1}, {0x10e60, 0x10e7e, 1}, {0x11052, 0x11065, 1}, + {0x111e1, 0x111f4, 1}, + {0x118ea, 0x118f2, 1}, + {0x16b5b, 0x16b61, 1}, {0x1d360, 0x1d371, 1}, - {0x1f100, 0x1f10a, 1}, + {0x1e8c7, 0x1e8cf, 1}, + {0x1f100, 0x1f10c, 1}, }, LatinOffset: 3, } @@ -2104,7 +2323,7 @@ var _P = &RangeTable{ {0x2cfe, 0x2cff, 1}, {0x2d70, 0x2e00, 144}, {0x2e01, 0x2e2e, 1}, - {0x2e30, 0x2e3b, 1}, + {0x2e30, 0x2e42, 1}, {0x3001, 0x3003, 1}, {0x3008, 0x3011, 1}, {0x3014, 0x301f, 1}, @@ -2144,17 +2363,29 @@ var _P = &RangeTable{ R32: []Range32{ {0x10100, 0x10102, 1}, {0x1039f, 0x103d0, 49}, - {0x10857, 
0x1091f, 200}, - {0x1093f, 0x10a50, 273}, - {0x10a51, 0x10a58, 1}, - {0x10a7f, 0x10b39, 186}, - {0x10b3a, 0x10b3f, 1}, + {0x1056f, 0x10857, 744}, + {0x1091f, 0x1093f, 32}, + {0x10a50, 0x10a58, 1}, + {0x10a7f, 0x10af0, 113}, + {0x10af1, 0x10af6, 1}, + {0x10b39, 0x10b3f, 1}, + {0x10b99, 0x10b9c, 1}, {0x11047, 0x1104d, 1}, {0x110bb, 0x110bc, 1}, {0x110be, 0x110c1, 1}, {0x11140, 0x11143, 1}, + {0x11174, 0x11175, 1}, {0x111c5, 0x111c8, 1}, - {0x12470, 0x12473, 1}, + {0x111cd, 0x11238, 107}, + {0x11239, 0x1123d, 1}, + {0x114c6, 0x115c1, 251}, + {0x115c2, 0x115c9, 1}, + {0x11641, 0x11643, 1}, + {0x12470, 0x12474, 1}, + {0x16a6e, 0x16a6f, 1}, + {0x16af5, 0x16b37, 66}, + {0x16b38, 0x16b3b, 1}, + {0x16b44, 0x1bc9f, 20827}, }, LatinOffset: 11, } @@ -2177,10 +2408,11 @@ var _Pd = &RangeTable{ {0x2011, 0x2015, 1}, {0x2e17, 0x2e1a, 3}, {0x2e3a, 0x2e3b, 1}, - {0x301c, 0x3030, 20}, - {0x30a0, 0xfe31, 52625}, - {0xfe32, 0xfe58, 38}, - {0xfe63, 0xff0d, 170}, + {0x2e40, 0x301c, 476}, + {0x3030, 0x30a0, 112}, + {0xfe31, 0xfe32, 1}, + {0xfe58, 0xfe63, 11}, + {0xff0d, 0xff0d, 1}, }, } @@ -2202,7 +2434,7 @@ var _Pe = &RangeTable{ {0x3009, 0x3011, 2}, {0x3015, 0x301b, 2}, {0x301e, 0x301f, 1}, - {0xfd3f, 0xfe18, 217}, + {0xfd3e, 0xfe18, 218}, {0xfe36, 0xfe44, 2}, {0xfe48, 0xfe5a, 18}, {0xfe5c, 0xfe5e, 2}, @@ -2307,7 +2539,9 @@ var _Po = &RangeTable{ {0x2e1f, 0x2e2a, 11}, {0x2e2b, 0x2e2e, 1}, {0x2e30, 0x2e39, 1}, - {0x3001, 0x3003, 1}, + {0x2e3c, 0x2e3f, 1}, + {0x2e41, 0x3001, 448}, + {0x3002, 0x3003, 1}, {0x303d, 0x30fb, 190}, {0xa4fe, 0xa4ff, 1}, {0xa60d, 0xa60f, 1}, @@ -2346,17 +2580,29 @@ var _Po = &RangeTable{ {0x10100, 0x10100, 1}, {0x10101, 0x10102, 1}, {0x1039f, 0x103d0, 49}, - {0x10857, 0x1091f, 200}, - {0x1093f, 0x10a50, 273}, - {0x10a51, 0x10a58, 1}, - {0x10a7f, 0x10b39, 186}, - {0x10b3a, 0x10b3f, 1}, + {0x1056f, 0x10857, 744}, + {0x1091f, 0x1093f, 32}, + {0x10a50, 0x10a58, 1}, + {0x10a7f, 0x10af0, 113}, + {0x10af1, 0x10af6, 1}, + {0x10b39, 0x10b3f, 1}, + {0x10b99, 0x10b9c, 1}, {0x11047, 0x1104d, 1}, {0x110bb, 0x110bc, 1}, {0x110be, 0x110c1, 1}, {0x11140, 0x11143, 1}, + {0x11174, 0x11175, 1}, {0x111c5, 0x111c8, 1}, - {0x12470, 0x12473, 1}, + {0x111cd, 0x11238, 107}, + {0x11239, 0x1123d, 1}, + {0x114c6, 0x115c1, 251}, + {0x115c2, 0x115c9, 1}, + {0x11641, 0x11643, 1}, + {0x12470, 0x12474, 1}, + {0x16a6e, 0x16a6f, 1}, + {0x16af5, 0x16b37, 66}, + {0x16b38, 0x16b3b, 1}, + {0x16b44, 0x1bc9f, 20827}, }, LatinOffset: 8, } @@ -2377,9 +2623,10 @@ var _Ps = &RangeTable{ {0x29d8, 0x29da, 2}, {0x29fc, 0x2e22, 1062}, {0x2e24, 0x2e28, 2}, - {0x3008, 0x3010, 2}, + {0x2e42, 0x3008, 454}, + {0x300a, 0x3010, 2}, {0x3014, 0x301a, 2}, - {0x301d, 0xfd3e, 52513}, + {0x301d, 0xfd3f, 52514}, {0xfe17, 0xfe35, 30}, {0xfe37, 0xfe43, 2}, {0xfe47, 0xfe59, 18}, @@ -2410,7 +2657,8 @@ var _S = &RangeTable{ {0x02f0, 0x02ff, 1}, {0x0375, 0x0384, 15}, {0x0385, 0x03f6, 113}, - {0x0482, 0x058f, 269}, + {0x0482, 0x058d, 267}, + {0x058e, 0x058f, 1}, {0x0606, 0x0608, 1}, {0x060b, 0x060e, 3}, {0x060f, 0x06de, 207}, @@ -2446,7 +2694,7 @@ var _S = &RangeTable{ {0x2044, 0x2052, 14}, {0x207a, 0x207c, 1}, {0x208a, 0x208c, 1}, - {0x20a0, 0x20ba, 1}, + {0x20a0, 0x20bd, 1}, {0x2100, 0x2101, 1}, {0x2103, 0x2106, 1}, {0x2108, 0x2109, 1}, @@ -2461,19 +2709,21 @@ var _S = &RangeTable{ {0x214f, 0x2190, 65}, {0x2191, 0x2307, 1}, {0x230c, 0x2328, 1}, - {0x232b, 0x23f3, 1}, + {0x232b, 0x23fa, 1}, {0x2400, 0x2426, 1}, {0x2440, 0x244a, 1}, {0x249c, 0x24e9, 1}, - {0x2500, 0x26ff, 1}, - {0x2701, 0x2767, 1}, + {0x2500, 0x2767, 1}, {0x2794, 0x27c4, 1}, {0x27c7, 
0x27e5, 1}, {0x27f0, 0x2982, 1}, {0x2999, 0x29d7, 1}, {0x29dc, 0x29fb, 1}, - {0x29fe, 0x2b4c, 1}, - {0x2b50, 0x2b59, 1}, + {0x29fe, 0x2b73, 1}, + {0x2b76, 0x2b95, 1}, + {0x2b98, 0x2bb9, 1}, + {0x2bbd, 0x2bc8, 1}, + {0x2bca, 0x2bd1, 1}, {0x2ce5, 0x2cea, 1}, {0x2e80, 0x2e99, 1}, {0x2e9b, 0x2ef3, 1}, @@ -2502,8 +2752,8 @@ var _S = &RangeTable{ {0xa828, 0xa82b, 1}, {0xa836, 0xa839, 1}, {0xaa77, 0xaa79, 1}, - {0xfb29, 0xfbb2, 137}, - {0xfbb3, 0xfbc1, 1}, + {0xab5b, 0xfb29, 20430}, + {0xfbb2, 0xfbc1, 1}, {0xfdfc, 0xfdfd, 1}, {0xfe62, 0xfe64, 2}, {0xfe65, 0xfe66, 1}, @@ -2519,8 +2769,14 @@ var _S = &RangeTable{ R32: []Range32{ {0x10137, 0x1013f, 1}, {0x10179, 0x10189, 1}, - {0x10190, 0x1019b, 1}, - {0x101d0, 0x101fc, 1}, + {0x1018c, 0x10190, 4}, + {0x10191, 0x1019b, 1}, + {0x101a0, 0x101d0, 48}, + {0x101d1, 0x101fc, 1}, + {0x10877, 0x10878, 1}, + {0x10ac8, 0x16b3c, 24692}, + {0x16b3d, 0x16b3f, 1}, + {0x16b45, 0x1bc9c, 20823}, {0x1d000, 0x1d0f5, 1}, {0x1d100, 0x1d126, 1}, {0x1d129, 0x1d164, 1}, @@ -2540,9 +2796,9 @@ var _S = &RangeTable{ {0x1f000, 0x1f02b, 1}, {0x1f030, 0x1f093, 1}, {0x1f0a0, 0x1f0ae, 1}, - {0x1f0b1, 0x1f0be, 1}, + {0x1f0b1, 0x1f0bf, 1}, {0x1f0c1, 0x1f0cf, 1}, - {0x1f0d1, 0x1f0df, 1}, + {0x1f0d1, 0x1f0f5, 1}, {0x1f110, 0x1f12e, 1}, {0x1f130, 0x1f16b, 1}, {0x1f170, 0x1f19a, 1}, @@ -2550,24 +2806,25 @@ var _S = &RangeTable{ {0x1f210, 0x1f23a, 1}, {0x1f240, 0x1f248, 1}, {0x1f250, 0x1f251, 1}, - {0x1f300, 0x1f320, 1}, - {0x1f330, 0x1f335, 1}, - {0x1f337, 0x1f37c, 1}, - {0x1f380, 0x1f393, 1}, - {0x1f3a0, 0x1f3c4, 1}, - {0x1f3c6, 0x1f3ca, 1}, - {0x1f3e0, 0x1f3f0, 1}, - {0x1f400, 0x1f43e, 1}, - {0x1f440, 0x1f442, 2}, - {0x1f443, 0x1f4f7, 1}, - {0x1f4f9, 0x1f4fc, 1}, - {0x1f500, 0x1f53d, 1}, - {0x1f540, 0x1f543, 1}, - {0x1f550, 0x1f567, 1}, - {0x1f5fb, 0x1f640, 1}, - {0x1f645, 0x1f64f, 1}, - {0x1f680, 0x1f6c5, 1}, + {0x1f300, 0x1f32c, 1}, + {0x1f330, 0x1f37d, 1}, + {0x1f380, 0x1f3ce, 1}, + {0x1f3d4, 0x1f3f7, 1}, + {0x1f400, 0x1f4fe, 1}, + {0x1f500, 0x1f54a, 1}, + {0x1f550, 0x1f579, 1}, + {0x1f57b, 0x1f5a3, 1}, + {0x1f5a5, 0x1f642, 1}, + {0x1f645, 0x1f6cf, 1}, + {0x1f6e0, 0x1f6ec, 1}, + {0x1f6f0, 0x1f6f3, 1}, {0x1f700, 0x1f773, 1}, + {0x1f780, 0x1f7d4, 1}, + {0x1f800, 0x1f80b, 1}, + {0x1f810, 0x1f847, 1}, + {0x1f850, 0x1f859, 1}, + {0x1f860, 0x1f887, 1}, + {0x1f890, 0x1f8ad, 1}, }, LatinOffset: 10, } @@ -2581,7 +2838,7 @@ var _Sc = &RangeTable{ {0x09fb, 0x0af1, 246}, {0x0bf9, 0x0e3f, 582}, {0x17db, 0x20a0, 2245}, - {0x20a1, 0x20ba, 1}, + {0x20a1, 0x20bd, 1}, {0xa838, 0xfdfc, 21956}, {0xfe69, 0xff04, 155}, {0xffe0, 0xffe1, 1}, @@ -2611,7 +2868,8 @@ var _Sk = &RangeTable{ {0xa700, 0xa716, 1}, {0xa720, 0xa721, 1}, {0xa789, 0xa78a, 1}, - {0xfbb2, 0xfbc1, 1}, + {0xab5b, 0xfbb2, 20567}, + {0xfbb3, 0xfbc1, 1}, {0xff3e, 0xff40, 2}, {0xffe3, 0xffe3, 1}, }, @@ -2679,7 +2937,8 @@ var _So = &RangeTable{ R16: []Range16{ {0x00a6, 0x00a9, 3}, {0x00ae, 0x00b0, 2}, - {0x0482, 0x060e, 396}, + {0x0482, 0x058d, 267}, + {0x058e, 0x060e, 128}, {0x060f, 0x06de, 207}, {0x06e9, 0x06fd, 20}, {0x06fe, 0x07f6, 248}, @@ -2728,7 +2987,7 @@ var _So = &RangeTable{ {0x232b, 0x237b, 1}, {0x237d, 0x239a, 1}, {0x23b4, 0x23db, 1}, - {0x23e2, 0x23f3, 1}, + {0x23e2, 0x23fa, 1}, {0x2400, 0x2426, 1}, {0x2440, 0x244a, 1}, {0x249c, 0x24e9, 1}, @@ -2736,13 +2995,16 @@ var _So = &RangeTable{ {0x25b8, 0x25c0, 1}, {0x25c2, 0x25f7, 1}, {0x2600, 0x266e, 1}, - {0x2670, 0x26ff, 1}, - {0x2701, 0x2767, 1}, + {0x2670, 0x2767, 1}, {0x2794, 0x27bf, 1}, {0x2800, 0x28ff, 1}, {0x2b00, 0x2b2f, 1}, {0x2b45, 0x2b46, 1}, - {0x2b50, 0x2b59, 1}, + 
{0x2b4d, 0x2b73, 1}, + {0x2b76, 0x2b95, 1}, + {0x2b98, 0x2bb9, 1}, + {0x2bbd, 0x2bc8, 1}, + {0x2bca, 0x2bd1, 1}, {0x2ce5, 0x2cea, 1}, {0x2e80, 0x2e99, 1}, {0x2e9b, 0x2ef3, 1}, @@ -2777,8 +3039,14 @@ var _So = &RangeTable{ {0x10137, 0x10137, 1}, {0x10138, 0x1013f, 1}, {0x10179, 0x10189, 1}, - {0x10190, 0x1019b, 1}, - {0x101d0, 0x101fc, 1}, + {0x1018c, 0x10190, 4}, + {0x10191, 0x1019b, 1}, + {0x101a0, 0x101d0, 48}, + {0x101d1, 0x101fc, 1}, + {0x10877, 0x10878, 1}, + {0x10ac8, 0x16b3c, 24692}, + {0x16b3d, 0x16b3f, 1}, + {0x16b45, 0x1bc9c, 20823}, {0x1d000, 0x1d0f5, 1}, {0x1d100, 0x1d126, 1}, {0x1d129, 0x1d164, 1}, @@ -2792,9 +3060,9 @@ var _So = &RangeTable{ {0x1f000, 0x1f02b, 1}, {0x1f030, 0x1f093, 1}, {0x1f0a0, 0x1f0ae, 1}, - {0x1f0b1, 0x1f0be, 1}, + {0x1f0b1, 0x1f0bf, 1}, {0x1f0c1, 0x1f0cf, 1}, - {0x1f0d1, 0x1f0df, 1}, + {0x1f0d1, 0x1f0f5, 1}, {0x1f110, 0x1f12e, 1}, {0x1f130, 0x1f16b, 1}, {0x1f170, 0x1f19a, 1}, @@ -2802,24 +3070,25 @@ var _So = &RangeTable{ {0x1f210, 0x1f23a, 1}, {0x1f240, 0x1f248, 1}, {0x1f250, 0x1f251, 1}, - {0x1f300, 0x1f320, 1}, - {0x1f330, 0x1f335, 1}, - {0x1f337, 0x1f37c, 1}, - {0x1f380, 0x1f393, 1}, - {0x1f3a0, 0x1f3c4, 1}, - {0x1f3c6, 0x1f3ca, 1}, - {0x1f3e0, 0x1f3f0, 1}, - {0x1f400, 0x1f43e, 1}, - {0x1f440, 0x1f442, 2}, - {0x1f443, 0x1f4f7, 1}, - {0x1f4f9, 0x1f4fc, 1}, - {0x1f500, 0x1f53d, 1}, - {0x1f540, 0x1f543, 1}, - {0x1f550, 0x1f567, 1}, - {0x1f5fb, 0x1f640, 1}, - {0x1f645, 0x1f64f, 1}, - {0x1f680, 0x1f6c5, 1}, + {0x1f300, 0x1f32c, 1}, + {0x1f330, 0x1f37d, 1}, + {0x1f380, 0x1f3ce, 1}, + {0x1f3d4, 0x1f3f7, 1}, + {0x1f400, 0x1f4fe, 1}, + {0x1f500, 0x1f54a, 1}, + {0x1f550, 0x1f579, 1}, + {0x1f57b, 0x1f5a3, 1}, + {0x1f5a5, 0x1f642, 1}, + {0x1f645, 0x1f6cf, 1}, + {0x1f6e0, 0x1f6ec, 1}, + {0x1f6f0, 0x1f6f3, 1}, {0x1f700, 0x1f773, 1}, + {0x1f780, 0x1f7d4, 1}, + {0x1f800, 0x1f80b, 1}, + {0x1f810, 0x1f847, 1}, + {0x1f850, 0x1f859, 1}, + {0x1f860, 0x1f887, 1}, + {0x1f890, 0x1f8ad, 1}, }, LatinOffset: 2, } @@ -2911,7 +3180,7 @@ var ( ) // Generated by running -// maketables --scripts=all --url=http://www.unicode.org/Public/6.3.0/ucd/ +// maketables --scripts=all --url=http://www.unicode.org/Public/7.0.0/ucd/ // DO NOT EDIT // Scripts is the set of Unicode script tables. 
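The regenerated Scripts map below, together with the exported variables added near the end of the file, makes the scripts new in Unicode 7.0.0 addressable by name from user code. A short sketch using one of the code-point/script pairs added to script_test.go above (again assuming a toolchain built with these tables):

package main

import (
	"fmt"
	"unicode"
)

func main() {
	r := rune(0x16ADA) // a Bassa Vah code point, also used in the updated script_test.go

	// Lookup by name through the regenerated Scripts map...
	fmt.Println(unicode.Is(unicode.Scripts["Bassa_Vah"], r)) // true

	// ...or through the newly exported package variable.
	fmt.Println(unicode.Is(unicode.Bassa_Vah, r)) // true
}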
@@ -2921,6 +3190,7 @@ var Scripts = map[string]*RangeTable{ "Avestan": Avestan, "Balinese": Balinese, "Bamum": Bamum, + "Bassa_Vah": Bassa_Vah, "Batak": Batak, "Bengali": Bengali, "Bopomofo": Bopomofo, @@ -2930,6 +3200,7 @@ var Scripts = map[string]*RangeTable{ "Buhid": Buhid, "Canadian_Aboriginal": Canadian_Aboriginal, "Carian": Carian, + "Caucasian_Albanian": Caucasian_Albanian, "Chakma": Chakma, "Cham": Cham, "Cherokee": Cherokee, @@ -2940,11 +3211,14 @@ var Scripts = map[string]*RangeTable{ "Cyrillic": Cyrillic, "Deseret": Deseret, "Devanagari": Devanagari, + "Duployan": Duployan, "Egyptian_Hieroglyphs": Egyptian_Hieroglyphs, + "Elbasan": Elbasan, "Ethiopic": Ethiopic, "Georgian": Georgian, "Glagolitic": Glagolitic, "Gothic": Gothic, + "Grantha": Grantha, "Greek": Greek, "Gujarati": Gujarati, "Gurmukhi": Gurmukhi, @@ -2964,40 +3238,56 @@ var Scripts = map[string]*RangeTable{ "Kayah_Li": Kayah_Li, "Kharoshthi": Kharoshthi, "Khmer": Khmer, + "Khojki": Khojki, + "Khudawadi": Khudawadi, "Lao": Lao, "Latin": Latin, "Lepcha": Lepcha, "Limbu": Limbu, + "Linear_A": Linear_A, "Linear_B": Linear_B, "Lisu": Lisu, "Lycian": Lycian, "Lydian": Lydian, + "Mahajani": Mahajani, "Malayalam": Malayalam, "Mandaic": Mandaic, + "Manichaean": Manichaean, "Meetei_Mayek": Meetei_Mayek, + "Mende_Kikakui": Mende_Kikakui, "Meroitic_Cursive": Meroitic_Cursive, "Meroitic_Hieroglyphs": Meroitic_Hieroglyphs, "Miao": Miao, + "Modi": Modi, "Mongolian": Mongolian, + "Mro": Mro, "Myanmar": Myanmar, + "Nabataean": Nabataean, "New_Tai_Lue": New_Tai_Lue, "Nko": Nko, "Ogham": Ogham, "Ol_Chiki": Ol_Chiki, "Old_Italic": Old_Italic, + "Old_North_Arabian": Old_North_Arabian, + "Old_Permic": Old_Permic, "Old_Persian": Old_Persian, "Old_South_Arabian": Old_South_Arabian, "Old_Turkic": Old_Turkic, "Oriya": Oriya, "Osmanya": Osmanya, + "Pahawh_Hmong": Pahawh_Hmong, + "Palmyrene": Palmyrene, + "Pau_Cin_Hau": Pau_Cin_Hau, "Phags_Pa": Phags_Pa, "Phoenician": Phoenician, + "Psalter_Pahlavi": Psalter_Pahlavi, "Rejang": Rejang, "Runic": Runic, "Samaritan": Samaritan, "Saurashtra": Saurashtra, "Sharada": Sharada, "Shavian": Shavian, + "Siddham": Siddham, "Sinhala": Sinhala, "Sora_Sompeng": Sora_Sompeng, "Sundanese": Sundanese, @@ -3015,8 +3305,10 @@ var Scripts = map[string]*RangeTable{ "Thai": Thai, "Tibetan": Tibetan, "Tifinagh": Tifinagh, + "Tirhuta": Tirhuta, "Ugaritic": Ugaritic, "Vai": Vai, + "Warang_Citi": Warang_Citi, "Yi": Yi, } @@ -3025,7 +3317,6 @@ var _Arabic = &RangeTable{ {0x0600, 0x0604, 1}, {0x0606, 0x060b, 1}, {0x060d, 0x061a, 1}, - {0x061c, 0x061c, 1}, {0x061e, 0x061e, 1}, {0x0620, 0x063f, 1}, {0x0641, 0x064a, 1}, @@ -3034,14 +3325,13 @@ var _Arabic = &RangeTable{ {0x0671, 0x06dc, 1}, {0x06de, 0x06ff, 1}, {0x0750, 0x077f, 1}, - {0x08a0, 0x08a0, 1}, - {0x08a2, 0x08ac, 1}, - {0x08e4, 0x08fe, 1}, + {0x08a0, 0x08b2, 1}, + {0x08e4, 0x08ff, 1}, {0xfb50, 0xfbc1, 1}, {0xfbd3, 0xfd3d, 1}, {0xfd50, 0xfd8f, 1}, {0xfd92, 0xfdc7, 1}, - {0xfdf0, 0xfdfc, 1}, + {0xfdf0, 0xfdfd, 1}, {0xfe70, 0xfe74, 1}, {0xfe76, 0xfefc, 1}, }, @@ -3090,7 +3380,7 @@ var _Armenian = &RangeTable{ {0x0559, 0x055f, 1}, {0x0561, 0x0587, 1}, {0x058a, 0x058a, 1}, - {0x058f, 0x058f, 1}, + {0x058d, 0x058f, 1}, {0xfb13, 0xfb17, 1}, }, } @@ -3119,6 +3409,14 @@ var _Bamum = &RangeTable{ }, } +var _Bassa_Vah = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x16ad0, 0x16aed, 1}, + {0x16af0, 0x16af5, 1}, + }, +} + var _Batak = &RangeTable{ R16: []Range16{ {0x1bc0, 0x1bf3, 1}, @@ -3128,7 +3426,7 @@ var _Batak = &RangeTable{ var _Bengali = &RangeTable{ R16: 
[]Range16{ - {0x0981, 0x0983, 1}, + {0x0980, 0x0983, 1}, {0x0985, 0x098c, 1}, {0x098f, 0x0990, 1}, {0x0993, 0x09a8, 1}, @@ -3158,6 +3456,7 @@ var _Brahmi = &RangeTable{ R32: []Range32{ {0x11000, 0x1104d, 1}, {0x11052, 0x1106f, 1}, + {0x1107f, 0x1107f, 1}, }, } @@ -3194,6 +3493,14 @@ var _Carian = &RangeTable{ }, } +var _Caucasian_Albanian = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x10530, 0x10563, 1}, + {0x1056f, 0x1056f, 1}, + }, +} + var _Chakma = &RangeTable{ R16: []Range16{}, R32: []Range32{ @@ -3234,8 +3541,9 @@ var _Common = &RangeTable{ {0x0385, 0x0385, 1}, {0x0387, 0x0387, 1}, {0x0589, 0x0589, 1}, + {0x0605, 0x0605, 1}, {0x060c, 0x060c, 1}, - {0x061b, 0x061b, 1}, + {0x061b, 0x061c, 1}, {0x061f, 0x061f, 1}, {0x0640, 0x0640, 1}, {0x0660, 0x0669, 1}, @@ -3258,21 +3566,23 @@ var _Common = &RangeTable{ {0x2066, 0x2070, 1}, {0x2074, 0x207e, 1}, {0x2080, 0x208e, 1}, - {0x20a0, 0x20ba, 1}, + {0x20a0, 0x20bd, 1}, {0x2100, 0x2125, 1}, {0x2127, 0x2129, 1}, {0x212c, 0x2131, 1}, {0x2133, 0x214d, 1}, {0x214f, 0x215f, 1}, {0x2189, 0x2189, 1}, - {0x2190, 0x23f3, 1}, + {0x2190, 0x23fa, 1}, {0x2400, 0x2426, 1}, {0x2440, 0x244a, 1}, - {0x2460, 0x26ff, 1}, - {0x2701, 0x27ff, 1}, - {0x2900, 0x2b4c, 1}, - {0x2b50, 0x2b59, 1}, - {0x2e00, 0x2e3b, 1}, + {0x2460, 0x27ff, 1}, + {0x2900, 0x2b73, 1}, + {0x2b76, 0x2b95, 1}, + {0x2b98, 0x2bb9, 1}, + {0x2bbd, 0x2bc8, 1}, + {0x2bca, 0x2bd1, 1}, + {0x2e00, 0x2e42, 1}, {0x2ff0, 0x2ffb, 1}, {0x3000, 0x3004, 1}, {0x3006, 0x3006, 1}, @@ -3291,9 +3601,10 @@ var _Common = &RangeTable{ {0xa700, 0xa721, 1}, {0xa788, 0xa78a, 1}, {0xa830, 0xa839, 1}, + {0xa92e, 0xa92e, 1}, {0xa9cf, 0xa9cf, 1}, + {0xab5b, 0xab5b, 1}, {0xfd3e, 0xfd3f, 1}, - {0xfdfd, 0xfdfd, 1}, {0xfe10, 0xfe19, 1}, {0xfe30, 0xfe52, 1}, {0xfe54, 0xfe66, 1}, @@ -3314,6 +3625,8 @@ var _Common = &RangeTable{ {0x10137, 0x1013f, 1}, {0x10190, 0x1019b, 1}, {0x101d0, 0x101fc, 1}, + {0x102e1, 0x102fb, 1}, + {0x1bca0, 0x1bca3, 1}, {0x1d000, 0x1d0f5, 1}, {0x1d100, 0x1d126, 1}, {0x1d129, 0x1d166, 1}, @@ -3347,10 +3660,10 @@ var _Common = &RangeTable{ {0x1f000, 0x1f02b, 1}, {0x1f030, 0x1f093, 1}, {0x1f0a0, 0x1f0ae, 1}, - {0x1f0b1, 0x1f0be, 1}, + {0x1f0b1, 0x1f0bf, 1}, {0x1f0c1, 0x1f0cf, 1}, - {0x1f0d1, 0x1f0df, 1}, - {0x1f100, 0x1f10a, 1}, + {0x1f0d1, 0x1f0f5, 1}, + {0x1f100, 0x1f10c, 1}, {0x1f110, 0x1f12e, 1}, {0x1f130, 0x1f16b, 1}, {0x1f170, 0x1f19a, 1}, @@ -3359,24 +3672,25 @@ var _Common = &RangeTable{ {0x1f210, 0x1f23a, 1}, {0x1f240, 0x1f248, 1}, {0x1f250, 0x1f251, 1}, - {0x1f300, 0x1f320, 1}, - {0x1f330, 0x1f335, 1}, - {0x1f337, 0x1f37c, 1}, - {0x1f380, 0x1f393, 1}, - {0x1f3a0, 0x1f3c4, 1}, - {0x1f3c6, 0x1f3ca, 1}, - {0x1f3e0, 0x1f3f0, 1}, - {0x1f400, 0x1f43e, 1}, - {0x1f440, 0x1f440, 1}, - {0x1f442, 0x1f4f7, 1}, - {0x1f4f9, 0x1f4fc, 1}, - {0x1f500, 0x1f53d, 1}, - {0x1f540, 0x1f543, 1}, - {0x1f550, 0x1f567, 1}, - {0x1f5fb, 0x1f640, 1}, - {0x1f645, 0x1f64f, 1}, - {0x1f680, 0x1f6c5, 1}, + {0x1f300, 0x1f32c, 1}, + {0x1f330, 0x1f37d, 1}, + {0x1f380, 0x1f3ce, 1}, + {0x1f3d4, 0x1f3f7, 1}, + {0x1f400, 0x1f4fe, 1}, + {0x1f500, 0x1f54a, 1}, + {0x1f550, 0x1f579, 1}, + {0x1f57b, 0x1f5a3, 1}, + {0x1f5a5, 0x1f642, 1}, + {0x1f645, 0x1f6cf, 1}, + {0x1f6e0, 0x1f6ec, 1}, + {0x1f6f0, 0x1f6f3, 1}, {0x1f700, 0x1f773, 1}, + {0x1f780, 0x1f7d4, 1}, + {0x1f800, 0x1f80b, 1}, + {0x1f810, 0x1f847, 1}, + {0x1f850, 0x1f859, 1}, + {0x1f860, 0x1f887, 1}, + {0x1f890, 0x1f8ad, 1}, {0xe0001, 0xe0001, 1}, {0xe0020, 0xe007f, 1}, }, @@ -3394,9 +3708,9 @@ var _Coptic = &RangeTable{ var _Cuneiform = &RangeTable{ R16: []Range16{}, R32: 
[]Range32{ - {0x12000, 0x1236e, 1}, - {0x12400, 0x12462, 1}, - {0x12470, 0x12473, 1}, + {0x12000, 0x12398, 1}, + {0x12400, 0x1246e, 1}, + {0x12470, 0x12474, 1}, }, } @@ -3415,11 +3729,11 @@ var _Cypriot = &RangeTable{ var _Cyrillic = &RangeTable{ R16: []Range16{ {0x0400, 0x0484, 1}, - {0x0487, 0x0527, 1}, + {0x0487, 0x052f, 1}, {0x1d2b, 0x1d2b, 1}, {0x1d78, 0x1d78, 1}, {0x2de0, 0x2dff, 1}, - {0xa640, 0xa697, 1}, + {0xa640, 0xa69d, 1}, {0xa69f, 0xa69f, 1}, }, } @@ -3435,12 +3749,22 @@ var _Devanagari = &RangeTable{ R16: []Range16{ {0x0900, 0x0950, 1}, {0x0953, 0x0963, 1}, - {0x0966, 0x0977, 1}, - {0x0979, 0x097f, 1}, + {0x0966, 0x097f, 1}, {0xa8e0, 0xa8fb, 1}, }, } +var _Duployan = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x1bc00, 0x1bc6a, 1}, + {0x1bc70, 0x1bc7c, 1}, + {0x1bc80, 0x1bc88, 1}, + {0x1bc90, 0x1bc99, 1}, + {0x1bc9c, 0x1bc9f, 1}, + }, +} + var _Egyptian_Hieroglyphs = &RangeTable{ R16: []Range16{}, R32: []Range32{ @@ -3448,6 +3772,13 @@ var _Egyptian_Hieroglyphs = &RangeTable{ }, } +var _Elbasan = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x10500, 0x10527, 1}, + }, +} + var _Ethiopic = &RangeTable{ R16: []Range16{ {0x1200, 0x1248, 1}, @@ -3512,11 +3843,32 @@ var _Gothic = &RangeTable{ }, } +var _Grantha = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x11301, 0x11303, 1}, + {0x11305, 0x1130c, 1}, + {0x1130f, 0x11310, 1}, + {0x11313, 0x11328, 1}, + {0x1132a, 0x11330, 1}, + {0x11332, 0x11333, 1}, + {0x11335, 0x11339, 1}, + {0x1133c, 0x11344, 1}, + {0x11347, 0x11348, 1}, + {0x1134b, 0x1134d, 1}, + {0x11357, 0x11357, 1}, + {0x1135d, 0x11363, 1}, + {0x11366, 0x1136c, 1}, + {0x11370, 0x11374, 1}, + }, +} + var _Greek = &RangeTable{ R16: []Range16{ {0x0370, 0x0373, 1}, {0x0375, 0x0377, 1}, {0x037a, 0x037d, 1}, + {0x037f, 0x037f, 1}, {0x0384, 0x0384, 1}, {0x0386, 0x0386, 1}, {0x0388, 0x038a, 1}, @@ -3545,9 +3897,11 @@ var _Greek = &RangeTable{ {0x1ff2, 0x1ff4, 1}, {0x1ff6, 0x1ffe, 1}, {0x2126, 0x2126, 1}, + {0xab65, 0xab65, 1}, }, R32: []Range32{ - {0x10140, 0x1018a, 1}, + {0x10140, 0x1018c, 1}, + {0x101a0, 0x101a0, 1}, {0x1d200, 0x1d245, 1}, }, } @@ -3678,22 +4032,25 @@ var _Inherited = &RangeTable{ {0x064b, 0x0655, 1}, {0x0670, 0x0670, 1}, {0x0951, 0x0952, 1}, + {0x1ab0, 0x1abe, 1}, {0x1cd0, 0x1cd2, 1}, {0x1cd4, 0x1ce0, 1}, {0x1ce2, 0x1ce8, 1}, {0x1ced, 0x1ced, 1}, {0x1cf4, 0x1cf4, 1}, - {0x1dc0, 0x1de6, 1}, + {0x1cf8, 0x1cf9, 1}, + {0x1dc0, 0x1df5, 1}, {0x1dfc, 0x1dff, 1}, {0x200c, 0x200d, 1}, {0x20d0, 0x20f0, 1}, {0x302a, 0x302d, 1}, {0x3099, 0x309a, 1}, {0xfe00, 0xfe0f, 1}, - {0xfe20, 0xfe26, 1}, + {0xfe20, 0xfe2d, 1}, }, R32: []Range32{ {0x101fd, 0x101fd, 1}, + {0x102e0, 0x102e0, 1}, {0x1d167, 0x1d169, 1}, {0x1d17b, 0x1d182, 1}, {0x1d185, 0x1d18b, 1}, @@ -3735,7 +4092,7 @@ var _Kaithi = &RangeTable{ var _Kannada = &RangeTable{ R16: []Range16{ - {0x0c82, 0x0c83, 1}, + {0x0c81, 0x0c83, 1}, {0x0c85, 0x0c8c, 1}, {0x0c8e, 0x0c90, 1}, {0x0c92, 0x0ca8, 1}, @@ -3769,7 +4126,8 @@ var _Katakana = &RangeTable{ var _Kayah_Li = &RangeTable{ R16: []Range16{ - {0xa900, 0xa92f, 1}, + {0xa900, 0xa92d, 1}, + {0xa92f, 0xa92f, 1}, }, } @@ -3796,6 +4154,22 @@ var _Khmer = &RangeTable{ }, } +var _Khojki = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x11200, 0x11211, 1}, + {0x11213, 0x1123d, 1}, + }, +} + +var _Khudawadi = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x112b0, 0x112ea, 1}, + {0x112f0, 0x112f9, 1}, + }, +} + var _Lao = &RangeTable{ R16: []Range16{ {0x0e81, 0x0e82, 1}, @@ -3845,9 +4219,12 @@ var _Latin = &RangeTable{ {0x2c60, 0x2c7f, 1}, 
{0xa722, 0xa787, 1}, {0xa78b, 0xa78e, 1}, - {0xa790, 0xa793, 1}, - {0xa7a0, 0xa7aa, 1}, - {0xa7f8, 0xa7ff, 1}, + {0xa790, 0xa7ad, 1}, + {0xa7b0, 0xa7b1, 1}, + {0xa7f7, 0xa7ff, 1}, + {0xab30, 0xab5a, 1}, + {0xab5c, 0xab5f, 1}, + {0xab64, 0xab64, 1}, {0xfb00, 0xfb06, 1}, {0xff21, 0xff3a, 1}, {0xff41, 0xff5a, 1}, @@ -3865,7 +4242,7 @@ var _Lepcha = &RangeTable{ var _Limbu = &RangeTable{ R16: []Range16{ - {0x1900, 0x191c, 1}, + {0x1900, 0x191e, 1}, {0x1920, 0x192b, 1}, {0x1930, 0x193b, 1}, {0x1940, 0x1940, 1}, @@ -3873,6 +4250,15 @@ var _Limbu = &RangeTable{ }, } +var _Linear_A = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x10600, 0x10736, 1}, + {0x10740, 0x10755, 1}, + {0x10760, 0x10767, 1}, + }, +} + var _Linear_B = &RangeTable{ R16: []Range16{}, R32: []Range32{ @@ -3907,9 +4293,16 @@ var _Lydian = &RangeTable{ }, } +var _Mahajani = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x11150, 0x11176, 1}, + }, +} + var _Malayalam = &RangeTable{ R16: []Range16{ - {0x0d02, 0x0d03, 1}, + {0x0d01, 0x0d03, 1}, {0x0d05, 0x0d0c, 1}, {0x0d0e, 0x0d10, 1}, {0x0d12, 0x0d3a, 1}, @@ -3930,6 +4323,14 @@ var _Mandaic = &RangeTable{ }, } +var _Manichaean = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x10ac0, 0x10ae6, 1}, + {0x10aeb, 0x10af6, 1}, + }, +} + var _Meetei_Mayek = &RangeTable{ R16: []Range16{ {0xaae0, 0xaaf6, 1}, @@ -3938,6 +4339,14 @@ var _Meetei_Mayek = &RangeTable{ }, } +var _Mende_Kikakui = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x1e800, 0x1e8c4, 1}, + {0x1e8c7, 0x1e8d6, 1}, + }, +} + var _Meroitic_Cursive = &RangeTable{ R16: []Range16{}, R32: []Range32{ @@ -3962,6 +4371,14 @@ var _Miao = &RangeTable{ }, } +var _Modi = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x11600, 0x11644, 1}, + {0x11650, 0x11659, 1}, + }, +} + var _Mongolian = &RangeTable{ R16: []Range16{ {0x1800, 0x1801, 1}, @@ -3973,10 +4390,28 @@ var _Mongolian = &RangeTable{ }, } +var _Mro = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x16a40, 0x16a5e, 1}, + {0x16a60, 0x16a69, 1}, + {0x16a6e, 0x16a6f, 1}, + }, +} + var _Myanmar = &RangeTable{ R16: []Range16{ {0x1000, 0x109f, 1}, - {0xaa60, 0xaa7b, 1}, + {0xa9e0, 0xa9fe, 1}, + {0xaa60, 0xaa7f, 1}, + }, +} + +var _Nabataean = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x10880, 0x1089e, 1}, + {0x108a7, 0x108af, 1}, }, } @@ -4010,8 +4445,21 @@ var _Ol_Chiki = &RangeTable{ var _Old_Italic = &RangeTable{ R16: []Range16{}, R32: []Range32{ - {0x10300, 0x1031e, 1}, - {0x10320, 0x10323, 1}, + {0x10300, 0x10323, 1}, + }, +} + +var _Old_North_Arabian = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x10a80, 0x10a9f, 1}, + }, +} + +var _Old_Permic = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x10350, 0x1037a, 1}, }, } @@ -4064,6 +4512,31 @@ var _Osmanya = &RangeTable{ }, } +var _Pahawh_Hmong = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x16b00, 0x16b45, 1}, + {0x16b50, 0x16b59, 1}, + {0x16b5b, 0x16b61, 1}, + {0x16b63, 0x16b77, 1}, + {0x16b7d, 0x16b8f, 1}, + }, +} + +var _Palmyrene = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x10860, 0x1087f, 1}, + }, +} + +var _Pau_Cin_Hau = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x11ac0, 0x11af8, 1}, + }, +} + var _Phags_Pa = &RangeTable{ R16: []Range16{ {0xa840, 0xa877, 1}, @@ -4078,6 +4551,15 @@ var _Phoenician = &RangeTable{ }, } +var _Psalter_Pahlavi = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x10b80, 0x10b91, 1}, + {0x10b99, 0x10b9c, 1}, + {0x10ba9, 0x10baf, 1}, + }, +} + var _Rejang = &RangeTable{ R16: []Range16{ 
{0xa930, 0xa953, 1}, @@ -4088,7 +4570,7 @@ var _Rejang = &RangeTable{ var _Runic = &RangeTable{ R16: []Range16{ {0x16a0, 0x16ea, 1}, - {0x16ee, 0x16f0, 1}, + {0x16ee, 0x16f8, 1}, }, } @@ -4110,7 +4592,8 @@ var _Sharada = &RangeTable{ R16: []Range16{}, R32: []Range32{ {0x11180, 0x111c8, 1}, - {0x111d0, 0x111d9, 1}, + {0x111cd, 0x111cd, 1}, + {0x111d0, 0x111da, 1}, }, } @@ -4121,6 +4604,14 @@ var _Shavian = &RangeTable{ }, } +var _Siddham = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x11580, 0x115b5, 1}, + {0x115b8, 0x115c9, 1}, + }, +} + var _Sinhala = &RangeTable{ R16: []Range16{ {0x0d82, 0x0d83, 1}, @@ -4133,8 +4624,12 @@ var _Sinhala = &RangeTable{ {0x0dcf, 0x0dd4, 1}, {0x0dd6, 0x0dd6, 1}, {0x0dd8, 0x0ddf, 1}, + {0x0de6, 0x0def, 1}, {0x0df2, 0x0df4, 1}, }, + R32: []Range32{ + {0x111e1, 0x111f4, 1}, + }, } var _Sora_Sompeng = &RangeTable{ @@ -4236,12 +4731,11 @@ var _Tamil = &RangeTable{ var _Telugu = &RangeTable{ R16: []Range16{ - {0x0c01, 0x0c03, 1}, + {0x0c00, 0x0c03, 1}, {0x0c05, 0x0c0c, 1}, {0x0c0e, 0x0c10, 1}, {0x0c12, 0x0c28, 1}, - {0x0c2a, 0x0c33, 1}, - {0x0c35, 0x0c39, 1}, + {0x0c2a, 0x0c39, 1}, {0x0c3d, 0x0c44, 1}, {0x0c46, 0x0c48, 1}, {0x0c4a, 0x0c4d, 1}, @@ -4286,6 +4780,14 @@ var _Tifinagh = &RangeTable{ }, } +var _Tirhuta = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x11480, 0x114c7, 1}, + {0x114d0, 0x114d9, 1}, + }, +} + var _Ugaritic = &RangeTable{ R16: []Range16{}, R32: []Range32{ @@ -4300,6 +4802,14 @@ var _Vai = &RangeTable{ }, } +var _Warang_Citi = &RangeTable{ + R16: []Range16{}, + R32: []Range32{ + {0x118a0, 0x118f2, 1}, + {0x118ff, 0x118ff, 1}, + }, +} + var _Yi = &RangeTable{ R16: []Range16{ {0xa000, 0xa48c, 1}, @@ -4314,6 +4824,7 @@ var ( Avestan = _Avestan // Avestan is the set of Unicode characters in script Avestan. Balinese = _Balinese // Balinese is the set of Unicode characters in script Balinese. Bamum = _Bamum // Bamum is the set of Unicode characters in script Bamum. + Bassa_Vah = _Bassa_Vah // Bassa_Vah is the set of Unicode characters in script Bassa_Vah. Batak = _Batak // Batak is the set of Unicode characters in script Batak. Bengali = _Bengali // Bengali is the set of Unicode characters in script Bengali. Bopomofo = _Bopomofo // Bopomofo is the set of Unicode characters in script Bopomofo. @@ -4323,6 +4834,7 @@ var ( Buhid = _Buhid // Buhid is the set of Unicode characters in script Buhid. Canadian_Aboriginal = _Canadian_Aboriginal // Canadian_Aboriginal is the set of Unicode characters in script Canadian_Aboriginal. Carian = _Carian // Carian is the set of Unicode characters in script Carian. + Caucasian_Albanian = _Caucasian_Albanian // Caucasian_Albanian is the set of Unicode characters in script Caucasian_Albanian. Chakma = _Chakma // Chakma is the set of Unicode characters in script Chakma. Cham = _Cham // Cham is the set of Unicode characters in script Cham. Cherokee = _Cherokee // Cherokee is the set of Unicode characters in script Cherokee. @@ -4333,11 +4845,14 @@ var ( Cyrillic = _Cyrillic // Cyrillic is the set of Unicode characters in script Cyrillic. Deseret = _Deseret // Deseret is the set of Unicode characters in script Deseret. Devanagari = _Devanagari // Devanagari is the set of Unicode characters in script Devanagari. + Duployan = _Duployan // Duployan is the set of Unicode characters in script Duployan. Egyptian_Hieroglyphs = _Egyptian_Hieroglyphs // Egyptian_Hieroglyphs is the set of Unicode characters in script Egyptian_Hieroglyphs. 
+ Elbasan = _Elbasan // Elbasan is the set of Unicode characters in script Elbasan. Ethiopic = _Ethiopic // Ethiopic is the set of Unicode characters in script Ethiopic. Georgian = _Georgian // Georgian is the set of Unicode characters in script Georgian. Glagolitic = _Glagolitic // Glagolitic is the set of Unicode characters in script Glagolitic. Gothic = _Gothic // Gothic is the set of Unicode characters in script Gothic. + Grantha = _Grantha // Grantha is the set of Unicode characters in script Grantha. Greek = _Greek // Greek is the set of Unicode characters in script Greek. Gujarati = _Gujarati // Gujarati is the set of Unicode characters in script Gujarati. Gurmukhi = _Gurmukhi // Gurmukhi is the set of Unicode characters in script Gurmukhi. @@ -4357,40 +4872,56 @@ var ( Kayah_Li = _Kayah_Li // Kayah_Li is the set of Unicode characters in script Kayah_Li. Kharoshthi = _Kharoshthi // Kharoshthi is the set of Unicode characters in script Kharoshthi. Khmer = _Khmer // Khmer is the set of Unicode characters in script Khmer. + Khojki = _Khojki // Khojki is the set of Unicode characters in script Khojki. + Khudawadi = _Khudawadi // Khudawadi is the set of Unicode characters in script Khudawadi. Lao = _Lao // Lao is the set of Unicode characters in script Lao. Latin = _Latin // Latin is the set of Unicode characters in script Latin. Lepcha = _Lepcha // Lepcha is the set of Unicode characters in script Lepcha. Limbu = _Limbu // Limbu is the set of Unicode characters in script Limbu. + Linear_A = _Linear_A // Linear_A is the set of Unicode characters in script Linear_A. Linear_B = _Linear_B // Linear_B is the set of Unicode characters in script Linear_B. Lisu = _Lisu // Lisu is the set of Unicode characters in script Lisu. Lycian = _Lycian // Lycian is the set of Unicode characters in script Lycian. Lydian = _Lydian // Lydian is the set of Unicode characters in script Lydian. + Mahajani = _Mahajani // Mahajani is the set of Unicode characters in script Mahajani. Malayalam = _Malayalam // Malayalam is the set of Unicode characters in script Malayalam. Mandaic = _Mandaic // Mandaic is the set of Unicode characters in script Mandaic. + Manichaean = _Manichaean // Manichaean is the set of Unicode characters in script Manichaean. Meetei_Mayek = _Meetei_Mayek // Meetei_Mayek is the set of Unicode characters in script Meetei_Mayek. + Mende_Kikakui = _Mende_Kikakui // Mende_Kikakui is the set of Unicode characters in script Mende_Kikakui. Meroitic_Cursive = _Meroitic_Cursive // Meroitic_Cursive is the set of Unicode characters in script Meroitic_Cursive. Meroitic_Hieroglyphs = _Meroitic_Hieroglyphs // Meroitic_Hieroglyphs is the set of Unicode characters in script Meroitic_Hieroglyphs. Miao = _Miao // Miao is the set of Unicode characters in script Miao. + Modi = _Modi // Modi is the set of Unicode characters in script Modi. Mongolian = _Mongolian // Mongolian is the set of Unicode characters in script Mongolian. + Mro = _Mro // Mro is the set of Unicode characters in script Mro. Myanmar = _Myanmar // Myanmar is the set of Unicode characters in script Myanmar. + Nabataean = _Nabataean // Nabataean is the set of Unicode characters in script Nabataean. New_Tai_Lue = _New_Tai_Lue // New_Tai_Lue is the set of Unicode characters in script New_Tai_Lue. Nko = _Nko // Nko is the set of Unicode characters in script Nko. Ogham = _Ogham // Ogham is the set of Unicode characters in script Ogham. Ol_Chiki = _Ol_Chiki // Ol_Chiki is the set of Unicode characters in script Ol_Chiki. 
Old_Italic = _Old_Italic // Old_Italic is the set of Unicode characters in script Old_Italic. + Old_North_Arabian = _Old_North_Arabian // Old_North_Arabian is the set of Unicode characters in script Old_North_Arabian. + Old_Permic = _Old_Permic // Old_Permic is the set of Unicode characters in script Old_Permic. Old_Persian = _Old_Persian // Old_Persian is the set of Unicode characters in script Old_Persian. Old_South_Arabian = _Old_South_Arabian // Old_South_Arabian is the set of Unicode characters in script Old_South_Arabian. Old_Turkic = _Old_Turkic // Old_Turkic is the set of Unicode characters in script Old_Turkic. Oriya = _Oriya // Oriya is the set of Unicode characters in script Oriya. Osmanya = _Osmanya // Osmanya is the set of Unicode characters in script Osmanya. + Pahawh_Hmong = _Pahawh_Hmong // Pahawh_Hmong is the set of Unicode characters in script Pahawh_Hmong. + Palmyrene = _Palmyrene // Palmyrene is the set of Unicode characters in script Palmyrene. + Pau_Cin_Hau = _Pau_Cin_Hau // Pau_Cin_Hau is the set of Unicode characters in script Pau_Cin_Hau. Phags_Pa = _Phags_Pa // Phags_Pa is the set of Unicode characters in script Phags_Pa. Phoenician = _Phoenician // Phoenician is the set of Unicode characters in script Phoenician. + Psalter_Pahlavi = _Psalter_Pahlavi // Psalter_Pahlavi is the set of Unicode characters in script Psalter_Pahlavi. Rejang = _Rejang // Rejang is the set of Unicode characters in script Rejang. Runic = _Runic // Runic is the set of Unicode characters in script Runic. Samaritan = _Samaritan // Samaritan is the set of Unicode characters in script Samaritan. Saurashtra = _Saurashtra // Saurashtra is the set of Unicode characters in script Saurashtra. Sharada = _Sharada // Sharada is the set of Unicode characters in script Sharada. Shavian = _Shavian // Shavian is the set of Unicode characters in script Shavian. + Siddham = _Siddham // Siddham is the set of Unicode characters in script Siddham. Sinhala = _Sinhala // Sinhala is the set of Unicode characters in script Sinhala. Sora_Sompeng = _Sora_Sompeng // Sora_Sompeng is the set of Unicode characters in script Sora_Sompeng. Sundanese = _Sundanese // Sundanese is the set of Unicode characters in script Sundanese. @@ -4408,13 +4939,15 @@ var ( Thai = _Thai // Thai is the set of Unicode characters in script Thai. Tibetan = _Tibetan // Tibetan is the set of Unicode characters in script Tibetan. Tifinagh = _Tifinagh // Tifinagh is the set of Unicode characters in script Tifinagh. + Tirhuta = _Tirhuta // Tirhuta is the set of Unicode characters in script Tirhuta. Ugaritic = _Ugaritic // Ugaritic is the set of Unicode characters in script Ugaritic. Vai = _Vai // Vai is the set of Unicode characters in script Vai. + Warang_Citi = _Warang_Citi // Warang_Citi is the set of Unicode characters in script Warang_Citi. Yi = _Yi // Yi is the set of Unicode characters in script Yi. ) // Generated by running -// maketables --props=all --url=http://www.unicode.org/Public/6.3.0/ucd/ +// maketables --props=all --url=http://www.unicode.org/Public/7.0.0/ucd/ // DO NOT EDIT // Properties is the set of Unicode property tables. 
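The regenerated script tables above come from the Unicode 7.0.0 UCD and add a batch of new scripts (Bassa_Vah, Grantha, Siddham, Tirhuta, Warang_Citi, and others) alongside widened ranges for existing ones such as Runic and Telugu. User code reaches these tables through the exported aliases in the var block using the standard unicode predicates. The snippet below is an illustrative sketch, not part of the patch; it only relies on ranges visible in the diff (Siddham begins at U+11580, Warang_Citi at U+118A0).

package main

import (
	"fmt"
	"unicode"
)

func main() {
	// U+11580 lies inside the new _Siddham range {0x11580, 0x115b5} added above.
	fmt.Println(unicode.Is(unicode.Siddham, rune(0x11580))) // true

	// unicode.In checks membership in several of the new tables at once;
	// U+118A0 falls in the Warang_Citi range {0x118a0, 0x118f2}.
	fmt.Println(unicode.In(rune(0x118a0), unicode.Warang_Citi, unicode.Tirhuta)) // true
}

With the pre-7.0 tables shipped in Go 1.3, these script names do not exist, so such lookups only compile against the regenerated package.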
@@ -4486,6 +5019,7 @@ var _Dash = &RangeTable{ {0x2e17, 0x2e17, 1}, {0x2e1a, 0x2e1a, 1}, {0x2e3a, 0x2e3b, 1}, + {0x2e40, 0x2e40, 1}, {0x301c, 0x301c, 1}, {0x3030, 0x3030, 1}, {0x30a0, 0x30a0, 1}, @@ -4583,6 +5117,7 @@ var _Diacritic = &RangeTable{ {0x1939, 0x193b, 1}, {0x1a75, 0x1a7c, 1}, {0x1a7f, 0x1a7f, 1}, + {0x1ab0, 0x1abd, 1}, {0x1b34, 0x1b34, 1}, {0x1b44, 0x1b44, 1}, {0x1b6b, 0x1b73, 1}, @@ -4592,8 +5127,10 @@ var _Diacritic = &RangeTable{ {0x1cd0, 0x1ce8, 1}, {0x1ced, 0x1ced, 1}, {0x1cf4, 0x1cf4, 1}, + {0x1cf8, 0x1cf9, 1}, {0x1d2c, 0x1d6a, 1}, {0x1dc4, 0x1dcf, 1}, + {0x1df5, 0x1df5, 1}, {0x1dfd, 0x1dff, 1}, {0x1fbd, 0x1fbd, 1}, {0x1fbf, 0x1fc1, 1}, @@ -4609,6 +5146,7 @@ var _Diacritic = &RangeTable{ {0xa66f, 0xa66f, 1}, {0xa67c, 0xa67d, 1}, {0xa67f, 0xa67f, 1}, + {0xa69c, 0xa69d, 1}, {0xa6f0, 0xa6f1, 1}, {0xa717, 0xa721, 1}, {0xa788, 0xa788, 1}, @@ -4619,12 +5157,14 @@ var _Diacritic = &RangeTable{ {0xa953, 0xa953, 1}, {0xa9b3, 0xa9b3, 1}, {0xa9c0, 0xa9c0, 1}, - {0xaa7b, 0xaa7b, 1}, + {0xa9e5, 0xa9e5, 1}, + {0xaa7b, 0xaa7d, 1}, {0xaabf, 0xaac2, 1}, {0xaaf6, 0xaaf6, 1}, + {0xab5b, 0xab5f, 1}, {0xabec, 0xabed, 1}, {0xfb1e, 0xfb1e, 1}, - {0xfe20, 0xfe26, 1}, + {0xfe20, 0xfe2d, 1}, {0xff3e, 0xff3e, 1}, {0xff40, 0xff40, 1}, {0xff70, 0xff70, 1}, @@ -4632,16 +5172,30 @@ var _Diacritic = &RangeTable{ {0xffe3, 0xffe3, 1}, }, R32: []Range32{ + {0x102e0, 0x102e0, 1}, + {0x10ae5, 0x10ae6, 1}, {0x110b9, 0x110ba, 1}, {0x11133, 0x11134, 1}, + {0x11173, 0x11173, 1}, {0x111c0, 0x111c0, 1}, + {0x11235, 0x11236, 1}, + {0x112e9, 0x112ea, 1}, + {0x1133c, 0x1133c, 1}, + {0x1134d, 0x1134d, 1}, + {0x11366, 0x1136c, 1}, + {0x11370, 0x11374, 1}, + {0x114c2, 0x114c3, 1}, + {0x115bf, 0x115c0, 1}, + {0x1163f, 0x1163f, 1}, {0x116b6, 0x116b7, 1}, + {0x16af0, 0x16af4, 1}, {0x16f8f, 0x16f9f, 1}, {0x1d167, 0x1d169, 1}, {0x1d16d, 0x1d172, 1}, {0x1d17b, 0x1d182, 1}, {0x1d185, 0x1d18b, 1}, {0x1d1aa, 0x1d1ad, 1}, + {0x1e8d0, 0x1e8d6, 1}, }, LatinOffset: 6, } @@ -4666,11 +5220,17 @@ var _Extender = &RangeTable{ {0xa015, 0xa015, 1}, {0xa60c, 0xa60c, 1}, {0xa9cf, 0xa9cf, 1}, + {0xa9e6, 0xa9e6, 1}, {0xaa70, 0xaa70, 1}, {0xaadd, 0xaadd, 1}, {0xaaf3, 0xaaf4, 1}, {0xff70, 0xff70, 1}, }, + R32: []Range32{ + {0x1135d, 0x1135d, 1}, + {0x115c6, 0x115c8, 1}, + {0x16b42, 0x16b43, 1}, + }, LatinOffset: 1, } @@ -4798,8 +5358,7 @@ var _Other_Alphabetic = &RangeTable{ {0x0825, 0x0827, 1}, {0x0829, 0x082c, 1}, {0x08e4, 0x08e9, 1}, - {0x08f0, 0x08fe, 1}, - {0x0900, 0x0903, 1}, + {0x08f0, 0x0903, 1}, {0x093a, 0x093b, 1}, {0x093e, 0x094c, 1}, {0x094e, 0x094f, 1}, @@ -4834,19 +5393,19 @@ var _Other_Alphabetic = &RangeTable{ {0x0bc6, 0x0bc8, 1}, {0x0bca, 0x0bcc, 1}, {0x0bd7, 0x0bd7, 1}, - {0x0c01, 0x0c03, 1}, + {0x0c00, 0x0c03, 1}, {0x0c3e, 0x0c44, 1}, {0x0c46, 0x0c48, 1}, {0x0c4a, 0x0c4c, 1}, {0x0c55, 0x0c56, 1}, {0x0c62, 0x0c63, 1}, - {0x0c82, 0x0c83, 1}, + {0x0c81, 0x0c83, 1}, {0x0cbe, 0x0cc4, 1}, {0x0cc6, 0x0cc8, 1}, {0x0cca, 0x0ccc, 1}, {0x0cd5, 0x0cd6, 1}, {0x0ce2, 0x0ce3, 1}, - {0x0d02, 0x0d03, 1}, + {0x0d01, 0x0d03, 1}, {0x0d3e, 0x0d44, 1}, {0x0d46, 0x0d48, 1}, {0x0d4a, 0x0d4c, 1}, @@ -4899,6 +5458,7 @@ var _Other_Alphabetic = &RangeTable{ {0x1be7, 0x1bf1, 1}, {0x1c24, 0x1c35, 1}, {0x1cf2, 0x1cf3, 1}, + {0x1de7, 0x1df4, 1}, {0x24b6, 0x24e9, 1}, {0x2de0, 0x2dff, 1}, {0xa674, 0xa67b, 1}, @@ -4923,6 +5483,7 @@ var _Other_Alphabetic = &RangeTable{ {0xfb1e, 0xfb1e, 1}, }, R32: []Range32{ + {0x10376, 0x1037a, 1}, {0x10a01, 0x10a03, 1}, {0x10a05, 0x10a06, 1}, {0x10a0c, 0x10a0f, 1}, @@ -4934,8 +5495,27 @@ var _Other_Alphabetic = &RangeTable{ 
{0x11127, 0x11132, 1}, {0x11180, 0x11182, 1}, {0x111b3, 0x111bf, 1}, + {0x1122c, 0x11234, 1}, + {0x11237, 0x11237, 1}, + {0x112df, 0x112e8, 1}, + {0x11301, 0x11303, 1}, + {0x1133e, 0x11344, 1}, + {0x11347, 0x11348, 1}, + {0x1134b, 0x1134c, 1}, + {0x11357, 0x11357, 1}, + {0x11362, 0x11363, 1}, + {0x114b0, 0x114c1, 1}, + {0x115af, 0x115b5, 1}, + {0x115b8, 0x115be, 1}, + {0x11630, 0x1163e, 1}, + {0x11640, 0x11640, 1}, {0x116ab, 0x116b5, 1}, + {0x16b30, 0x16b36, 1}, {0x16f51, 0x16f7e, 1}, + {0x1bc9e, 0x1bc9e, 1}, + {0x1f130, 0x1f149, 1}, + {0x1f150, 0x1f169, 1}, + {0x1f170, 0x1f189, 1}, }, } @@ -4976,6 +5556,11 @@ var _Other_Grapheme_Extend = &RangeTable{ {0xff9e, 0xff9f, 1}, }, R32: []Range32{ + {0x1133e, 0x1133e, 1}, + {0x11357, 0x11357, 1}, + {0x114b0, 0x114b0, 1}, + {0x114bd, 0x114bd, 1}, + {0x115af, 0x115af, 1}, {0x1d165, 0x1d165, 1}, {0x1d16e, 0x1d172, 1}, }, @@ -5017,8 +5602,10 @@ var _Other_Lowercase = &RangeTable{ {0x2170, 0x217f, 1}, {0x24d0, 0x24e9, 1}, {0x2c7c, 0x2c7d, 1}, + {0xa69c, 0xa69d, 1}, {0xa770, 0xa770, 1}, {0xa7f8, 0xa7f9, 1}, + {0xab5c, 0xab5f, 1}, }, LatinOffset: 2, } @@ -5170,6 +5757,11 @@ var _Other_Uppercase = &RangeTable{ {0x2160, 0x216f, 1}, {0x24b6, 0x24cf, 1}, }, + R32: []Range32{ + {0x1f130, 0x1f149, 1}, + {0x1f150, 0x1f169, 1}, + {0x1f170, 0x1f189, 1}, + }, } var _Pattern_Syntax = &RangeTable{ @@ -5225,6 +5817,7 @@ var _Quotation_Mark = &RangeTable{ {0x00bb, 0x00bb, 1}, {0x2018, 0x201f, 1}, {0x2039, 0x203a, 1}, + {0x2e42, 0x2e42, 1}, {0x300c, 0x300f, 1}, {0x301d, 0x301f, 1}, {0xfe41, 0xfe44, 1}, @@ -5248,8 +5841,6 @@ var _STerm = &RangeTable{ {0x0021, 0x0021, 1}, {0x002e, 0x002e, 1}, {0x003f, 0x003f, 1}, - {0x055c, 0x055c, 1}, - {0x055e, 0x055e, 1}, {0x0589, 0x0589, 1}, {0x061f, 0x061f, 1}, {0x06d4, 0x06d4, 1}, @@ -5272,6 +5863,7 @@ var _STerm = &RangeTable{ {0x203c, 0x203d, 1}, {0x2047, 0x2049, 1}, {0x2e2e, 0x2e2e, 1}, + {0x2e3c, 0x2e3c, 1}, {0x3002, 0x3002, 1}, {0xa4ff, 0xa4ff, 1}, {0xa60e, 0xa60f, 1}, @@ -5297,6 +5889,17 @@ var _STerm = &RangeTable{ {0x110be, 0x110c1, 1}, {0x11141, 0x11143, 1}, {0x111c5, 0x111c6, 1}, + {0x111cd, 0x111cd, 1}, + {0x11238, 0x11239, 1}, + {0x1123b, 0x1123c, 1}, + {0x115c2, 0x115c3, 1}, + {0x115c9, 0x115c9, 1}, + {0x11641, 0x11642, 1}, + {0x16a6e, 0x16a6f, 1}, + {0x16af5, 0x16af5, 1}, + {0x16b37, 0x16b38, 1}, + {0x16b44, 0x16b44, 1}, + {0x1bc9f, 0x1bc9f, 1}, }, LatinOffset: 3, } @@ -5368,6 +5971,7 @@ var _Terminal_Punctuation = &RangeTable{ {0x1361, 0x1368, 1}, {0x166d, 0x166e, 1}, {0x16eb, 0x16ed, 1}, + {0x1735, 0x1736, 1}, {0x17d4, 0x17d6, 1}, {0x17da, 0x17da, 1}, {0x1802, 0x1805, 1}, @@ -5381,6 +5985,8 @@ var _Terminal_Punctuation = &RangeTable{ {0x203c, 0x203d, 1}, {0x2047, 0x2049, 1}, {0x2e2e, 0x2e2e, 1}, + {0x2e3c, 0x2e3c, 1}, + {0x2e41, 0x2e41, 1}, {0x3001, 0x3002, 1}, {0xa4fe, 0xa4ff, 1}, {0xa60d, 0xa60f, 1}, @@ -5408,12 +6014,25 @@ var _Terminal_Punctuation = &RangeTable{ {0x103d0, 0x103d0, 1}, {0x10857, 0x10857, 1}, {0x1091f, 0x1091f, 1}, + {0x10a56, 0x10a57, 1}, + {0x10af0, 0x10af5, 1}, {0x10b3a, 0x10b3f, 1}, + {0x10b99, 0x10b9c, 1}, {0x11047, 0x1104d, 1}, {0x110be, 0x110c1, 1}, {0x11141, 0x11143, 1}, {0x111c5, 0x111c6, 1}, - {0x12470, 0x12473, 1}, + {0x111cd, 0x111cd, 1}, + {0x11238, 0x1123c, 1}, + {0x115c2, 0x115c5, 1}, + {0x115c9, 0x115c9, 1}, + {0x11641, 0x11642, 1}, + {0x12470, 0x12474, 1}, + {0x16a6e, 0x16a6f, 1}, + {0x16af5, 0x16af5, 1}, + {0x16b37, 0x16b39, 1}, + {0x16b44, 0x16b44, 1}, + {0x1bc9f, 0x1bc9f, 1}, }, LatinOffset: 5, } @@ -5500,7 +6119,7 @@ var ( ) // Generated by running -// maketables 
--data=http://www.unicode.org/Public/6.3.0/ucd/UnicodeData.txt --casefolding=http://www.unicode.org/Public/6.3.0/ucd/CaseFolding.txt +// maketables --data=http://www.unicode.org/Public/7.0.0/ucd/UnicodeData.txt --casefolding=http://www.unicode.org/Public/7.0.0/ucd/CaseFolding.txt // DO NOT EDIT // CaseRanges is the table describing case mappings for all letters with @@ -5598,13 +6217,16 @@ var _CaseRanges = []CaseRange{ {0x0256, 0x0257, d{-205, 0, -205}}, {0x0259, 0x0259, d{-202, 0, -202}}, {0x025B, 0x025B, d{-203, 0, -203}}, + {0x025C, 0x025C, d{42319, 0, 42319}}, {0x0260, 0x0260, d{-205, 0, -205}}, + {0x0261, 0x0261, d{42315, 0, 42315}}, {0x0263, 0x0263, d{-207, 0, -207}}, {0x0265, 0x0265, d{42280, 0, 42280}}, {0x0266, 0x0266, d{42308, 0, 42308}}, {0x0268, 0x0268, d{-209, 0, -209}}, {0x0269, 0x0269, d{-211, 0, -211}}, {0x026B, 0x026B, d{10743, 0, 10743}}, + {0x026C, 0x026C, d{42305, 0, 42305}}, {0x026F, 0x026F, d{-211, 0, -211}}, {0x0271, 0x0271, d{10749, 0, 10749}}, {0x0272, 0x0272, d{-213, 0, -213}}, @@ -5612,15 +6234,18 @@ var _CaseRanges = []CaseRange{ {0x027D, 0x027D, d{10727, 0, 10727}}, {0x0280, 0x0280, d{-218, 0, -218}}, {0x0283, 0x0283, d{-218, 0, -218}}, + {0x0287, 0x0287, d{42282, 0, 42282}}, {0x0288, 0x0288, d{-218, 0, -218}}, {0x0289, 0x0289, d{-69, 0, -69}}, {0x028A, 0x028B, d{-217, 0, -217}}, {0x028C, 0x028C, d{-71, 0, -71}}, {0x0292, 0x0292, d{-219, 0, -219}}, + {0x029E, 0x029E, d{42258, 0, 42258}}, {0x0345, 0x0345, d{84, 0, 84}}, {0x0370, 0x0373, d{UpperLower, UpperLower, UpperLower}}, {0x0376, 0x0377, d{UpperLower, UpperLower, UpperLower}}, {0x037B, 0x037D, d{130, 0, 130}}, + {0x037F, 0x037F, d{0, 116, 0}}, {0x0386, 0x0386, d{0, 38, 0}}, {0x0388, 0x038A, d{0, 37, 0}}, {0x038C, 0x038C, d{0, 64, 0}}, @@ -5644,6 +6269,7 @@ var _CaseRanges = []CaseRange{ {0x03F0, 0x03F0, d{-86, 0, -86}}, {0x03F1, 0x03F1, d{-80, 0, -80}}, {0x03F2, 0x03F2, d{7, 0, 7}}, + {0x03F3, 0x03F3, d{-116, 0, -116}}, {0x03F4, 0x03F4, d{0, -60, 0}}, {0x03F5, 0x03F5, d{-96, 0, -96}}, {0x03F7, 0x03F8, d{UpperLower, UpperLower, UpperLower}}, @@ -5659,7 +6285,7 @@ var _CaseRanges = []CaseRange{ {0x04C0, 0x04C0, d{0, 15, 0}}, {0x04C1, 0x04CE, d{UpperLower, UpperLower, UpperLower}}, {0x04CF, 0x04CF, d{-15, 0, -15}}, - {0x04D0, 0x0527, d{UpperLower, UpperLower, UpperLower}}, + {0x04D0, 0x052F, d{UpperLower, UpperLower, UpperLower}}, {0x0531, 0x0556, d{0, 48, 0}}, {0x0561, 0x0586, d{-48, 0, -48}}, {0x10A0, 0x10C5, d{0, 7264, 0}}, @@ -5757,7 +6383,7 @@ var _CaseRanges = []CaseRange{ {0x2D27, 0x2D27, d{-7264, 0, -7264}}, {0x2D2D, 0x2D2D, d{-7264, 0, -7264}}, {0xA640, 0xA66D, d{UpperLower, UpperLower, UpperLower}}, - {0xA680, 0xA697, d{UpperLower, UpperLower, UpperLower}}, + {0xA680, 0xA69B, d{UpperLower, UpperLower, UpperLower}}, {0xA722, 0xA72F, d{UpperLower, UpperLower, UpperLower}}, {0xA732, 0xA76F, d{UpperLower, UpperLower, UpperLower}}, {0xA779, 0xA77C, d{UpperLower, UpperLower, UpperLower}}, @@ -5766,12 +6392,19 @@ var _CaseRanges = []CaseRange{ {0xA78B, 0xA78C, d{UpperLower, UpperLower, UpperLower}}, {0xA78D, 0xA78D, d{0, -42280, 0}}, {0xA790, 0xA793, d{UpperLower, UpperLower, UpperLower}}, - {0xA7A0, 0xA7A9, d{UpperLower, UpperLower, UpperLower}}, + {0xA796, 0xA7A9, d{UpperLower, UpperLower, UpperLower}}, {0xA7AA, 0xA7AA, d{0, -42308, 0}}, + {0xA7AB, 0xA7AB, d{0, -42319, 0}}, + {0xA7AC, 0xA7AC, d{0, -42315, 0}}, + {0xA7AD, 0xA7AD, d{0, -42305, 0}}, + {0xA7B0, 0xA7B0, d{0, -42258, 0}}, + {0xA7B1, 0xA7B1, d{0, -42282, 0}}, {0xFF21, 0xFF3A, d{0, 32, 0}}, {0xFF41, 0xFF5A, d{-32, 0, -32}}, {0x10400, 0x10427, 
d{0, 40, 0}}, {0x10428, 0x1044F, d{-40, 0, -40}}, + {0x118A0, 0x118BF, d{0, 32, 0}}, + {0x118C0, 0x118DF, d{-32, 0, -32}}, } var properties = [MaxLatin1 + 1]uint8{ 0x00: pC, // '\x00' @@ -6181,8 +6814,8 @@ var foldLl = &RangeTable{ {0x0248, 0x024e, 2}, {0x0345, 0x0370, 43}, {0x0372, 0x0376, 4}, - {0x0386, 0x0388, 2}, - {0x0389, 0x038a, 1}, + {0x037f, 0x0386, 7}, + {0x0388, 0x038a, 1}, {0x038c, 0x038e, 2}, {0x038f, 0x0391, 2}, {0x0392, 0x03a1, 1}, @@ -6195,7 +6828,7 @@ var foldLl = &RangeTable{ {0x0460, 0x0480, 2}, {0x048a, 0x04c0, 2}, {0x04c1, 0x04cd, 2}, - {0x04d0, 0x0526, 2}, + {0x04d0, 0x052e, 2}, {0x0531, 0x0556, 1}, {0x10a0, 0x10c5, 1}, {0x10c7, 0x10cd, 6}, @@ -6230,18 +6863,21 @@ var foldLl = &RangeTable{ {0x2ceb, 0x2ced, 2}, {0x2cf2, 0xa640, 31054}, {0xa642, 0xa66c, 2}, - {0xa680, 0xa696, 2}, + {0xa680, 0xa69a, 2}, {0xa722, 0xa72e, 2}, {0xa732, 0xa76e, 2}, {0xa779, 0xa77d, 2}, {0xa77e, 0xa786, 2}, {0xa78b, 0xa78d, 2}, {0xa790, 0xa792, 2}, - {0xa7a0, 0xa7aa, 2}, + {0xa796, 0xa7aa, 2}, + {0xa7ab, 0xa7ad, 1}, + {0xa7b0, 0xa7b1, 1}, {0xff21, 0xff3a, 1}, }, R32: []Range32{ {0x10400, 0x10427, 1}, + {0x118a0, 0x118bf, 1}, }, LatinOffset: 3, } @@ -6297,30 +6933,31 @@ var foldLu = &RangeTable{ {0x0250, 0x0254, 1}, {0x0256, 0x0257, 1}, {0x0259, 0x025b, 2}, - {0x0260, 0x0263, 3}, - {0x0265, 0x0266, 1}, - {0x0268, 0x0269, 1}, - {0x026b, 0x026f, 4}, + {0x025c, 0x0260, 4}, + {0x0261, 0x0265, 2}, + {0x0266, 0x0268, 2}, + {0x0269, 0x026b, 2}, + {0x026c, 0x026f, 3}, {0x0271, 0x0272, 1}, {0x0275, 0x027d, 8}, {0x0280, 0x0283, 3}, - {0x0288, 0x028c, 1}, - {0x0292, 0x0345, 179}, - {0x0371, 0x0373, 2}, - {0x0377, 0x037b, 4}, + {0x0287, 0x028c, 1}, + {0x0292, 0x029e, 12}, + {0x0345, 0x0371, 44}, + {0x0373, 0x037b, 4}, {0x037c, 0x037d, 1}, {0x03ac, 0x03af, 1}, {0x03b1, 0x03ce, 1}, {0x03d0, 0x03d1, 1}, {0x03d5, 0x03d7, 1}, {0x03d9, 0x03ef, 2}, - {0x03f0, 0x03f2, 1}, + {0x03f0, 0x03f3, 1}, {0x03f5, 0x03fb, 3}, {0x0430, 0x045f, 1}, {0x0461, 0x0481, 2}, {0x048b, 0x04bf, 2}, {0x04c2, 0x04ce, 2}, - {0x04cf, 0x0527, 2}, + {0x04cf, 0x052f, 2}, {0x0561, 0x0586, 1}, {0x1d79, 0x1d7d, 4}, {0x1e01, 0x1e95, 2}, @@ -6349,18 +6986,19 @@ var foldLu = &RangeTable{ {0x2d01, 0x2d25, 1}, {0x2d27, 0x2d2d, 6}, {0xa641, 0xa66d, 2}, - {0xa681, 0xa697, 2}, + {0xa681, 0xa69b, 2}, {0xa723, 0xa72f, 2}, {0xa733, 0xa76f, 2}, {0xa77a, 0xa77c, 2}, {0xa77f, 0xa787, 2}, {0xa78c, 0xa791, 5}, - {0xa793, 0xa7a1, 14}, - {0xa7a3, 0xa7a9, 2}, + {0xa793, 0xa797, 4}, + {0xa799, 0xa7a9, 2}, {0xff41, 0xff5a, 1}, }, R32: []Range32{ {0x10428, 0x1044f, 1}, + {0x118c0, 0x118df, 1}, }, LatinOffset: 4, } @@ -6385,7 +7023,7 @@ var foldMn = &RangeTable{ // If there is no entry for a script name, there are no such points. var FoldScript = map[string]*RangeTable{} -// Range entries: 3471 16-bit, 832 32-bit, 4303 total. -// Range bytes: 20826 16-bit, 9984 32-bit, 30810 total. +// Range entries: 3532 16-bit, 1204 32-bit, 4736 total. +// Range bytes: 21192 16-bit, 14448 32-bit, 35640 total. // Fold orbit bytes: 63 pairs, 252 bytes diff --git a/libgo/go/unicode/utf8/utf8.go b/libgo/go/unicode/utf8/utf8.go index 0dc859a041b..9ac37184d69 100644 --- a/libgo/go/unicode/utf8/utf8.go +++ b/libgo/go/unicode/utf8/utf8.go @@ -211,8 +211,11 @@ func FullRuneInString(s string) bool { return !short } -// DecodeRune unpacks the first UTF-8 encoding in p and returns the rune and its width in bytes. -// If the encoding is invalid, it returns (RuneError, 1), an impossible result for correct UTF-8. 
+// DecodeRune unpacks the first UTF-8 encoding in p and returns the rune and +// its width in bytes. If p is empty it returns (RuneError, 0). Otherwise, if +// the encoding is invalid, it returns (RuneError, 1). Both are impossible +// results for correct UTF-8. +// // An encoding is invalid if it is incorrect UTF-8, encodes a rune that is // out of range, or is not the shortest possible UTF-8 encoding for the // value. No other validation is performed. @@ -221,8 +224,10 @@ func DecodeRune(p []byte) (r rune, size int) { return } -// DecodeRuneInString is like DecodeRune but its input is a string. -// If the encoding is invalid, it returns (RuneError, 1), an impossible result for correct UTF-8. +// DecodeRuneInString is like DecodeRune but its input is a string. If s is +// empty it returns (RuneError, 0). Otherwise, if the encoding is invalid, it +// returns (RuneError, 1). Both are impossible results for correct UTF-8. +// // An encoding is invalid if it is incorrect UTF-8, encodes a rune that is // out of range, or is not the shortest possible UTF-8 encoding for the // value. No other validation is performed. @@ -231,8 +236,11 @@ func DecodeRuneInString(s string) (r rune, size int) { return } -// DecodeLastRune unpacks the last UTF-8 encoding in p and returns the rune and its width in bytes. -// If the encoding is invalid, it returns (RuneError, 1), an impossible result for correct UTF-8. +// DecodeLastRune unpacks the last UTF-8 encoding in p and returns the rune and +// its width in bytes. If p is empty it returns (RuneError, 0). Otherwise, if +// the encoding is invalid, it returns (RuneError, 1). Both are impossible +// results for correct UTF-8. +// // An encoding is invalid if it is incorrect UTF-8, encodes a rune that is // out of range, or is not the shortest possible UTF-8 encoding for the // value. No other validation is performed. @@ -268,8 +276,10 @@ func DecodeLastRune(p []byte) (r rune, size int) { return r, size } -// DecodeLastRuneInString is like DecodeLastRune but its input is a string. -// If the encoding is invalid, it returns (RuneError, 1), an impossible result for correct UTF-8. +// DecodeLastRuneInString is like DecodeLastRune but its input is a string. If +// s is empty it returns (RuneError, 0). Otherwise, if the encoding is invalid, +// it returns (RuneError, 1). Both are impossible results for correct UTF-8. +// // An encoding is invalid if it is incorrect UTF-8, encodes a rune that is // out of range, or is not the shortest possible UTF-8 encoding for the // value. No other validation is performed. @@ -372,7 +382,7 @@ func RuneCount(p []byte) int { // RuneCountInString is like RuneCount but its input is a string. func RuneCountInString(s string) (n int) { - for _ = range s { + for range s { n++ } return diff --git a/libgo/merge.sh b/libgo/merge.sh index fb0d1afe82f..53af8a9899f 100755 --- a/libgo/merge.sh +++ b/libgo/merge.sh @@ -124,11 +124,11 @@ merge() { merge_c() { from=$1 to=$2 - oldfile=${OLDDIR}/src/pkg/runtime/$from + oldfile=${OLDDIR}/src/runtime/$from if test -f ${oldfile}; then sed -e 's/·/_/g' < ${oldfile} > ${oldfile}.tmp oldfile=${oldfile}.tmp - newfile=${NEWDIR}/src/pkg/runtime/$from + newfile=${NEWDIR}/src/runtime/$from sed -e 's/·/_/g' < ${newfile} > ${newfile}.tmp newfile=${newfile}.tmp libgofile=runtime/$to @@ -136,16 +136,16 @@ merge_c() { fi } -(cd ${NEWDIR}/src/pkg && find . -name '*.go' -print) | while read f; do - oldfile=${OLDDIR}/src/pkg/$f - newfile=${NEWDIR}/src/pkg/$f +(cd ${NEWDIR}/src && find . 
-name '*.go' -print) | while read f; do + oldfile=${OLDDIR}/src/$f + newfile=${NEWDIR}/src/$f libgofile=go/$f merge $f ${oldfile} ${newfile} ${libgofile} done -(cd ${NEWDIR}/src/pkg && find . -name testdata -print) | while read d; do - oldtd=${OLDDIR}/src/pkg/$d - newtd=${NEWDIR}/src/pkg/$d +(cd ${NEWDIR}/src && find . -name testdata -print) | while read d; do + oldtd=${OLDDIR}/src/$d + newtd=${NEWDIR}/src/$d libgotd=go/$d if ! test -d ${oldtd}; then continue @@ -195,15 +195,16 @@ done runtime="chan.goc chan.h cpuprof.goc env_posix.c heapdump.c lock_futex.c lfstack.goc lock_sema.c mcache.c mcentral.c mfixalloc.c mgc0.c mgc0.h mheap.c msize.c netpoll.goc netpoll_epoll.c netpoll_kqueue.c netpoll_stub.c panic.c print.c proc.c race.h rdebug.goc runtime.c runtime.h signal_unix.c signal_unix.h malloc.h malloc.goc mprof.goc parfor.c runtime1.goc sema.goc sigqueue.goc string.goc time.goc" for f in $runtime; do - merge_c $f $f + # merge_c $f $f + true done -merge_c os_linux.c thread-linux.c -merge_c mem_linux.c mem.c +# merge_c os_linux.c thread-linux.c +# merge_c mem_linux.c mem.c -(cd ${OLDDIR}/src/pkg && find . -name '*.go' -print) | while read f; do - oldfile=${OLDDIR}/src/pkg/$f - newfile=${NEWDIR}/src/pkg/$f +(cd ${OLDDIR}/src && find . -name '*.go' -print) | while read f; do + oldfile=${OLDDIR}/src/$f + newfile=${NEWDIR}/src/$f libgofile=go/$f if test -f ${newfile}; then continue diff --git a/libgo/runtime/env_posix.c b/libgo/runtime/env_posix.c index ff4bf0c5b1f..ee3e4514554 100644 --- a/libgo/runtime/env_posix.c +++ b/libgo/runtime/env_posix.c @@ -9,7 +9,7 @@ #include "arch.h" #include "malloc.h" -extern Slice syscall_Envs __asm__ (GOSYM_PREFIX "syscall.Envs"); +extern Slice envs; const byte* runtime_getenv(const char *s) @@ -22,8 +22,8 @@ runtime_getenv(const char *s) bs = (const byte*)s; len = runtime_findnull(bs); - envv = (String*)syscall_Envs.__values; - envc = syscall_Envs.__count; + envv = (String*)envs.__values; + envc = envs.__count; for(i=0; i<envc; i++){ if(envv[i].len <= len) continue; diff --git a/libgo/runtime/go-assert-interface.c b/libgo/runtime/go-assert-interface.c index 2510f9aef8b..427916f8c42 100644 --- a/libgo/runtime/go-assert-interface.c +++ b/libgo/runtime/go-assert-interface.c @@ -36,7 +36,7 @@ __go_assert_interface (const struct __go_type_descriptor *lhs_descriptor, /* A type assertion to an empty interface just returns the object descriptor. 
*/ - __go_assert (lhs_descriptor->__code == GO_INTERFACE); + __go_assert ((lhs_descriptor->__code & GO_CODE_MASK) == GO_INTERFACE); lhs_interface = (const struct __go_interface_type *) lhs_descriptor; if (lhs_interface->__methods.__count == 0) return rhs_descriptor; diff --git a/libgo/runtime/go-can-convert-interface.c b/libgo/runtime/go-can-convert-interface.c index 4de558077a7..aac889d346d 100644 --- a/libgo/runtime/go-can-convert-interface.c +++ b/libgo/runtime/go-can-convert-interface.c @@ -31,7 +31,7 @@ __go_can_convert_to_interface ( if (from_descriptor == NULL) return 0; - __go_assert (to_descriptor->__code == GO_INTERFACE); + __go_assert ((to_descriptor->__code & GO_CODE_MASK) == GO_INTERFACE); to_interface = (const struct __go_interface_type *) to_descriptor; to_method_count = to_interface->__methods.__count; to_method = ((const struct __go_interface_method *) diff --git a/libgo/runtime/go-check-interface.c b/libgo/runtime/go-check-interface.c index c29971adac2..722a4219ab2 100644 --- a/libgo/runtime/go-check-interface.c +++ b/libgo/runtime/go-check-interface.c @@ -30,9 +30,9 @@ __go_check_interface_type ( if (lhs_descriptor != rhs_descriptor && !__go_type_descriptors_equal (lhs_descriptor, rhs_descriptor) - && (lhs_descriptor->__code != GO_UNSAFE_POINTER + && ((lhs_descriptor->__code & GO_CODE_MASK) != GO_UNSAFE_POINTER || !__go_is_pointer_type (rhs_descriptor)) - && (rhs_descriptor->__code != GO_UNSAFE_POINTER + && ((rhs_descriptor->__code & GO_CODE_MASK) != GO_UNSAFE_POINTER || !__go_is_pointer_type (lhs_descriptor))) { struct __go_empty_interface panic_arg; diff --git a/libgo/runtime/go-convert-interface.c b/libgo/runtime/go-convert-interface.c index 3eee6bf4a8f..0e8a3062435 100644 --- a/libgo/runtime/go-convert-interface.c +++ b/libgo/runtime/go-convert-interface.c @@ -41,7 +41,7 @@ __go_convert_interface_2 (const struct __go_type_descriptor *lhs_descriptor, return NULL; } - __go_assert (lhs_descriptor->__code == GO_INTERFACE); + __go_assert ((lhs_descriptor->__code & GO_CODE_MASK) == GO_INTERFACE); lhs_interface = (const struct __go_interface_type *) lhs_descriptor; lhs_method_count = lhs_interface->__methods.__count; lhs_methods = ((const struct __go_interface_method *) diff --git a/libgo/runtime/go-make-slice.c b/libgo/runtime/go-make-slice.c index 855bb17ce59..ccd07e5ac51 100644 --- a/libgo/runtime/go-make-slice.c +++ b/libgo/runtime/go-make-slice.c @@ -30,7 +30,7 @@ __go_make_slice2 (const struct __go_type_descriptor *td, uintptr_t len, uintptr_t size; struct __go_open_array ret; - __go_assert (td->__code == GO_SLICE); + __go_assert ((td->__code & GO_CODE_MASK) == GO_SLICE); std = (const struct __go_slice_type *) td; ilen = (intgo) len; diff --git a/libgo/runtime/go-reflect-map.c b/libgo/runtime/go-reflect-map.c index ab116e85950..58e1b34a1ea 100644 --- a/libgo/runtime/go-reflect-map.c +++ b/libgo/runtime/go-reflect-map.c @@ -24,7 +24,7 @@ mapaccess (struct __go_map_type *mt, void *m, void *key) { struct __go_map *map = (struct __go_map *) m; - __go_assert (mt->__common.__code == GO_MAP); + __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP); if (map == NULL) return NULL; else @@ -40,7 +40,7 @@ mapassign (struct __go_map_type *mt, void *m, void *key, void *val) struct __go_map *map = (struct __go_map *) m; void *p; - __go_assert (mt->__common.__code == GO_MAP); + __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP); if (map == NULL) runtime_panicstring ("assignment to entry in nil map"); p = __go_map_index (map, key, 1); @@ -55,7 +55,7 @@ mapdelete 
(struct __go_map_type *mt, void *m, void *key) { struct __go_map *map = (struct __go_map *) m; - __go_assert (mt->__common.__code == GO_MAP); + __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP); if (map == NULL) return; __go_map_delete (map, key); @@ -81,7 +81,7 @@ mapiterinit (struct __go_map_type *mt, void *m) { struct __go_hash_iter *it; - __go_assert (mt->__common.__code == GO_MAP); + __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP); it = __go_alloc (sizeof (struct __go_hash_iter)); __go_mapiterinit ((struct __go_map *) m, it); return (unsigned char *) it; diff --git a/libgo/runtime/go-type.h b/libgo/runtime/go-type.h index 74e83400598..d7693353039 100644 --- a/libgo/runtime/go-type.h +++ b/libgo/runtime/go-type.h @@ -54,9 +54,11 @@ struct String; #define GO_STRUCT 25 #define GO_UNSAFE_POINTER 26 +#define GO_DIRECT_IFACE (1 << 5) +#define GO_GC_PROG (1 << 6) #define GO_NO_POINTERS (1 << 7) -#define GO_CODE_MASK 0x7f +#define GO_CODE_MASK 0x1f /* For each Go type the compiler constructs one of these structures. This is used for type reflection, interfaces, maps, and reference @@ -310,7 +312,8 @@ struct __go_struct_type static inline _Bool __go_is_pointer_type (const struct __go_type_descriptor *td) { - return td->__code == GO_PTR || td->__code == GO_UNSAFE_POINTER; + return ((td->__code & GO_CODE_MASK) == GO_PTR + || (td->__code & GO_CODE_MASK) == GO_UNSAFE_POINTER); } extern _Bool diff --git a/libgo/runtime/go-unsafe-pointer.c b/libgo/runtime/go-unsafe-pointer.c index 729e9a19736..71364f511b4 100644 --- a/libgo/runtime/go-unsafe-pointer.c +++ b/libgo/runtime/go-unsafe-pointer.c @@ -44,7 +44,7 @@ const uintptr unsafe_Pointer_gc[] = {sizeof(void*), GC_APTR, 0, GC_END}; const struct __go_type_descriptor unsafe_Pointer = { /* __code */ - GO_UNSAFE_POINTER, + GO_UNSAFE_POINTER | GO_DIRECT_IFACE, /* __align */ __alignof (void *), /* __field_align */ @@ -89,7 +89,7 @@ const struct __go_ptr_type pointer_unsafe_Pointer = /* __common */ { /* __code */ - GO_PTR, + GO_PTR | GO_DIRECT_IFACE, /* __align */ __alignof (void *), /* __field_align */ diff --git a/libgo/runtime/go-unsetenv.c b/libgo/runtime/go-unsetenv.c new file mode 100644 index 00000000000..409436a0d3f --- /dev/null +++ b/libgo/runtime/go-unsetenv.c @@ -0,0 +1,54 @@ +/* go-unsetenv.c -- unset an environment variable from Go. + + Copyright 2015 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. */ + +#include "config.h" + +#include <stddef.h> +#include <stdlib.h> + +#include "go-alloc.h" +#include "runtime.h" +#include "arch.h" +#include "malloc.h" + +/* Unset an environment variable from Go. This is called by + syscall.Unsetenv. */ + +void unsetenv_c (String) __asm__ (GOSYM_PREFIX "syscall.unsetenv_c"); + +void +unsetenv_c (String k) +{ + const byte *ks; + unsigned char *kn; + intgo len; + + ks = k.str; + if (ks == NULL) + ks = (const byte *) ""; + kn = NULL; + +#ifdef HAVE_UNSETENV + + if (ks != NULL && ks[k.len] != 0) + { + // Objects that are explicitly freed must be at least 16 bytes in size, + // so that they are not allocated using tiny alloc. 
+ len = k.len + 1; + if (len < TinySize) + len = TinySize; + kn = __go_alloc (len); + __builtin_memcpy (kn, ks, k.len); + ks = kn; + } + + unsetenv ((const char *) ks); + +#endif /* !defined(HAVE_UNSETENV) */ + + if (kn != NULL) + __go_free (kn); +} diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc index f240ffbd8ee..b05c5fa4e22 100644 --- a/libgo/runtime/malloc.goc +++ b/libgo/runtime/malloc.goc @@ -25,6 +25,7 @@ package runtime #define string __reflection #define KindPtr GO_PTR #define KindNoPointers GO_NO_POINTERS +#define kindMask GO_CODE_MASK // GCCGO SPECIFIC CHANGE // @@ -935,7 +936,7 @@ func SetFinalizer(obj Eface, finalizer Eface) { runtime_printf("runtime.SetFinalizer: first argument is nil interface\n"); goto throw; } - if(obj.__type_descriptor->__code != GO_PTR) { + if((obj.__type_descriptor->kind&kindMask) != GO_PTR) { runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.__type_descriptor->__reflection); goto throw; } @@ -956,14 +957,14 @@ func SetFinalizer(obj Eface, finalizer Eface) { if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) { // As an implementation detail we allow to set finalizers for an inner byte // of an object if it could come from tiny alloc (see mallocgc for details). - if(ot->__element_type == nil || (ot->__element_type->__code&KindNoPointers) == 0 || ot->__element_type->__size >= TinySize) { + if(ot->__element_type == nil || (ot->__element_type->kind&KindNoPointers) == 0 || ot->__element_type->__size >= TinySize) { runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.__object); goto throw; } } if(finalizer.__type_descriptor != nil) { runtime_createfing(); - if(finalizer.__type_descriptor->__code != GO_FUNC) + if((finalizer.__type_descriptor->kind&kindMask) != GO_FUNC) goto badfunc; ft = (const FuncType*)finalizer.__type_descriptor; if(ft->__dotdotdot || ft->__in.__count != 1) @@ -971,12 +972,12 @@ func SetFinalizer(obj Eface, finalizer Eface) { fint = *(Type**)ft->__in.__values; if(__go_type_descriptors_equal(fint, obj.__type_descriptor)) { // ok - same type - } else if(fint->__code == GO_PTR && (fint->__uncommon == nil || fint->__uncommon->__name == nil || obj.type->__uncommon == nil || obj.type->__uncommon->__name == nil) && __go_type_descriptors_equal(((const PtrType*)fint)->__element_type, ((const PtrType*)obj.type)->__element_type)) { + } else if((fint->kind&kindMask) == GO_PTR && (fint->__uncommon == nil || fint->__uncommon->__name == nil || obj.type->__uncommon == nil || obj.type->__uncommon->__name == nil) && __go_type_descriptors_equal(((const PtrType*)fint)->__element_type, ((const PtrType*)obj.type)->__element_type)) { // ok - not same type, but both pointers, // one or the other is unnamed, and same element type, so assignable. 
- } else if(fint->kind == GO_INTERFACE && ((const InterfaceType*)fint)->__methods.__count == 0) { + } else if((fint->kind&kindMask) == GO_INTERFACE && ((const InterfaceType*)fint)->__methods.__count == 0) { // ok - satisfies empty interface - } else if(fint->kind == GO_INTERFACE && __go_convert_interface_2(fint, obj.__type_descriptor, 1) != nil) { + } else if((fint->kind&kindMask) == GO_INTERFACE && __go_convert_interface_2(fint, obj.__type_descriptor, 1) != nil) { // ok - satisfies non-empty interface } else goto badfunc; diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c index b09054c02cc..0867abfd168 100644 --- a/libgo/runtime/mgc0.c +++ b/libgo/runtime/mgc0.c @@ -71,6 +71,7 @@ typedef struct __go_map Hmap; #define string __reflection #define KindPtr GO_PTR #define KindNoPointers GO_NO_POINTERS +#define kindMask GO_CODE_MASK // PtrType aka __go_ptr_type #define elem __element_type @@ -946,7 +947,7 @@ scanblock(Workbuf *wbuf, bool keepworking) continue; obj = eface->__object; - if((t->__code & ~KindNoPointers) == KindPtr) { + if((t->__code & kindMask) == KindPtr) { // Only use type information if it is a pointer-containing type. // This matches the GC programs written by cmd/gc/reflect.c's // dgcsym1 in case TPTR32/case TPTR64. See rationale there. @@ -984,7 +985,7 @@ scanblock(Workbuf *wbuf, bool keepworking) continue; obj = iface->__object; - if((t->__code & ~KindNoPointers) == KindPtr) { + if((t->__code & kindMask) == KindPtr) { // Only use type information if it is a pointer-containing type. // This matches the GC programs written by cmd/gc/reflect.c's // dgcsym1 in case TPTR32/case TPTR64. See rationale there. @@ -2369,6 +2370,8 @@ gc(struct gc_args *args) // Sweep all spans eagerly. while(runtime_sweepone() != (uintptr)-1) gcstats.npausesweep++; + // Do an additional mProf_GC, because all 'free' events are now real as well. + runtime_MProf_GC(); } runtime_MProf_GC(); @@ -2514,7 +2517,7 @@ runfinq(void* dummy __attribute__ ((unused))) f = &fb->fin[i]; fint = ((const Type**)f->ft->__in.array)[0]; - if(fint->__code == KindPtr) { + if((fint->__code & kindMask) == KindPtr) { // direct use of pointer param = &f->arg; } else if(((const InterfaceType*)fint)->__methods.__count == 0) { diff --git a/libgo/runtime/netpoll.goc b/libgo/runtime/netpoll.goc index 5308e01c8e9..2f3fa455f3d 100644 --- a/libgo/runtime/netpoll.goc +++ b/libgo/runtime/netpoll.goc @@ -79,9 +79,9 @@ static struct static bool netpollblock(PollDesc*, int32, bool); static G* netpollunblock(PollDesc*, int32, bool); -static void deadline(int64, Eface); -static void readDeadline(int64, Eface); -static void writeDeadline(int64, Eface); +static void deadline(Eface, uintptr); +static void readDeadline(Eface, uintptr); +static void writeDeadline(Eface, uintptr); static PollDesc* allocPollDesc(void); static intgo checkerr(PollDesc *pd, int32 mode); @@ -197,22 +197,25 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) { // Copy current seq into the timer arg. // Timer func will check the seq against current descriptor seq, // if they differ the descriptor was reused or timers were reset. - pd->rt.arg.type = (Type*)pd->seq; + pd->rt.arg.type = nil; // should be *pollDesc type descriptor. pd->rt.arg.data = pd; + pd->rt.seq = pd->seq; runtime_addtimer(&pd->rt); } else { if(pd->rd > 0) { pd->rt.fv = &readDeadlineFn; pd->rt.when = pd->rd; - pd->rt.arg.type = (Type*)pd->seq; + pd->rt.arg.type = nil; // should be *pollDesc type descriptor. 
pd->rt.arg.data = pd; + pd->rt.seq = pd->seq; runtime_addtimer(&pd->rt); } if(pd->wd > 0) { pd->wt.fv = &writeDeadlineFn; pd->wt.when = pd->wd; - pd->wt.arg.type = (Type*)pd->seq; + pd->wt.arg.type = nil; // should be *pollDesc type descriptor. pd->wt.arg.data = pd; + pd->wt.seq = pd->seq; runtime_addtimer(&pd->wt); } } @@ -389,19 +392,16 @@ netpollunblock(PollDesc *pd, int32 mode, bool ioready) } static void -deadlineimpl(int64 now, Eface arg, bool read, bool write) +deadlineimpl(Eface arg, uintptr seq, bool read, bool write) { PollDesc *pd; - uint32 seq; G *rg, *wg; - USED(now); pd = (PollDesc*)arg.data; - // This is the seq when the timer was set. - // If it's stale, ignore the timer event. - seq = (uintptr)arg.type; rg = wg = nil; runtime_lock(pd); + // Seq arg is seq when the timer was set. + // If it's stale, ignore the timer event. if(seq != pd->seq) { // The descriptor was reused or timers were reset. runtime_unlock(pd); @@ -429,21 +429,21 @@ deadlineimpl(int64 now, Eface arg, bool read, bool write) } static void -deadline(int64 now, Eface arg) +deadline(Eface arg, uintptr seq) { - deadlineimpl(now, arg, true, true); + deadlineimpl(arg, seq, true, true); } static void -readDeadline(int64 now, Eface arg) +readDeadline(Eface arg, uintptr seq) { - deadlineimpl(now, arg, true, false); + deadlineimpl(arg, seq, true, false); } static void -writeDeadline(int64 now, Eface arg) +writeDeadline(Eface arg, uintptr seq) { - deadlineimpl(now, arg, false, true); + deadlineimpl(arg, seq, false, true); } static PollDesc* diff --git a/libgo/runtime/runtime.c b/libgo/runtime/runtime.c index 496e77b75c5..6e0d164707d 100644 --- a/libgo/runtime/runtime.c +++ b/libgo/runtime/runtime.c @@ -59,8 +59,8 @@ runtime_gotraceback(bool *crash) static int32 argc; static byte** argv; -extern Slice os_Args __asm__ (GOSYM_PREFIX "os.Args"); -extern Slice syscall_Envs __asm__ (GOSYM_PREFIX "syscall.Envs"); +static Slice args; +Slice envs; void (*runtime_sysargs)(int32, uint8**); @@ -92,9 +92,9 @@ runtime_goargs(void) s = runtime_malloc(argc*sizeof s[0]); for(i=0; i<argc; i++) s[i] = runtime_gostringnocopy((const byte*)argv[i]); - os_Args.__values = (void*)s; - os_Args.__count = argc; - os_Args.__capacity = argc; + args.__values = (void*)s; + args.__count = argc; + args.__capacity = argc; } void @@ -109,9 +109,26 @@ runtime_goenvs_unix(void) s = runtime_malloc(n*sizeof s[0]); for(i=0; i<n; i++) s[i] = runtime_gostringnocopy(argv[argc+1+i]); - syscall_Envs.__values = (void*)s; - syscall_Envs.__count = n; - syscall_Envs.__capacity = n; + envs.__values = (void*)s; + envs.__count = n; + envs.__capacity = n; +} + +// Called from the syscall package. +Slice runtime_envs(void) __asm__ (GOSYM_PREFIX "syscall.runtime_envs"); + +Slice +runtime_envs() +{ + return envs; +} + +Slice os_runtime_args(void) __asm__ (GOSYM_PREFIX "os.runtime_args"); + +Slice +os_runtime_args() +{ + return args; } int32 @@ -127,8 +144,8 @@ runtime_atoi(const byte *p) static struct root_list runtime_roots = { nil, - { { &syscall_Envs, sizeof syscall_Envs }, - { &os_Args, sizeof os_Args }, + { { &envs, sizeof envs }, + { &args, sizeof args }, { nil, 0 } }, }; diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h index c96290a0b06..1f1358ae947 100644 --- a/libgo/runtime/runtime.h +++ b/libgo/runtime/runtime.h @@ -400,7 +400,7 @@ struct Timers // If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer. struct Timer { - int32 i; // heap index + intgo i; // heap index // Timer wakes up at when, and then at when+period, ... 
(period > 0 only) // each time calling f(now, arg) in the timer goroutine, so f must be @@ -409,6 +409,7 @@ struct Timer int64 period; FuncVal *fv; Eface arg; + uintptr seq; }; // Lock-free stack node. @@ -774,8 +775,6 @@ void runtime_printany(Eface) __asm__ (GOSYM_PREFIX "runtime.Printany"); void runtime_newTypeAssertionError(const String*, const String*, const String*, const String*, Eface*) __asm__ (GOSYM_PREFIX "runtime.NewTypeAssertionError"); -void runtime_newErrorString(String, Eface*) - __asm__ (GOSYM_PREFIX "runtime.NewErrorString"); void runtime_newErrorCString(const char*, Eface*) __asm__ (GOSYM_PREFIX "runtime.NewErrorCString"); diff --git a/libgo/runtime/runtime1.goc b/libgo/runtime/runtime1.goc index e643965fda5..6d8f09a6c5f 100644 --- a/libgo/runtime/runtime1.goc +++ b/libgo/runtime/runtime1.goc @@ -74,3 +74,16 @@ func sync.runtime_procPin() (p int) { func sync.runtime_procUnpin() { runtime_m()->locks--; } + +func sync_atomic.runtime_procPin() (p int) { + M *mp; + + mp = runtime_m(); + // Disable preemption. + mp->locks++; + p = mp->p->id; +} + +func sync_atomic.runtime_procUnpin() { + runtime_m()->locks--; +} diff --git a/libgo/runtime/time.goc b/libgo/runtime/time.goc index cb13bbf39a2..ee24b9c52df 100644 --- a/libgo/runtime/time.goc +++ b/libgo/runtime/time.goc @@ -66,9 +66,9 @@ static void siftdown(int32); // Ready the goroutine e.data. static void -ready(int64 now, Eface e) +ready(Eface e, uintptr seq) { - USED(now); + USED(seq); runtime_ready(e.__object); } @@ -91,6 +91,7 @@ runtime_tsleep(int64 ns, const char *reason) t.period = 0; t.fv = &readyv; t.arg.__object = g; + t.seq = 0; runtime_lock(&timers); addtimer(&t); runtime_parkunlock(&timers, reason); @@ -203,8 +204,9 @@ timerproc(void* dummy __attribute__ ((unused))) int64 delta, now; Timer *t; FuncVal *fv; - void (*f)(int64, Eface); + void (*f)(Eface, uintptr); Eface arg; + uintptr seq; for(;;) { runtime_lock(&timers); @@ -233,9 +235,10 @@ timerproc(void* dummy __attribute__ ((unused))) fv = t->fv; f = (void*)t->fv->fn; arg = t->arg; + seq = t->seq; runtime_unlock(&timers); __go_set_closure(fv); - f(now, arg); + f(arg, seq); // clear f and arg to avoid leak while sleeping for next timer f = nil; diff --git a/libgo/testsuite/gotest b/libgo/testsuite/gotest index 1bcd4b9d313..3596d727ff1 100755 --- a/libgo/testsuite/gotest +++ b/libgo/testsuite/gotest @@ -335,6 +335,15 @@ if [ "x$xgofiles" != "x" ]; then havex=true fi +testmain= +if $havex && fgrep 'func TestMain(' $xgofiles >/dev/null 2>&1; then + package=`grep '^package[ ]' $xgofiles | sed 1q | sed -e 's/.* //'` + testmain="${package}.TestMain" +elif test -n "$gofiles" && fgrep 'func TestMain(' $gofiles >/dev/null 2>&1; then + package=`grep '^package[ ]' $gofiles | sed 1q | sed -e 's/.* //'` + testmain="${package}.TestMain" +fi + set -e package=`echo ${srcdir} | sed -e 's|^.*libgo/go/||'` @@ -415,14 +424,19 @@ localname() { fi echo 'import "testing"' echo 'import __regexp__ "regexp"' # rename in case tested package is called regexp + if ! 
test -n "$testmain"; then + echo 'import __os__ "os"' + fi # test array echo echo 'var tests = []testing.InternalTest {' for i in $tests do n=$(testname $i) - j=$(localname $i) - echo ' {"'$n'", '$j'},' + if test "$n" != "TestMain"; then + j=$(localname $i) + echo ' {"'$n'", '$j'},' + fi done echo '}' @@ -467,8 +481,15 @@ func matchString(pat, str string) (result bool, err error) { } func main() { - testing.Main(matchString, tests, benchmarks, examples) -}' + m := testing.MainStart(matchString, tests, benchmarks, examples) +' + if test -n "$testmain"; then + echo " ${testmain}(m)" + else + echo ' __os__.Exit(m.Run())' + fi + + echo '}' }>_testmain.go case "x$dejagnu" in |
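The gotest changes above teach the libgo test driver about Go 1.4's TestMain hook: the script greps the test files for 'func TestMain(', keeps that function out of the generated tests slice, and emits a main() that builds an *testing.M with testing.MainStart, then either calls the package's TestMain or falls back to __os__.Exit(m.Run()). For reference, a package-level TestMain that this machinery would pick up looks roughly like the sketch below; the package name and the setup/teardown steps are hypothetical, not taken from the patch.

package mypkg

import (
	"os"
	"testing"
)

// TestMain, when present, is called instead of running the tests directly.
// The generated _testmain.go passes it the *testing.M built by
// testing.MainStart, mirroring what the updated gotest script emits.
func TestMain(m *testing.M) {
	// hypothetical per-package setup would run here, before any test
	code := m.Run() // run the registered tests and benchmarks
	// hypothetical teardown would run here
	os.Exit(code)
}

If no TestMain is found, the generated main() simply exits with m.Run()'s status, which matches the else branch emitted by the script above.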