Diffstat (limited to 'libbanshee/engine')
51 files changed, 16222 insertions, 0 deletions
diff --git a/libbanshee/engine/ChangeLog b/libbanshee/engine/ChangeLog new file mode 100644 index 00000000000..5309328c6fa --- /dev/null +++ b/libbanshee/engine/ChangeLog @@ -0,0 +1,14 @@ +2003-07-01 Daniel Berlin <dberlin@dberlin.org> + + * bool.h: Can't include gcc's system.h and bool.h at the same time. + +2003-06-25 Daniel Berlin <dberlin@dberlin.org> + + * compiler.h: Fix the grouping of the #if + +2003-02-02 Daniel Berlin <dberlin@dberlin.org> + + * compiler.h: Only define HAVE_VARIADIC_MACROS if IN_GCC is not + defined. + * util.h: Add prototypes for min and max. + diff --git a/libbanshee/engine/Makefile.am b/libbanshee/engine/Makefile.am new file mode 100644 index 00000000000..d3a8ba6d805 --- /dev/null +++ b/libbanshee/engine/Makefile.am @@ -0,0 +1,6 @@ +AM_CFLAGS = -I$(srcdir)/../libcompat -I$(top_srcdir)/../include -I$(srcdir)/../include -I. -Ddeletes= -Dtraditional= -Dsameregion= -Dparentptr= @ac_libbanshee_warn_cflags@ +noinst_LIBRARIES = libbansheeengine.a +libbansheeengine_a_SOURCES = array.c bounds.c hash.c hashset.c list.c stamp.c \ +ufind.c util.c setif-sort.c termhash.c setif-var.c flow-var.c flowrow-sort.c \ +setst-var.c jcollection.c banshee.c buffer.c setst-sort.c term-var.c \ +term-sort.c dot.c diff --git a/libbanshee/engine/Makefile.in b/libbanshee/engine/Makefile.in new file mode 100644 index 00000000000..ddc83d51db3 --- /dev/null +++ b/libbanshee/engine/Makefile.in @@ -0,0 +1,405 @@ +# Makefile.in generated by automake 1.7.6 from Makefile.am. +# @configure_input@ + +# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 +# Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +srcdir = @srcdir@ +top_srcdir = @top_srcdir@ +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +top_builddir = .. 
+ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +INSTALL = @INSTALL@ +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +ACLOCAL = @ACLOCAL@ +AMDEP_FALSE = @AMDEP_FALSE@ +AMDEP_TRUE = @AMDEP_TRUE@ +AMTAR = @AMTAR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ +MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ +MAKEINFO = @MAKEINFO@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_RANLIB = @ac_ct_RANLIB@ +ac_ct_STRIP = @ac_ct_STRIP@ +ac_libbanshee_warn_cflags = @ac_libbanshee_warn_cflags@ +am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ +am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +bindir = @bindir@ +build_alias = @build_alias@ +datadir = @datadir@ +exec_prefix = @exec_prefix@ +host_alias = @host_alias@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +oldincludedir = @oldincludedir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +AM_CFLAGS = -I$(srcdir)/../libcompat -I$(top_srcdir)/../include -I$(srcdir)/../include -I. 
-Ddeletes= -Dtraditional= -Dsameregion= -Dparentptr= @ac_libbanshee_warn_cflags@ +noinst_LIBRARIES = libbansheeengine.a +libbansheeengine_a_SOURCES = array.c bounds.c hash.c hashset.c list.c stamp.c \ +ufind.c util.c setif-sort.c termhash.c setif-var.c flow-var.c flowrow-sort.c \ +setst-var.c jcollection.c banshee.c buffer.c setst-sort.c term-var.c \ +term-sort.c dot.c + +subdir = engine +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +LIBRARIES = $(noinst_LIBRARIES) + +libbansheeengine_a_AR = $(AR) cru +libbansheeengine_a_LIBADD = +am_libbansheeengine_a_OBJECTS = array.$(OBJEXT) bounds.$(OBJEXT) \ + hash.$(OBJEXT) hashset.$(OBJEXT) list.$(OBJEXT) stamp.$(OBJEXT) \ + ufind.$(OBJEXT) util.$(OBJEXT) setif-sort.$(OBJEXT) \ + termhash.$(OBJEXT) setif-var.$(OBJEXT) flow-var.$(OBJEXT) \ + flowrow-sort.$(OBJEXT) setst-var.$(OBJEXT) \ + jcollection.$(OBJEXT) banshee.$(OBJEXT) buffer.$(OBJEXT) \ + setst-sort.$(OBJEXT) term-var.$(OBJEXT) term-sort.$(OBJEXT) \ + dot.$(OBJEXT) +libbansheeengine_a_OBJECTS = $(am_libbansheeengine_a_OBJECTS) + +DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/../depcomp +am__depfiles_maybe = depfiles +@AMDEP_TRUE@DEP_FILES = ./$(DEPDIR)/array.Po ./$(DEPDIR)/banshee.Po \ +@AMDEP_TRUE@ ./$(DEPDIR)/bounds.Po ./$(DEPDIR)/buffer.Po \ +@AMDEP_TRUE@ ./$(DEPDIR)/dot.Po ./$(DEPDIR)/flow-var.Po \ +@AMDEP_TRUE@ ./$(DEPDIR)/flowrow-sort.Po ./$(DEPDIR)/hash.Po \ +@AMDEP_TRUE@ ./$(DEPDIR)/hashset.Po ./$(DEPDIR)/jcollection.Po \ +@AMDEP_TRUE@ ./$(DEPDIR)/list.Po ./$(DEPDIR)/setif-sort.Po \ +@AMDEP_TRUE@ ./$(DEPDIR)/setif-var.Po ./$(DEPDIR)/setst-sort.Po \ +@AMDEP_TRUE@ ./$(DEPDIR)/setst-var.Po ./$(DEPDIR)/stamp.Po \ +@AMDEP_TRUE@ ./$(DEPDIR)/term-sort.Po ./$(DEPDIR)/term-var.Po \ +@AMDEP_TRUE@ ./$(DEPDIR)/termhash.Po ./$(DEPDIR)/ufind.Po \ +@AMDEP_TRUE@ ./$(DEPDIR)/util.Po +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +DIST_SOURCES = $(libbansheeengine_a_SOURCES) +DIST_COMMON = ChangeLog Makefile.am Makefile.in +SOURCES = $(libbansheeengine_a_SOURCES) + +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4) + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu engine/Makefile +Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe) + +AR = ar + +clean-noinstLIBRARIES: + -test -z "$(noinst_LIBRARIES)" || rm -f $(noinst_LIBRARIES) +libbansheeengine.a: $(libbansheeengine_a_OBJECTS) $(libbansheeengine_a_DEPENDENCIES) + -rm -f libbansheeengine.a + $(libbansheeengine_a_AR) libbansheeengine.a $(libbansheeengine_a_OBJECTS) $(libbansheeengine_a_LIBADD) + $(RANLIB) libbansheeengine.a + +mostlyclean-compile: + -rm -f *.$(OBJEXT) core *.core + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/array.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/banshee.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bounds.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/buffer.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dot.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/flow-var.Po@am__quote@ 
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/flowrow-sort.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hash.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hashset.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jcollection.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/list.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/setif-sort.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/setif-var.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/setst-sort.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/setst-var.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/stamp.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/term-sort.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/term-var.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/termhash.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ufind.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/util.Po@am__quote@ + +distclean-depend: + -rm -rf ./$(DEPDIR) + +.c.o: +@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +@am__fastdepCC_TRUE@ -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<; \ +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +@am__fastdepCC_TRUE@ fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `test -f '$<' || echo '$(srcdir)/'`$< + +.c.obj: +@am__fastdepCC_TRUE@ if $(COMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" \ +@am__fastdepCC_TRUE@ -c -o $@ `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`; \ +@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; \ +@am__fastdepCC_TRUE@ else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; \ +@am__fastdepCC_TRUE@ fi +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi` +uninstall-info-am: + +ETAGS = etags +ETAGSFLAGS = + +CTAGS = ctags +CTAGSFLAGS = + +tags: TAGS + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + test -z "$(ETAGS_ARGS)$$tags$$unique" \ + || $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique + +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + 
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) + +top_distdir = .. +distdir = $(top_distdir)/$(PACKAGE)-$(VERSION) + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ + list='$(DISTFILES)'; for file in $$list; do \ + case $$file in \ + $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ + $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ + esac; \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test "$$dir" != "$$file" && test "$$dir" != "."; then \ + dir="/$$dir"; \ + $(mkinstalldirs) "$(distdir)$$dir"; \ + else \ + dir=''; \ + fi; \ + if test -d $$d/$$file; then \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LIBRARIES) + +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -rm -f Makefile $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-noinstLIBRARIES mostlyclean-am + +distclean: distclean-am + +distclean-am: clean-am distclean-compile distclean-depend \ + distclean-generic distclean-tags + +dvi: dvi-am + +dvi-am: + +info: info-am + +info-am: + +install-data-am: + +install-exec-am: + +install-info: install-info-am + +install-man: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-info-am + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-noinstLIBRARIES ctags distclean distclean-compile \ + distclean-depend distclean-generic distclean-tags distdir dvi \ + dvi-am info info-am install install-am install-data \ + install-data-am install-exec install-exec-am install-info \ + install-info-am install-man install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \ + uninstall-am uninstall-info-am + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/libbanshee/engine/array.c b/libbanshee/engine/array.c new file mode 100644 index 00000000000..be297533fa6 --- /dev/null +++ b/libbanshee/engine/array.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
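The -Ddeletes= -Dtraditional= -Dsameregion= -Dparentptr= definitions in AM_CFLAGS above erase the annotation qualifiers used throughout these sources, so the annotated C preprocesses to plain C. A sketch of the effect, using declarations that appear in the files below:

/* With -Ddeletes= and -Dsameregion= on the command line, the
   annotated declarations preprocess to plain C: */
void engine_reset(void) deletes;   /* -> void engine_reset(void); */
void *sameregion data;             /* -> void *data;              */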
+ * + */ + +#include <regions.h> +#include <assert.h> +#include <limits.h> +#include "array.h" + +struct array { + region sameregion r; + void *sameregion data; + size_t elemsize; + type_t elemtype; + size_t nelems, nalloc; +}; + +struct array *new_array(region r, size_t initialsize, + size_t typesize, type_t typeinfo) +{ + struct array *a = ralloc(r, struct array); + + a->r = r; + a->data = typed_rarrayalloc(r, initialsize, typesize, typeinfo); + a->elemsize = typesize; + a->elemtype = typeinfo; + a->nelems = 0; + a->nalloc = initialsize; + + return a; +} + +void *array_extend(struct array *a, int by) +{ + size_t oldelems = a->nelems; + + if (by < 0) + assert(((unsigned int)-by) <= a->nelems && by != INT_MIN); + else if (a->nelems + by > a->nalloc) + { + size_t newsize = a->nalloc * 2 + by; + void *newdata = typed_rarrayalloc(a->r, newsize, a->elemsize, a->elemtype); + + /* XXX: could work harder to support really large array sizes + (this code will fail for a->nalloc >= (max(size_t)-by)/2) */ + assert(newsize > a->nalloc); /* die when we get really big */ + typed_rarraycopy(newdata, a->data, a->nelems, a->elemsize, a->elemtype); + a->data = newdata; + a->nalloc = newsize; + } + a->nelems += by; + + return (char *)a->data + a->elemsize * oldelems; +} + +void array_reset(struct array *a) +{ + a->nelems = 0; +} + +size_t array_length(struct array *a) +{ + return a->nelems; +} + +void *array_data(struct array *a) +{ + return a->data; +} + diff --git a/libbanshee/engine/array.h b/libbanshee/engine/array.h new file mode 100644 index 00000000000..6fe3a6cf17d --- /dev/null +++ b/libbanshee/engine/array.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + +#ifndef ARRAY_H +#define ARRAY_H + +/* A region-based growable array type */ + +struct array; + +struct array *new_array(region r, size_t initialsize, + size_t typesize, type_t typeinfo); +void *array_extend(struct array *a, int by); +void array_reset(struct array *a); +size_t array_length(struct array *a); +void *array_data(struct array *a); + + +#define DECLARE_ARRAY(name, type) \ +typedef struct name ## _a *name; \ +name new_ ## name(region r, size_t initialsize); \ +type *name ## _extend(name a, int by); \ +void name ## _reset(name a); \ +size_t name ## _length(name a); \ +type *name ## _data(name a); + +#define DEFINE_ARRAY(name, type) \ +name new_ ## name(region r, size_t initialsize) \ +{ \ + return (name)new_array(r, initialsize, sizeof(type), rctypeof(type)); \ +} \ +type *name ## _extend(name a, int by) \ +{ \ + return array_extend((struct array *)a, by); \ +} \ +void name ## _reset(name a) \ +{ \ + return array_reset((struct array *)a); \ +} \ +size_t name ## _length(name a) \ +{ \ + return array_length((struct array *)a); \ +} \ +type *name ## _data(name a) \ +{ \ + return array_data((struct array *)a); \ +} + +#endif diff --git a/libbanshee/engine/banshee.c b/libbanshee/engine/banshee.c new file mode 100644 index 00000000000..54b975afd9c --- /dev/null +++ b/libbanshee/engine/banshee.c @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
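A minimal usage sketch for the growable-array wrappers above (not part of the patch; assumes the regions library from ../libcompat for region, newregion, and rctypeof):

#include <assert.h>
#include <regions.h>
#include "array.h"

/* Instantiate a typed wrapper around the generic struct array */
DECLARE_ARRAY(int_array, int)
DEFINE_ARRAY(int_array, int)

static void demo(region r)
{
  int_array a = new_int_array(r, 4);  /* initial capacity: 4 ints */
  int i;

  for (i = 0; i < 100; i++)
    *int_array_extend(a, 1) = i;      /* array_extend grows the backing
                                         store (roughly doubling) on demand */

  assert(int_array_length(a) == 100);
}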
+ * + */ + +#include <stdio.h> +#include <regions.h> +#include "banshee.h" +#include "setif-sort.h" +#include "setst-sort.h" +#include "flowrow-sort.h" +#include "setif-var.h" + +DEFINE_LIST(gen_e_list,gen_e) + +void engine_init(void) +{ + region_init(); + stamp_init(); +} + +void engine_reset(void) deletes +{ + stamp_reset(); +} + +/* TODO */ +void engine_update(void) +{ +} + +void engine_stats(FILE *f) +{ + setif_print_stats(f); + setst_print_stats(f); + flowrow_print_stats(f); +} + +void print_constraint_graphs(FILE *f) +{ + setif_print_constraint_graph(f); +} diff --git a/libbanshee/engine/banshee.h b/libbanshee/engine/banshee.h new file mode 100644 index 00000000000..f9b647d02a7 --- /dev/null +++ b/libbanshee/engine/banshee.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
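The expected lifecycle, per banshee.c above, is engine_init() once at startup, constraint construction through the individual sort interfaces, and engine_stats()/engine_reset() at the end. A hypothetical driver sketch (not part of the patch):

#include <stdio.h>
#include "banshee.h"

int main(void)
{
  engine_init();          /* initializes the region and stamp modules */
  /* ... build and solve constraints via setif/setst/flowrow ... */
  engine_stats(stdout);   /* prints per-sort statistics */
  engine_reset();         /* discards stamp state */
  return 0;
}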
+ * + */ + +#ifndef BANSHEE_H +#define BANSHEE_H + +#include <stdio.h> +#include "linkage.h" +#include "stamp.h" +#include "list.h" +#include "util.h" +#include "dot.h" + +#define ALIAS_TYPE -2 +#define VAR_TYPE -1 +#define ZERO_TYPE 0 +#define ONE_TYPE 1 +#define UNION_TYPE 2 +#define INTER_TYPE 3 +#define CONSTANT_TYPE 4 + +EXTERN_C_BEGIN + + +#ifdef NONSPEC + +typedef enum sort_kind +{ + flowrow_sort, + setif_sort, + setst_sort, + flowterm_sort, + term_sort +} sort_kind; + +typedef struct gen_e +{ + sort_kind sort; +} *gen_e; +#else +typedef void *gen_e; +#endif + +DECLARE_LIST(gen_e_list,gen_e) + + typedef void (*gen_e_pr_fn_ptr) (FILE *, gen_e); + +/* + Function pointers that are common to all sorts +*/ + +/* inclusion */ +typedef void (*incl_fn_ptr) (gen_e, gen_e) deletes; + +/* match constructed terms */ +typedef void (*con_match_fn_ptr) (gen_e, gen_e) deletes; + +/* make fresh variables */ +typedef gen_e (*fresh_fn_ptr) (const char *); +typedef gen_e (*fresh_small_fn_ptr) (const char *); +typedef gen_e (*fresh_large_fn_ptr) (const char *); + +/* get a stamp */ +typedef stamp (*get_stamp_fn_ptr) (gen_e); + +/* extract a term from a proj pat */ +typedef gen_e (*get_proj_fn_ptr) (gen_e_list); + +void engine_init(void); +void engine_reset(void) deletes; +void engine_update(void); +void engine_stats(FILE *f); + +void print_constraint_graphs(FILE *f); + +EXTERN_C_END + +#endif /* BANSHEE_H */ diff --git a/libbanshee/engine/bool.h b/libbanshee/engine/bool.h new file mode 100644 index 00000000000..87638fc2dc6 --- /dev/null +++ b/libbanshee/engine/bool.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef BOOL_H +#define BOOL_H +#ifndef GCC_SYSTEM_H +#include "config.h" +/* 1 if we have _Bool. */ +#ifndef HAVE__BOOL +# define HAVE__BOOL \ + ((GCC_VERSION >= 3000) || (__STDC_VERSION__ >= 199901L)) +#endif +/* Provide some sort of boolean type. We use stdbool.h if it's + available. 
This must be after all inclusion of system headers, + as some of them will mess us up. */ +#undef bool +#undef true +#undef false +#undef TRUE +#undef FALSE + +#ifdef HAVE_STDBOOL_H +# include <stdbool.h> +#else +# if !HAVE__BOOL +typedef char _Bool; +# endif +# define bool _Bool +# define true 1 +# define false 0 +#endif + +#define TRUE true +#define FALSE false +#endif +#endif diff --git a/libbanshee/engine/bounds.c b/libbanshee/engine/bounds.c new file mode 100644 index 00000000000..9b14b75b4d9 --- /dev/null +++ b/libbanshee/engine/bounds.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <stdlib.h> +#include <assert.h> +#include "bounds.h" + +struct bounds +{ + hash_set set; + gen_e_list elems; +}; + +bounds bounds_create(region r) +{ + bounds result; + + result = ralloc(r, struct bounds); + result->set = hs_create(r); + result->elems = new_gen_e_list(r); + + return result; +} + +gen_e_list bounds_exprs(bounds b) +{ + return b->elems; +} + +bool bounds_add(bounds b, gen_e e, stamp s) +{ + if (hs_member(b->set, s)) + return TRUE; + + else + { + gen_e_list_cons(e,b->elems); + return FALSE; + } +} + +bool bounds_empty(bounds b) +{ + return (gen_e_list_empty(b->elems)); +} + +bool bounds_query(bounds b, stamp x) +{ + return (hs_query(b->set, x)); +} + +void bounds_set(bounds b,gen_e_list l) +{ + b->elems = l; +} + +void bounds_delete(bounds b) +{ + hs_delete(b->set); +} + + diff --git a/libbanshee/engine/bounds.h b/libbanshee/engine/bounds.h new file mode 100644 index 00000000000..8b52247b362 --- /dev/null +++ b/libbanshee/engine/bounds.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef BOUNDS_H +#define BOUNDS_H + +#include "linkage.h" +#include "banshee.h" +#include "stamp.h" +#include "hashset.h" + +EXTERN_C_BEGIN + +typedef struct bounds *bounds; + +bounds bounds_create(region r); + +gen_e_list bounds_exprs(bounds); + +/* returns true if the bound was already present */ +bool bounds_add(bounds,gen_e,stamp); +bool bounds_query(bounds,stamp); +bool bounds_empty(bounds); +void bounds_delete(bounds); +void bounds_set(bounds,gen_e_list); + +EXTERN_C_END + +#endif /* BOUNDS_H */ diff --git a/libbanshee/engine/buffer.c b/libbanshee/engine/buffer.c new file mode 100644 index 00000000000..68f062d365c --- /dev/null +++ b/libbanshee/engine/buffer.c @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
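A usage sketch for the bounds interface above (not part of the patch; per the header comment, bounds_add returns true when the bound was already present — the assumption here is a gen_e e and its stamp s obtained from one of the sort modules):

#include <assert.h>
#include <regions.h>
#include "bounds.h"

static gen_e_list record_bound(region r, gen_e e, stamp s)
{
  bounds b = bounds_create(r);
  bool already;

  already = bounds_add(b, e, s);  /* FALSE: e is consed onto the list */
  assert(!already);
  return bounds_exprs(b);         /* every expression added so far */
}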
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <assert.h> +#include <stdio.h> +#include <string.h> +#include "buffer.h" + +/* Invariant: buffer always null-terminated */ +struct growbuf +{ + region r; + unsigned int maxsize, cursize; + char *buffer; +}; + +/* Make a new buffer with initial size */ +growbuf growbuf_new(region r, int size) +{ + growbuf b = ralloc(r, struct growbuf); + + assert(size > 0); + b->r = r; + b->maxsize = size; /* Force some growth! */ + b->cursize = 1; + b->buffer = rstralloc(r, size); + b->buffer[0] = '\0'; + return b; +} + +/* Empty a buffer */ +void growbuf_reset(growbuf b) +{ + assert(b->maxsize > 0); + b->cursize = 1; + b->buffer[0] = '\0'; +} + +/* Print to a buffer */ +int gprintf(growbuf b, const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + return gvprintf(b, fmt, args); +} + +/* Print to a buffer */ +int gvprintf(growbuf b, const char *fmt, va_list args) +{ + int nchars; + + if (!fmt) /* Bug (?)/feature of vsnprintf -- printing \0 returns -1, + goes into infinite loop. */ + return 0; + while (1) + { + char *bufStart; + int sizeLeft; + + bufStart = b->buffer + b->cursize - 1; /* chop trailing \0 */ + sizeLeft = b->maxsize - b->cursize + 1; /* +1 since we're chopping + the trailing \0 */ + assert(*bufStart == '\0'); + nchars = vsnprintf(bufStart, sizeLeft, fmt, args); + if (nchars > -1 && nchars < sizeLeft) + { + b->cursize += nchars; /* nchars doesn't include \0, + but we overwrote our \0 */ + break; + } + else + { + /* How much room do we need? In the new glibc, nchars + tells us how much (not including the trailing null). + So we need the current size, -1 since we'll remove the null, + plus the new size, plus 1 for the new null. */ + int newSize = (nchars > -1) ? b->cursize - 1 + nchars + 1 + : b->maxsize * 2; + char *newBuf; + + /* fprintf(stderr, "Reallocating buffer, newSize=%d\n", newSize); */ + newBuf = rstralloc(b->r, newSize); + memcpy(newBuf, b->buffer, b->cursize); + newBuf[b->cursize-1] = '\0'; /* vsnprintf has printed something! */ + b->buffer = newBuf; + b->maxsize = newSize; + /* b->cursize unchanged */ + } + } + return nchars; +} + +/* Get the contents of a buffer */ +char *growbuf_contents(growbuf b) +{ + return b->buffer; +} + +bool growbuf_empty(growbuf b) +{ + return b->cursize == 1; /* Buffer always null terminated */ +} diff --git a/libbanshee/engine/buffer.h b/libbanshee/engine/buffer.h new file mode 100644 index 00000000000..c270ade32c3 --- /dev/null +++ b/libbanshee/engine/buffer.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef BUFFER_H +#define BUFFER_H + +/* + * Growable buffers + * + * Always null terminated. + */ + +#include <stdarg.h> +#include "bool.h" +#include "regions.h" +#include "linkage.h" + +EXTERN_C_BEGIN + +typedef struct growbuf *growbuf; + +growbuf growbuf_new(region, int); /* Make a new buffer with initial size */ +void growbuf_reset(growbuf); /* Empty a buffer */ +int gprintf(growbuf, const char *, ...); /* Print to a buffer */ +int gvprintf(growbuf, const char *, va_list); /* Print to a buffer */ +char *growbuf_contents(growbuf); /* Get the contents of a buffer */ +bool growbuf_empty(growbuf); /* Return true iff buffer is empty */ + +EXTERN_C_END + +#endif diff --git a/libbanshee/engine/compiler.h b/libbanshee/engine/compiler.h new file mode 100644 index 00000000000..6c90a63d26c --- /dev/null +++ b/libbanshee/engine/compiler.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
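A usage sketch for the growable buffers declared above (not part of the patch):

#include <stdio.h>
#include <regions.h>
#include "buffer.h"

static void demo(region r)
{
  growbuf b = growbuf_new(r, 16);  /* small initial size; gvprintf grows it */
  int i;

  for (i = 0; i < 1000; i++)
    gprintf(b, "%d ", i);          /* reallocates within the region as needed */

  puts(growbuf_contents(b));       /* contents are always null-terminated */
  growbuf_reset(b);                /* empty the buffer, keep the allocation */
}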
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef COMPILER_H +#define COMPILER_H + +# if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +# define HAVE_C99 +# endif + +# if 0 +/*(defined(__GNUC__) || defined(HAVE_C99)) && !defined (IN_GCC) */ +# define HAVE_VARIADIC_MACROS +# endif + + +# if !defined(__GNUC__) && !defined(__attribute__) +# define __attribute__(attributes) +# endif + +#endif /* !COMPILER_H */ diff --git a/libbanshee/engine/dot.c b/libbanshee/engine/dot.c new file mode 100644 index 00000000000..73c772fee53 --- /dev/null +++ b/libbanshee/engine/dot.c @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
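The __attribute__ fallback above lets GCC-style annotations pass harmlessly through other compilers. Illustratively (a hypothetical declaration, not from the patch):

#include "compiler.h"

/* Under a non-GNU compiler the fallback macro erases the attribute, */
int table_size(void) __attribute__((pure));
/* ...so this preprocesses to plain: int table_size(void); */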
+ * + */ + +#include <assert.h> +#include <regions.h> +#include "dot.h" +#include "hash.h" + +static FILE *of; +static hash_table node_hash_table; +static region dot_region; +static int node_count; +static const char *edge_op; + +static void print_n_attrs(node_attr *attrs, int n) +{ + int i; + fputc('[',of); + + for (i = 0; i < n; i++) + { + const char *name; + switch (attrs[i].name) + { + case n_color: + name = "color"; + break; + case n_fontcolor: + name = "fontcolor"; + break; + case n_fontname: + name = "fontname"; + break; + case n_fontsize: + name = "fontsize"; + break; + case n_height: + name = "height"; + break; + case n_width: + name = "width"; + break; + case n_label: + name = "label"; + break; + case n_layer: + name = "layer"; + break; + case n_shape: + name = "shape"; + break; + case n_shapefile: + name = "shapefile"; + break; + case n_style: + name = "style"; + break; + default: + name = ""; + assert(0); + break; + } + if (i > 0) + fputc(',',of); + fprintf(of,"%s = %s",name,attrs[i].value); + } + + fputc(']',of); +} + +static void print_e_attrs(edge_attr *attrs, int n) +{ + int i; + fputc('[',of); + for (i = 0; i < n; i++) + { + const char *name; + switch(attrs[i].name) + { + case e_color: + name = "color"; + break; + case e_decorate: + name = "decorate"; + break; + case e_dir: + name = "dir"; + break; + case e_fontcolor: + name = "fontcolor"; + break; + case e_fontname: + name = "fontname"; + break; + case e_fontsize: + name = "fontsize"; + break; + case e_id: + name = "id"; + break; + case e_label: + name = "label"; + break; + case e_layer: + name = "layer"; + break; + case e_minlen: + name = "minlen"; + break; + case e_style: + name = "style"; + break; + case e_weight: + name = "weight"; + break; + default : + name = ""; + assert(0); + break; + } + if (i > 0) + fputc(',',of); + fprintf(of,"%s = %s",name,attrs[i].value); + } + fputc(']',of); +} + +static void print_g_attrs(graph_attr *attrs, int n) +{ + int i; + fputc('[',of); + + for (i = 0; i < n; i++) + { + const char *name; + switch (attrs[i].name) + { + case g_center: + name = "center"; + break; + case g_clusterrank: + name = "clusterrank"; + break; + case g_color: + name = "color"; + break; + case g_concentrate: + name = "concentrate"; + break; + case g_fontcolor: + name = "fontcolor"; + break; + case g_fontname: + name = "fontname"; + break; + case g_fontsize: + name = "fontsize"; + break; + case g_label: + name = "label"; + break; + case g_layerseq: + name = "layerseq"; + break; + case g_margin: + name = "margin"; + break; + case g_mclimit: + name = "mclimit"; + break; + case g_nodesep: + name = "nodesep"; + break; + case g_nslimit: + name = "nslimit"; + break; + case g_ordering: + name = "ordering"; + break; + case g_orientation: + name = "orientation"; + break; + case g_page: + name = "page"; + break; + case g_rank: + name = "rank"; + break; + case g_rankdir: + name = "rankdir"; + break; + case g_ranksep: + name = "ranksep"; + break; + case g_ratio: + name = "ratio"; + break; + case g_size: + name = "size"; + break; + default : + name = ""; + assert(0); + break; + } + if (i > 0) + fputc(',',of); + fprintf(of,"%s = %s",name,attrs[i].value); + } + fputc(']',of); +} + + +void dot_start(FILE *to,const char *name,bool is_directed,bool is_strict) +{ + const char *graph_type,*strict; + + + node_count = 0; + dot_region = newregion(); + node_hash_table = make_string_hash_table(dot_region,8,TRUE); + of = to; + + if (is_directed) + { + edge_op = "->"; + graph_type = "digraph"; + } + else + { + edge_op = "--"; + graph_type = 
"graph"; + } + + if (is_strict) + strict = "strict"; + else + strict = ""; + + fprintf(of,"%s %s %s{\n",strict,graph_type,name); + +} + +void dot_global_graph_style(graph_attr *attrs, int n) +{ + fputs("graph ",of); + print_g_attrs(attrs,n); + fputc(';',of); + fputc('\n',of); +} + +void dot_global_edge_style(edge_attr *attrs, int n) +{ + fputs("edge ",of); + print_e_attrs(attrs,n); + fputc(';',of); + fputc('\n',of); +} + +void dot_global_node_style(node_attr *attrs, int n) +{ + fputs("node ",of); + print_n_attrs(attrs,n); + fputc(';',of); + fputc('\n',of); +} + +/* by default, set the node's name to label */ +static void declare_node(dot_node n, char *label) +{ + int i; + char mangled[512]; + + if (label[0] == '\"') + mangled[0] = 's'; + else + mangled[0] = label[0]; + + for (i = 1; label[i] && i < 512 ;i++) + { + if (label[i] == '\"') + mangled[i] = '_'; + else mangled[i] = label[i]; + } + mangled[i] = '\0'; + + fprintf(of,"nd_%d [label=\"%s\"]\n",n,mangled); +} + +dot_node dot_get_node(char *label) deletes +{ + dot_node result; + if (!hash_table_lookup(node_hash_table,(hash_key)label,(hash_data *)(char *)&result)) + { + dot_node newnode = node_count++; + + declare_node(newnode,label); + hash_table_insert(node_hash_table, + (hash_key)rstrdup(dot_region,label), + (hash_data)newnode); + + return newnode; + } + else + return result; + +} + +void dot_node_style(dot_node node,node_attr *attrs, int n) +{ + fprintf(of,"nd_%d ",node); + print_n_attrs(attrs,n); + fputc(';',of); + fputc('\n',of); +} + +void dot_plain_edge(dot_node from, dot_node to) +{ + fprintf(of,"nd_%d %s nd_%d;\n",from,edge_op,to); +} + +void dot_styled_edge(dot_node from, dot_node to, edge_attr *attrs, int n) +{ + fprintf(of,"nd_%d %s nd_%d ",from,edge_op,to); + print_e_attrs(attrs,n); + fputc(';',of); + fputc('\n',of); +} + +void dot_end(void) deletes +{ + fputc('}',of); + hash_table_delete(node_hash_table); + deleteregion_ptr(&dot_region); +} diff --git a/libbanshee/engine/dot.h b/libbanshee/engine/dot.h new file mode 100644 index 00000000000..7e221e68145 --- /dev/null +++ b/libbanshee/engine/dot.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef DOT_H +#define DOT_H + +#include <stdio.h> +#include "linkage.h" +#include "bool.h" + +EXTERN_C_BEGIN + +typedef int dot_node; + +typedef struct +{ + enum n_attrs + { + n_color, + n_fontcolor, + n_fontname, + n_fontsize, + n_height, + n_width, + n_label, + n_layer, + n_shape, + n_shapefile, + n_style + } name; + const char *value; +} node_attr; + +typedef struct +{ + enum e_attrs + { + e_color, + e_decorate, + e_dir, + e_fontcolor, + e_fontname, + e_fontsize, + e_id, + e_label, + e_layer, + e_minlen, + e_style, + e_weight + } name; + const char *value; +} edge_attr; + +typedef struct +{ + enum g_attrs + { + g_center, + g_clusterrank, + g_color, + g_concentrate, + g_fontcolor, + g_fontname, + g_fontsize, + g_label, + g_layerseq, + g_margin, + g_mclimit, + g_nodesep, + g_nslimit, + g_ordering, + g_orientation, + g_page, + g_rank, + g_rankdir, + g_ranksep, + g_ratio, + g_size + } name; + const char *value; +} graph_attr; + +void dot_start(FILE *to,const char *name,bool directed,bool strict); + +void dot_global_graph_style(graph_attr *attrs,int n); +void dot_global_edge_style(edge_attr *attrs,int n); +void dot_global_node_style(node_attr *attrs,int n); + +dot_node dot_get_node(char *label) deletes; +void dot_node_style(dot_node node,node_attr *attrs,int n); + +void dot_plain_edge(dot_node from, dot_node to); +void dot_styled_edge(dot_node from, dot_node to, edge_attr *attrs,int n); + +void dot_end(void) deletes; + +EXTERN_C_END + +#endif /* DOT_H */ diff --git a/libbanshee/engine/flow-var.c b/libbanshee/engine/flow-var.c new file mode 100644 index 00000000000..4c60aa4eb47 --- /dev/null +++ b/libbanshee/engine/flow-var.c @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
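A sketch of the graph-output interface above, writing a two-node digraph (hypothetical driver, not part of the patch; the deletes qualifier is erased by the build flags):

#include <stdio.h>
#include "dot.h"

static void write_graph(void)
{
  FILE *f;
  dot_node a, b;

  f = fopen("engine.dot", "w");
  if (!f)
    return;

  dot_start(f, "g", TRUE, FALSE);  /* directed, non-strict */
  a = dot_get_node("first");       /* declares nd_0 [label="first"] */
  b = dot_get_node("second");
  dot_plain_edge(a, b);            /* emits: nd_0 -> nd_1; */
  dot_end();                       /* closes the graph, frees the node table */
  fclose(f);
}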
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <stdio.h> +#include <assert.h> +#include "banshee.h" +#include "flow-var.h" +#include "ufind.h" +#include "bounds.h" + +DECLARE_UFIND(contour_elt,contour) + +struct flow_var /* extends gen_e */ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; /* alias or var */ + stamp st; + gen_e alias; + bounds sameregion ubs; + bounds sameregion lbs; + contour_elt elt; + const char *name; +}; + +DEFINE_UFIND(contour_elt,contour) +DEFINE_LIST(flow_var_list, flow_var) + +#define get_contour(x) (contour_elt_get_info((x)->elt)) + +static flow_var make_var(region r, const char *name, stamp st) +{ + flow_var result = ralloc(r,struct flow_var); + + result->type = VAR_TYPE; + result->st = st; + result->alias = NULL; + result->ubs = bounds_create(r); + result->lbs = bounds_create(r); + result->elt = new_contour_elt(r,NULL); + result->name = name; + +#ifdef NONSPEC + result->sort = flow_sort; +#endif + + return result; +} + +flow_var fv_fresh(region r, const char *name) +{ + return make_var(r,name,stamp_fresh()); +} + +flow_var fv_fresh_large(region r, const char *name) +{ + return make_var(r,name,stamp_fresh_large()); +} + +flow_var fv_fresh_small(region r, const char *name) +{ + return make_var(r,name,stamp_fresh_small()); +} + +const char * fv_get_name(flow_var v) +{ + return v->name; +} + +gen_e_list fv_get_lbs(flow_var v) +{ + return bounds_exprs(v->lbs); +} + +gen_e_list fv_get_ubs(flow_var v) +{ + return bounds_exprs(v->ubs); +} + +bool fv_add_ub(flow_var v, gen_e e, stamp st) +{ + return bounds_add(v->ubs,e,st); +} + +bool fv_add_lb(flow_var v, gen_e e, stamp st) +{ + return bounds_add(v->lbs,e,st); +} + +bool fv_is_ub(flow_var v, stamp st) +{ + bool self_edge = v->st == st, + in_bounds = bounds_query(v->ubs,st); + + return (self_edge || in_bounds); +} + +bool fv_is_lb(flow_var v, stamp st) +{ + bool self_edge = v->st == st, + in_bounds = bounds_query(v->lbs,st); + + return (self_edge || in_bounds); +} + +void fv_set_alias(flow_var v, gen_e e) +{ + assert(v->type == VAR_TYPE); + + v->type = ALIAS_TYPE; + v->alias = e; +} + +gen_e fv_get_alias(flow_var v) +{ + return v->alias; +} + +bool fv_has_contour(flow_var v) +{ + return (get_contour(v) != NULL); +} + +void fv_set_contour(flow_var v, contour c) +{ + contour_elt_update(v->elt,c); +} + +static contour combine_contour(contour c1, contour c2) +{ + if (c1 == NULL) + return c2; + else if (c2 == NULL) + return c1; + + else + { + fail("Attempt to unify two distinct contours\n"); + return NULL; + } + +} +void fv_unify_contour(flow_var v1, flow_var v2) +{ + contour_elt_unify(combine_contour,v1->elt,v2->elt); +} + + +gen_e fv_instantiate_contour(flow_var v) deletes +{ + contour c = get_contour(v); + return c->instantiate(c->fresh,c->get_stamp,c->shape); +} diff --git a/libbanshee/engine/flow-var.h b/libbanshee/engine/flow-var.h new file mode 100644 index 00000000000..2d77007824a --- /dev/null +++ b/libbanshee/engine/flow-var.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University 
of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef FLOW_VAR_H +#define FLOW_VAR_H + +#include "linkage.h" +#include "banshee.h" +#include "jcollection.h" + +EXTERN_C_BEGIN + +typedef struct flow_var *flow_var; + +typedef gen_e (*contour_inst_fn_ptr) (fresh_fn_ptr,get_stamp_fn_ptr,gen_e) deletes; + +struct contour +{ + gen_e shape; + fresh_fn_ptr fresh; + get_stamp_fn_ptr get_stamp; + contour_inst_fn_ptr instantiate; +}; + +typedef struct contour *contour; + +DECLARE_LIST(flow_var_list, flow_var) + +flow_var fv_fresh(region r, const char *name); +flow_var fv_fresh_large(region r, const char *name); +flow_var fv_fresh_small(region r, const char *name); +const char * fv_get_name(flow_var v); +gen_e_list fv_get_lbs(flow_var v); +gen_e_list fv_get_ubs(flow_var v); +bool fv_add_ub(flow_var v, gen_e e, stamp st); +bool fv_add_lb(flow_var v, gen_e e, stamp st); +bool fv_is_ub(flow_var v, stamp st); +bool fv_is_lb(flow_var v, stamp st); + +void fv_set_alias(flow_var v, gen_e e); +gen_e fv_get_alias(flow_var v); +void fv_set_contour(flow_var v, contour c); +bool fv_has_contour(flow_var v); +void fv_unify_contour(flow_var v1, flow_var v2); +gen_e fv_instantiate_contour(flow_var v) deletes; + +EXTERN_C_END + +#endif /* FLOW_VAR_H */ diff --git a/libbanshee/engine/flowrow-sort.c b/libbanshee/engine/flowrow-sort.c new file mode 100644 index 00000000000..baee348ffd3 --- /dev/null +++ b/libbanshee/engine/flowrow-sort.c @@ -0,0 +1,1107 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <regions.h> +#include <assert.h> +#include <stdio.h> +#include <string.h> +#include <ansidecl.h> +#include "flowrow-sort.h" +#include "termhash.h" + +#include "setif-sort.h" + +#define ABS_TYPE 2 +#define WILD_TYPE 3 +#define ROW_TYPE 4 + +/* generic flow row */ +struct flowrow_gen +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + stamp st; +#ifdef NONSPEC + sort_kind base_sort; +#endif +}; + +typedef struct flowrow_gen *flowrow_gen; + +struct flowrow +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + stamp st; +#ifdef NONSPEC + sort_kind base_sort; +#endif + flowrow_map fields; + gen_e rest; +}; + +typedef struct flowrow *flowrow; + +struct field_split +{ + gen_e_list matched1; + gen_e_list matched2; + flowrow_map nomatch1; + flowrow_map nomatch2; +}; + +region flowrow_region; +term_hash flowrow_hash; +struct flowrow_stats flowrow_stats; +static void fields_print(FILE *f,flowrow_map m,field_print_fn_ptr field_print) deletes; + +stamp flowrow_get_stamp(gen_e e) +{ + if ( ((flowrow_gen)e)->type == ALIAS_TYPE) + return ((flowrow_gen)fv_get_alias( (flow_var)e ))->st; + else + return ((flowrow_gen)e)->st; + +} + +static flowrow_map flowrow_get_fields(gen_e e) +{ + assert (flowrow_is_row(e)); + + return ((flowrow)e)->fields; +} + +static gen_e flowrow_get_rest(gen_e e) +{ + assert(flowrow_is_row(e)); + + return ((flowrow)e)->rest; +} + + +static int field_compare(const flowrow_field f1,const flowrow_field f2) + +{ + int compare = strcmp(f1->label,f2->label); + return compare; +} + + +static int field_compare_ne(const flowrow_field f1,const flowrow_field f2) + +{ + int compare = strcmp(f1->label,f2->label); + + if (! 
compare) /* rows should never have two fields with the same labels */ + { + failure("Multiple fields in this row share the same label\n"); + } + return compare; +} + +static struct field_split split_fields(region r, flowrow_map fields1, + flowrow_map fields2) +{ + struct field_split split; + flowrow_map_scanner scan1, scan2; + flowrow_field field1,field2; + bool consumed1 = TRUE,consumed2 = TRUE, + fields1_remain = TRUE, fields2_remain = TRUE;; + + split.matched1 = new_gen_e_list(r); + split.matched2 = new_gen_e_list(r); + split.nomatch1 = new_flowrow_map(r); + split.nomatch2 = new_flowrow_map(r); + + flowrow_map_scan(fields1,&scan1); + flowrow_map_scan(fields2,&scan2); + + while (TRUE) + { + if (consumed1) + fields1_remain = flowrow_map_next(&scan1,&field1); + if (consumed2) + fields2_remain = flowrow_map_next(&scan2,&field2); + + if (fields1_remain && fields2_remain) + { + int compare_fields = field_compare(field1,field2); + + if (compare_fields < 0) + { + flowrow_map_cons(field1,split.nomatch1); + consumed1 = TRUE; + consumed2 = FALSE; + } + else if (compare_fields > 0) + { + flowrow_map_cons(field2,split.nomatch2); + consumed2 = TRUE; + consumed1 = FALSE; + } + else /* two fields are equal */ + { + gen_e_list_cons(field1->expr,split.matched1); + gen_e_list_cons(field2->expr,split.matched2); + consumed1 = TRUE; + consumed2 = TRUE; + continue; + } + } + else if (fields1_remain) + { + /* flowrow_map_append(split.nomatch1,flowrow_map_copy(r,fields1)); */ + flowrow_map_cons(field1,split.nomatch1); + + while (flowrow_map_next(&scan1,&field1)) + { + flowrow_map_cons(field1,split.nomatch1); + } + + break; + } + else if (fields2_remain) + { + /* flowrow_map_append(split.nomatch2,flowrow_map_copy(r,fields2)); */ + flowrow_map_cons(field2,split.nomatch2); + while (flowrow_map_next(&scan2,&field2)) + { + flowrow_map_cons(field2,split.nomatch2); + } + break; + } + else /* no remaining fields, so */ break; + } + + return split; +} + +static bool flowrow_is_normalized(gen_e r) +{ + if ( flowrow_is_row(r) ) + { + gen_e rest = flowrow_get_rest(r); + + if ( flowrow_is_row(rest) || flowrow_is_alias(rest) ) + return FALSE; + } + else if ( flowrow_is_alias(r) ) + return FALSE; + + return TRUE; +} + +static gen_e normalize(get_stamp_fn_ptr get_stamp, + flowrow_map m,gen_e r) deletes +{ + if (flowrow_is_row(r)) + { + flowrow_map_append(m, + flowrow_map_copy(flowrow_region, + flowrow_get_fields(r))); + return normalize(get_stamp,m,flowrow_get_rest(r)); + } + else if (flowrow_is_alias(r)) + { + assert (! flowrow_is_alias(fv_get_alias((flow_var)r)) ); + return normalize(get_stamp, m,fv_get_alias((flow_var)r)); + } + else + return flowrow_row(get_stamp,m,r); +} + +static gen_e normalize_row(get_stamp_fn_ptr get_stamp, gen_e r) deletes +{ + if (flowrow_is_normalized(r)) + return r; + else /* normalize the row */ + return normalize(get_stamp,new_flowrow_map(flowrow_region),r); +} + +static bool eq(gen_e e1, gen_e e2) +{ + return ( flowrow_get_stamp(e1) == flowrow_get_stamp(e2) ); +} + + +/* + A row constraint row1 <= row2 is l-inductive iff row2 is a var and for all + X = tlv(row1), o(row2) > o(X). 
+ + tlv(row) = {X} if row is a var X, {} otherwise +*/ +static bool l_inductive(gen_e e1, gen_e e2) +{ + if (flowrow_is_var(e2)) + { + if (flowrow_is_var(e1)) + return flowrow_get_stamp(e2) > flowrow_get_stamp(e1); + else return TRUE; + } + return FALSE; +} + +/* + A row constraint row1 <= row2 is r-inductive iff row1 is a var and for all + X = tlv(row2), o(row1) > o(X) +*/ +static bool r_inductive(gen_e e1, gen_e e2) +{ + if (flowrow_is_var(e1)) + { + if (flowrow_is_var(e2)) + return flowrow_get_stamp(e1) > flowrow_get_stamp(e2); + else return TRUE; + } + return FALSE; +} + +static inline bool flowrow_minimal(flowrow r) +{ + return flowrow_is_zero(r->rest); +} + +static inline bool flowrow_maximal(flowrow r) +{ + return flowrow_is_one(r->rest); +} + +static inline bool flowrow_closed(flowrow r) +{ + return flowrow_is_abs(r->rest); +} + +static inline bool flowrow_wildcard(flowrow r) +{ + return flowrow_is_wild(r->rest); +} + +static inline bool flowrow_var(flowrow r) +{ + return flowrow_is_var(r->rest); +} + +static gen_e contour_instantiate(fresh_fn_ptr fresh, + get_stamp_fn_ptr get_stamp, + gen_e e) deletes +{ + if (flowrow_is_row(e)) + { + gen_e result; + flowrow_map_scanner scan; + flowrow_field f; + gen_e row = normalize_row(get_stamp,e); + + region scratch_rgn = newregion(); + + flowrow_map new_fields = new_flowrow_map(scratch_rgn); + + flowrow_map_scan(flowrow_get_fields(row),&scan); + + while (flowrow_map_next(&scan,&f)) + { + flowrow_field new_field = + ralloc(flowrow_region,struct flowrow_field); + new_field->label = f->label; + new_field->expr = fresh(NULL); + + flowrow_map_cons(new_field,new_fields); + } + + result = flowrow_row(get_stamp,new_fields,flowrow_fresh(NULL)); + + deleteregion(scratch_rgn); + + assert( flowrow_is_row(result) ); + + return result; + } + + else /* TODO */ + { + failure("Unmatched contour\n"); + return NULL; + } +} + +static contour get_contour(fresh_fn_ptr fresh,get_stamp_fn_ptr get_stamp, + gen_e zero_elem ATTRIBUTE_UNUSED,gen_e e) +{ + if (flowrow_is_row(e)) + { + contour result; + + result = ralloc(flowrow_region,struct contour); + result->shape = e; + result->fresh = fresh; + result->get_stamp = get_stamp; + result->instantiate = contour_instantiate; + + return result; + } + else /* TODO */ + { + failure("Unmatched contour\n"); + return NULL; + } +} + + +static void trans_lbs(fresh_fn_ptr fresh,get_stamp_fn_ptr get_stamp, + incl_fn_ptr field_incl, gen_e zero_elem, + flow_var v, gen_e e) deletes +{ + gen_e temp; + gen_e_list_scanner scan; + + gen_e_list_scan(fv_get_lbs(v),&scan); + while (gen_e_list_next(&scan,&temp)) + flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,temp,e); + +} + +static void trans_ubs(fresh_fn_ptr fresh,get_stamp_fn_ptr get_stamp, + incl_fn_ptr field_incl, gen_e zero_elem, + flow_var v, gen_e e) deletes +{ + gen_e temp; + gen_e_list_scanner scan; + + gen_e_list_scan(fv_get_ubs(v),&scan); + while (gen_e_list_next(&scan,&temp)) + flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,e,temp); +} + +static void update_lower_bound(fresh_fn_ptr fresh,get_stamp_fn_ptr get_stamp, + incl_fn_ptr field_incl, gen_e zero_elem, + flow_var v,gen_e e) deletes +{ + if (fv_has_contour(v)) /* _ <= v, and v has a contour */ + { + gen_e shape = fv_instantiate_contour(v); + + fv_set_alias(v,shape); + trans_ubs(fresh,get_stamp,field_incl,zero_elem,v,shape); + trans_lbs(fresh,get_stamp,field_incl,zero_elem,v,shape); + + flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,e,shape); + + } + + else if (flowrow_is_var(e)) + { + flow_var v_lb = 
(flow_var)e; + + if (fv_has_contour(v_lb)) /* v1 <= v2, v1 has a contour */ + { + gen_e shape = fv_instantiate_contour(v_lb); + + fv_set_alias(v_lb,shape); + trans_ubs(fresh,get_stamp,field_incl,zero_elem,v_lb,shape); + trans_lbs(fresh,get_stamp,field_incl,zero_elem,v_lb,shape); + + flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem, + shape,(gen_e)v); + + } + + else /* we have v1 <= v2, no contours */ + { + bool redundant; + + fv_unify_contour(v,(flow_var)e); + redundant = fv_add_lb(v,e,flowrow_get_stamp(e)); + + if (! redundant) + trans_ubs(fresh,get_stamp,field_incl,zero_elem,v,e); + + } + } + else /* we have c(...) <= v, and v has no contour */ + { + gen_e shape = NULL; + fv_set_contour(v,get_contour(fresh,get_stamp,zero_elem,e)); + + shape = fv_instantiate_contour(v); + fv_set_alias(v,shape); + trans_ubs(fresh,get_stamp,field_incl,zero_elem,v,shape); + trans_lbs(fresh,get_stamp,field_incl,zero_elem,v,shape); + + flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,e,shape); + + } +} + +static void update_upper_bound(fresh_fn_ptr fresh,get_stamp_fn_ptr get_stamp, + incl_fn_ptr field_incl, gen_e zero_elem, + flow_var v,gen_e e) deletes +{ + if (fv_has_contour(v)) /* v isn't aliased, and we discovered a contour*/ + { + gen_e shape = fv_instantiate_contour(v); + + fv_set_alias(v,shape); + trans_ubs(fresh,get_stamp,field_incl,zero_elem,v,shape); + trans_lbs(fresh,get_stamp,field_incl,zero_elem,v,shape); + + flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,shape,e); + + } + + else if (flowrow_is_var(e)) + { + flow_var v2 = (flow_var)e; + + if (fv_has_contour(v2)) // v2 isn't aliased, and we discovered a contour + { + gen_e shape = fv_instantiate_contour(v2); + + fv_set_alias(v2,shape); + trans_ubs(fresh,get_stamp,field_incl,zero_elem,v2,shape); + trans_lbs(fresh,get_stamp,field_incl,zero_elem,v2,shape); + + + flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem, + (gen_e)v,shape); + + } + + else /* we have v1 <= v2, no contours */ + { + bool redundant; + + fv_unify_contour(v,(flow_var)e); + redundant = fv_add_ub(v,e,flowrow_get_stamp(e)); + + if (! redundant) + trans_lbs(fresh,get_stamp,field_incl,zero_elem,v,e); + + } + } + else /* we have v <= c(...), and v has no contour */ + { + gen_e shape = NULL; + fv_set_contour(v,get_contour(fresh,get_stamp,zero_elem,e)); + + shape = fv_instantiate_contour(v); + + if (! 
flowrow_is_row(shape) ) + { + assert(0); + } + + fv_set_alias(v,shape); + trans_ubs(fresh,get_stamp,field_incl,zero_elem,v,shape); + trans_lbs(fresh,get_stamp,field_incl,zero_elem,v,shape); + + + flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,shape,e); + + } + +} + +// END + + +void flowrow_inclusion(fresh_fn_ptr fresh,get_stamp_fn_ptr get_stamp, + incl_fn_ptr field_incl, gen_e zero_elem, gen_e a, + gen_e b) deletes +{ + gen_e e1 = normalize_row(get_stamp, a), + e2 = normalize_row(get_stamp, b); + + if (eq(e1,e2)) + return; + else if (flowrow_is_zero(e1) || flowrow_is_wild(e1)) + return; + else if (flowrow_is_one(e2) || flowrow_is_wild(e2)) + return; + + else if ( l_inductive(e1,e2) ) + { + flow_var v2 = (flow_var)e2; + + flowrow_stats.rows_l_inductive++; + + update_lower_bound(fresh,get_stamp,field_incl,zero_elem,v2,e1); + return; + } + + else if ( r_inductive(e1,e2) ) + { + flow_var v1 = (flow_var)e1; + + flowrow_stats.rows_r_inductive++; + + update_upper_bound(fresh,get_stamp,field_incl,zero_elem,v1,e2); + return; + } + + else if ( flowrow_is_row(e1) && flowrow_is_row(e2)) + { + region scratch_rgn = newregion(); + + flowrow r1 = (flowrow)e1, + r2 = (flowrow)e2; + + struct field_split split = + split_fields(scratch_rgn,r1->fields,r2->fields); + + if ( gen_e_list_empty(split.matched1) ) + { + assert ( gen_e_list_empty(split.matched2) ); + + if (flowrow_wildcard(r1) || flowrow_minimal(r1)) + { + gen_e newrow = + flowrow_row(get_stamp,split.nomatch1,flowrow_get_rest(e1)); + + flowrow_inclusion(fresh,get_stamp,field_incl, zero_elem,newrow, + flowrow_get_rest(e2)); + } + else if (flowrow_maximal(r2) || flowrow_closed(r2)) + { + gen_e newrow = + flowrow_row(get_stamp,split.nomatch2,flowrow_get_rest(e2)); + + flowrow_inclusion(fresh, get_stamp,field_incl,zero_elem, + flowrow_get_rest(e1),newrow); + } + else + { + gen_e rest1 = flowrow_get_rest(e1), + rest2 = flowrow_get_rest(e2); + + //assert( flowrow_is_var(rest1) && flowrow_is_var(rest2)); + + if ( eq(rest1,rest2)) + failure("Recursive row resolution\n"); + else + { + gen_e fv = flowrow_fresh(NULL); + gen_e newrow1 = flowrow_row(get_stamp,split.nomatch1,fv); + gen_e newrow2 = flowrow_row(get_stamp,split.nomatch2,fv); + + flowrow_inclusion(fresh,get_stamp,field_incl, + zero_elem,rest1,newrow2); + flowrow_inclusion(fresh,get_stamp,field_incl, + zero_elem,newrow1,rest2); + } + + } + } + + else /* some fields matched */ + { + gen_e_list_scanner scan1, scan2; + gen_e f1,f2; + + assert( gen_e_list_length(split.matched1) + == gen_e_list_length(split.matched2) ); + + gen_e_list_scan(split.matched1,&scan1); + gen_e_list_scan(split.matched2,&scan2); + + while (gen_e_list_next(&scan1,&f1) && + gen_e_list_next(&scan2,&f2) ) + { + field_incl(f1,f2); + } + + if ( flowrow_wildcard(r1) && flowrow_wildcard(r2) ) + { + goto END; + } + else + { + flowrow_map fields1 = split.nomatch1; + flowrow_map fields2 = split.nomatch2; + + gen_e newrow1 = flowrow_row(get_stamp,fields1,r1->rest); + gen_e newrow2 = flowrow_row(get_stamp,fields2,r2->rest); + + flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem, + newrow1, newrow2); + } + } + END: + deleteregion(scratch_rgn); + } + + else /* potentially a problem normalizing a row? 
*/ + { + failure("Unmatched case in row inclusion\n"); + return; + } +} + +gen_e flowrow_row(get_stamp_fn_ptr get_stamp,flowrow_map f, gen_e rest) deletes +{ + flowrow_map fields = flowrow_map_copy(flowrow_region,f); + + if (flowrow_map_empty(fields)) + { + return rest; + } + else + { + flowrow_map_scanner scan; + flowrow_field temp; + gen_e result; + int i = 2, + length = flowrow_map_length(fields); + stamp st[2+2*length]; + + st[0] = ROW_TYPE; + if (rest) + st[1] = flowrow_get_stamp(rest); + else + assert(0); + + flowrow_map_sort(fields,field_compare_ne); + + flowrow_map_scan(fields,&scan); + while(flowrow_map_next(&scan,&temp)) + { + st[i++] = stamp_string(temp->label); + if (temp->expr) + st[i++] = get_stamp(temp->expr); + else + assert(0); + } + + if ( (result = term_hash_find(flowrow_hash,st,2 + 2*length)) == NULL) + { + flowrow r = ralloc(flowrow_region, struct flowrow); + r->type = ROW_TYPE; + r->st = stamp_fresh(); + r->fields = fields; + r->rest = rest; + +#ifdef NONSPEC + r->base_sort = row_map_head(fields)->expr->sort; + r->sort = flowrow_sort; +#endif + result = (gen_e) r; + term_hash_insert(flowrow_hash,result,st,2+2*length); + } + /* assert(flowrow_is_normalized(result)); */ + return result; + + } +} + +#ifndef NONSPEC +static struct flowrow_gen zero_row = {ZERO_TYPE,ZERO_TYPE}; +static struct flowrow_gen one_row = {ONE_TYPE,ONE_TYPE}; +static struct flowrow_gen abs_row = {ABS_TYPE, ABS_TYPE}; +static struct flowrow_gen wild_row = {WILD_TYPE, WILD_TYPE}; + +gen_e flowrow_zero(void) +{ + return (gen_e)&zero_row; +} + +gen_e flowrow_one(void) +{ + return (gen_e)&one_row; +} + +gen_e flowrow_abs(void) +{ + return (gen_e)&abs_row; +} + +gen_e flowrow_wild(void) +{ + return (gen_e)&wild_row; +} + +gen_e flowrow_fresh(const char *name) +{ + flowrow_stats.fresh++; + return (gen_e)fv_fresh(flowrow_region,name); +} + +gen_e flowrow_fresh_small(const char *name) +{ + flowrow_stats.fresh_small++; + return (gen_e)fv_fresh_small(flowrow_region,name); +} + +gen_e flowrow_fresh_large(const char *name) +{ + flowrow_stats.fresh_large++; + return (gen_e)fv_fresh_large(flowrow_region,name); +} + +#else +static struct flowrow_gen term_zero_row = {flowrow_sort,ZERO_TYPE,ZERO_TYPE,term_sort}; +static struct flowrow_gen term_one_row = {flowrow_sort,ONE_TYPE,ONE_TYPE,term_sort}; +static struct flowrow_gen term_abs_row = {flowrow_sort,ABS_TYPE, ABS_TYPE,term_sort}; +static struct flowrow_gen term_wild_row = {flowrow_sort,WILD_TYPE, WILD_TYPE,term_sort}; + + +static struct flowrow_gen setif_zero_row = {flowrow_sort,ZERO_TYPE,ZERO_TYPE,setif_sort}; +static struct flowrow_gen setif_one_row = {flowrow_sort,ONE_TYPE,ONE_TYPE,setif_sort}; +static struct flowrow_gen setif_abs_row = {flowrow_sort,ABS_TYPE, ABS_TYPE,setif_sort}; +static struct flowrow_gen setif_wild_row = {flowrow_sort,WILD_TYPE, WILD_TYPE,setif_sort}; + +static struct flowrow_gen setst_zero_row = {flowrow_sort,ZERO_TYPE,ZERO_TYPE,setst_sort}; +static struct flowrow_gen setst_one_row = {flowrow_sort,ONE_TYPE,ONE_TYPE,setst_sort}; +static struct flowrow_gen setst_abs_row = {flowrow_sort,ABS_TYPE, ABS_TYPE,setst_sort}; +static struct flowrow_gen setst_wild_row = {flowrow_sort,WILD_TYPE, WILD_TYPE,setst_sort}; + + +gen_e flowrow_zero(sort_kind base_sort) +{ + switch (base_sort) + { + case setif_sort: + return (gen_e)&setif_zero_row; + case setst_sort: + return (gen_e)&setst_zero_row; + case term_sort: + return (gen_e)&term_zero_row; + default: + { + failure("No matching base sort: flowrow_zero\n"); + return NULL; + } + } + + return NULL; +} + 
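+/* Illustrative sketch (assumes the setif base sort): in a NONSPEC
+   build, the constant rows are selected by the base sort of the
+   row's fields:
+
+     gen_e bottom = flowrow_zero(setif_sort);
+     gen_e top    = flowrow_one(setif_sort);
+
+   A base sort with no matching case reports failure() and yields
+   NULL. */
+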
+gen_e flowrow_one(sort_kind base_sort)
+{
+  switch (base_sort)
+    {
+    case setif_sort:
+      return (gen_e)&setif_one_row;
+    case setst_sort:
+      return (gen_e)&setst_one_row;
+    case term_sort:
+      return (gen_e)&term_one_row;
+    default:
+      {
+	failure("No matching base sort: flowrow_one\n");
+	return NULL;
+      }
+    }
+
+  return NULL;
+}
+
+gen_e flowrow_abs(sort_kind base_sort)
+{
+  switch (base_sort)
+    {
+    case setif_sort:
+      return (gen_e)&setif_abs_row;
+    case setst_sort:
+      return (gen_e)&setst_abs_row;
+    case term_sort:
+      return (gen_e)&term_abs_row;
+    default:
+      {
+	failure("No matching base sort: flowrow_abs\n");
+	return NULL;
+      }
+    }
+
+  return NULL;
+}
+
+gen_e flowrow_wild(sort_kind base_sort)
+{
+  switch (base_sort)
+    {
+    case setif_sort:
+      return (gen_e)&setif_wild_row;
+    case setst_sort:
+      return (gen_e)&setst_wild_row;
+    case term_sort:
+      return (gen_e)&term_wild_row;
+    default:
+      {
+	failure("No matching base sort: flowrow_wild\n");
+	return NULL;
+      }
+    }
+
+  return NULL;
+}
+
+/* A fresh row variable does not depend on the base sort of its
+   fields, so these constructors simply allocate fresh flow variables,
+   exactly as the specialized (non-NONSPEC) versions above do. */
+gen_e flowrow_fresh(sort_kind base_sort ATTRIBUTE_UNUSED)
+{
+  flowrow_stats.fresh++;
+  return (gen_e)fv_fresh(flowrow_region,NULL);
+}
+
+gen_e flowrow_fresh_small(sort_kind base_sort ATTRIBUTE_UNUSED)
+{
+  flowrow_stats.fresh_small++;
+  return (gen_e)fv_fresh_small(flowrow_region,NULL);
+}
+
+gen_e flowrow_fresh_large(sort_kind base_sort ATTRIBUTE_UNUSED)
+{
+  flowrow_stats.fresh_large++;
+  return (gen_e)fv_fresh_large(flowrow_region,NULL);
+}
+
+sort_kind flowrow_base_sort(gen_e e)
+{
+  /* Rows and the static constant rows record their base sort
+     directly; the query is not meaningful for row variables. */
+  return ((flowrow_gen)e)->base_sort;
+}
+#endif /* NONSPEC */
+
+
+gen_e flowrow_extract_field(const char *name, gen_e e)
+{
+
+  static bool field_eq(const flowrow_field f)
+  {
+    return (!
strcmp(f->label,name));
+  }
+
+  if (flowrow_is_row(e))
+    {
+      flowrow_map fields = flowrow_get_fields(e);
+      flowrow_field f = flowrow_map_find(fields,field_eq);
+
+      if (f)
+	return f->expr;
+    }
+  return NULL;
+}
+
+gen_e flowrow_extract_rest(gen_e e)
+{
+  if (flowrow_is_row(e))
+    return flowrow_get_rest(e);
+  else
+    return NULL;
+}
+
+flowrow_map flowrow_extract_fields(gen_e e)
+{
+  if (flowrow_is_row(e))
+    return flowrow_map_copy(flowrow_region,flowrow_get_fields(e));
+  else
+    return NULL;
+}
+
+
+bool flowrow_is_alias(gen_e e)
+{
+  return ((flowrow_gen)e)->type == ALIAS_TYPE;
+}
+
+bool flowrow_is_zero(gen_e e)
+{
+  return ((flowrow_gen)e)->type == ZERO_TYPE;
+}
+
+bool flowrow_is_one(gen_e e)
+{
+  return ((flowrow_gen)e)->type == ONE_TYPE;
+}
+
+bool flowrow_is_abs(gen_e e)
+{
+  return ((flowrow_gen)e)->type == ABS_TYPE;
+}
+
+bool flowrow_is_wild(gen_e e)
+{
+  return ((flowrow_gen)e)->type == WILD_TYPE;
+}
+
+bool flowrow_is_var(gen_e e)
+{
+  return ((flowrow_gen)e)->type == VAR_TYPE;
+}
+
+bool flowrow_is_row(gen_e e)
+{
+  return ((flowrow_gen)e)->type == ROW_TYPE;
+}
+
+void flowrow_init(void)
+{
+  flowrow_region = newregion();
+  flowrow_hash = make_term_hash(flowrow_region);
+}
+
+static void flowrow_reset_stats(void)
+{
+  flowrow_stats.fresh = 0;
+  flowrow_stats.fresh_small = 0;
+  flowrow_stats.fresh_large = 0;
+
+  flowrow_stats.rows_disjoint_wild = 0;
+  flowrow_stats.rows_equal = 0;
+  flowrow_stats.rows_zero_one_wild = 0;
+  flowrow_stats.rows_l_inductive = 0;
+  flowrow_stats.rows_r_inductive = 0;
+  flowrow_stats.rows_disjoint_r1_minimal = 0;
+  flowrow_stats.rows_disjoint_r1_var_r2_minimal = 0;
+  flowrow_stats.rows_disjoint_r1_var_r2_maximal = 0;
+  flowrow_stats.rows_disjoint_r1_var_r2_closed = 0;
+  flowrow_stats.rows_disjoint_r1_var_r2_var_lt = 0;
+  flowrow_stats.rows_disjoint_r1_var_r2_var_gt = 0;
+  flowrow_stats.rows_equal_domains = 0;
+  flowrow_stats.rows_nonempty_intersection = 0;
+  flowrow_stats.rows_fresh = 0;
+  flowrow_stats.rows_fresh_large = 0;
+}
+
+void flowrow_reset(void) deletes
+{
+  term_hash_delete(flowrow_hash);
+  deleteregion_ptr(&flowrow_region);
+
+  flowrow_reset_stats();
+
+  flowrow_region = newregion();
+  flowrow_hash = make_term_hash(flowrow_region);
+
+}
+
+static void fields_print(FILE *f,flowrow_map m,field_print_fn_ptr field_print) deletes
+{
+  flowrow_map_scanner scan;
+  flowrow_field temp;
+
+  flowrow_map_scan(m,&scan);
+
+  if (flowrow_map_next(&scan,&temp))
+    {
+      fprintf(f,"%s : ",temp->label);
+
+      if (field_print)
+	field_print(f,temp->expr);
+      else
+	fprintf(f,"?");
+    }
+
+  while (flowrow_map_next(&scan,&temp))
+    {
+      fprintf(f,",%s : ",temp->label);
+
+      if (field_print)
+	field_print(f,temp->expr);
+      else
+	fprintf(f,"?");
+    }
+}
+
+void flowrow_print(FILE *f,get_stamp_fn_ptr get_stamp,
+		   field_print_fn_ptr field_print,gen_e row) deletes
+{
+  gen_e e = normalize_row(get_stamp,row);
+
+  switch ( ((flowrow_gen)e)->type)
+    {
+    case ZERO_TYPE:
+      fprintf(f, "0");
+      break;
+    case ONE_TYPE:
+      fprintf(f, "1");
+      break;
+    case ABS_TYPE:
+      fprintf(f, "abs");
+      break;
+    case WILD_TYPE:
+      fprintf(f, "wild");
+      break;
+    case VAR_TYPE:
+      /* Print the name literally; it must not be passed as a format
+	 string, since it may contain '%'. */
+      fprintf(f, "%s", fv_get_name((flow_var)e));
+      break;
+    case ROW_TYPE:
+      fprintf(f, "<");
+      fields_print(f, flowrow_get_fields(e), field_print);
+      fprintf(f, "|");
+      flowrow_print(f, get_stamp, field_print, flowrow_get_rest(e));
+      fprintf(f, ">");
+      break;
+    default:
+      assert(0);
+      break;
+    }
+}
+
+void flowrow_print_stats(FILE *f)
+{
+  fprintf(f,"\n========== Flow Row Stats ==========\n");
+  fprintf(f,"Fresh : %d\n",flowrow_stats.fresh);
+ fprintf(f,"Fresh Small : %d\n",flowrow_stats.fresh_small); + fprintf(f,"Fresh Large : %d\n",flowrow_stats.fresh_large); + fprintf(f,"=====================================\n"); +} + +DEFINE_LIST(flowrow_map,flowrow_field) diff --git a/libbanshee/engine/flowrow-sort.h b/libbanshee/engine/flowrow-sort.h new file mode 100644 index 00000000000..8d79b42c29d --- /dev/null +++ b/libbanshee/engine/flowrow-sort.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + +#ifndef FLOWROW_SORT_H +#define FLOWROW_SORT_H + + +#include "stdio.h" +#include "banshee.h" +#include "termhash.h" +#include "flow-var.h" + +struct flowrow_field +{ + char *label; + gen_e expr; +}; + +typedef struct flowrow_field *flowrow_field; + +DECLARE_LIST(flowrow_map,flowrow_field) + +extern region flowrow_region; + +void flowrow_inclusion(fresh_fn_ptr fresh, get_stamp_fn_ptr get_stamp, + incl_fn_ptr field_incl,gen_e zero_elem, gen_e e1, + gen_e e2) deletes; + +gen_e flowrow_row(get_stamp_fn_ptr get_stamp,flowrow_map fields, gen_e rest) deletes; + +gen_e flowrow_extract_field(const char *name, gen_e e); +gen_e flowrow_extract_rest(gen_e e); +flowrow_map flowrow_extract_fields(gen_e e); + +stamp flowrow_get_stamp(gen_e e); + +#ifndef NONSPEC +gen_e flowrow_zero(void); +gen_e flowrow_one(void); +gen_e flowrow_abs(void); +gen_e flowrow_wild(void); +gen_e flowrow_fresh(const char *name); +gen_e flowrow_fresh_small(const char *name); +gen_e flowrow_fresh_large(const char *name); +#else +sort_kind flowrow_base_sort(gen_e e); +gen_e flowrow_zero(sort_kind base_sort); +gen_e flowrow_one(sort_kind base_sort); +gen_e flowrow_abs(sort_kind base_sort); +gen_e flowrow_wild(sort_kind base_sort); +gen_e flowrow_fresh(sort_kind base_sort); +gen_e flowrow_fresh_small(sort_kind base_sort); +gen_e flowrow_fresh_large(sort_kind base_sort); +#endif + +bool flowrow_is_zero(gen_e e); +bool flowrow_is_one(gen_e e); +bool flowrow_is_abs(gen_e e); +bool flowrow_is_wild(gen_e e); +bool flowrow_is_var(gen_e e); +bool flowrow_is_row(gen_e e); +bool flowrow_is_alias(gen_e e); + + +void flowrow_init(void); +void flowrow_reset(void) deletes; + +typedef void (* field_print_fn_ptr) (FILE *f,gen_e e) deletes; + +void flowrow_print(FILE *f,get_stamp_fn_ptr get_stamp, + field_print_fn_ptr field_print,gen_e e) deletes; +void flowrow_print_stats(FILE *f); + +extern struct flowrow_stats flowrow_stats; + +struct flowrow_stats +{ + int fresh; + int fresh_small; + int fresh_large; + + int rows_disjoint_wild; + int rows_equal; + int rows_zero_one_wild; + int rows_l_inductive; + int rows_r_inductive; + int rows_disjoint_r1_minimal; + int rows_disjoint_r1_var_r2_minimal; + int rows_disjoint_r1_var_r2_maximal; + int rows_disjoint_r1_var_r2_closed; + int rows_disjoint_r1_var_r2_var_lt; + int rows_disjoint_r1_var_r2_var_gt; + int rows_equal_domains; + int rows_nonempty_intersection; + int rows_fresh; + int rows_fresh_large; +}; + +#endif /* FLOWROW_H */ + + + + + + + diff --git a/libbanshee/engine/hash.c b/libbanshee/engine/hash.c new file mode 100644 index 00000000000..bf315cee4a5 --- /dev/null +++ b/libbanshee/engine/hash.c @@ -0,0 +1,427 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <string.h> +#include "hash.h" +#include "util.h" + +struct bucket +{ + hash_key key; + hash_data data; + struct bucket *next; +}; + +#define scan_bucket(b, var) for (var = b; var; var = var->next) + +struct Hash_table +{ + region r; /* Region for this table */ + hash_fn hash; /* Function for hashing keys */ + keyeq_fn cmp; /* Function for comparing keys */ + + int size; /* Number of buckets */ + int elts; /* Number of elements */ + bool internal_rgn; /* TRUE if the ht uses an internal region */ + bucket *table; /* Array of (size) buckets */ +}; + +static void rehash(hash_table ht) deletes; + +/* Make a new hash table, with size buckets initially. The actual + table is allocated in a local region, which is discarded on rehashing. */ +hash_table make_hash_table(region r, int size, hash_fn hash, + keyeq_fn cmp, bool internal_rgn) +{ + hash_table result; + + assert(size > 0); + result = ralloc(r, struct Hash_table); + + if (internal_rgn) + result->r = newregion(); + else + result->r = r; + + result->internal_rgn = internal_rgn; + result->hash = hash; + result->cmp = cmp; + result->size = size; + result->elts = 0; + result->table = rarrayalloc(result->r, size, bucket); + + return result; +} + +/* Hash a string */ +static int string_hash(char *str) +{ + char *c; + int h; + + c = str; + h = 0; + if (!c) + return 0; + while (*c) + h = 33*h + 720 + *c++; /* SML/NJ's string hash function */ + return h; +} + +/* Return TRUE iff s1 == s2 */ +static bool string_eq(char *s1, char *s2) +{ + return !strcmp(s1, s2); +} + +/* Make a hash table for strings. */ +hash_table make_string_hash_table(region rhash, int size, bool internal_rgn) +{ + return make_hash_table(rhash, size, (hash_fn) string_hash, + (keyeq_fn) string_eq,internal_rgn); +} + +/* Zero out ht. Doesn't reclaim bucket space. */ +void hash_table_reset(hash_table ht) deletes +{ + int i; + + if (ht->internal_rgn) + { + deleteregion(ht->r); + ht->r = newregion(); + } + + ht->elts = 0; + for (i = 0; i < ht->size; i++) + ht->table[i] = NULL; +} + +void hash_table_delete(hash_table ht) deletes +{ + if (ht->internal_rgn) + deleteregion(ht->r); +} + + +/* Return the number of entries in ht */ +int hash_table_size(hash_table ht) +{ + return ht->elts; +} + +/* Return the bucket corresponding to k in ht */ +static inline bucket *find_bucket(hash_table ht, hash_key k) +{ + int hash; + + hash = ht->hash(k); + if (hash < 0) + hash = -1*hash; + return &ht->table[hash % ht->size]; +} + +/* Lookup k in ht. Returns corresponding data in *d, and function + result is TRUE if the k was in ht, false otherwise. 
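+
+   Illustrative sketch (use_data is a hypothetical caller function):
+
+     hash_data d;
+     if (hash_table_lookup(ht, key, &d))
+       use_data(d);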
*/
+bool hash_table_lookup(hash_table ht, hash_key k, hash_data *d)
+{
+  bucket cur;
+
+  cur = *find_bucket(ht, k);
+  while (cur)
+    {
+      if (ht->cmp(k, cur->key))
+	{
+	  if (d)
+	    *d = cur->data;
+	  return TRUE;
+	}
+      cur = cur->next;
+    }
+  return FALSE;
+}
+
+
+/* Add k:d to ht. If k was already in ht, replace old entry by k:d.
+   Rehash if necessary. Returns TRUE if k was not already in ht. */
+bool hash_table_insert(hash_table ht, hash_key k, hash_data d) deletes
+{
+  bucket *cur;
+
+  if (ht->elts > ht->size*15)
+    rehash(ht);
+  cur = find_bucket(ht, k);
+  while (*cur)
+    {
+      if (ht->cmp(k, (*cur)->key))
+	{
+	  (*cur)->data = d;
+	  return FALSE; /* Replace */
+	}
+      cur = &(*cur)->next;
+    }
+  *cur = ralloc(ht->r, struct bucket);
+  (*cur)->key = k;
+  (*cur)->data = d;
+  (*cur)->next = NULL;
+  ht->elts++;
+  return TRUE; /* New key */
+}
+
+/* Remove mapping for k in ht. Returns TRUE if k was in ht. */
+bool hash_table_remove(hash_table ht, hash_key k)
+{
+  bucket *cur;
+
+  cur = find_bucket(ht, k);
+  while (*cur)
+    {
+      if (ht->cmp(k, (*cur)->key))
+	{
+	  /* Unlink this bucket from its chain. */
+	  *cur = (*cur)->next;
+	  ht->elts--;
+	  return TRUE;
+	}
+      cur = &(*cur)->next;
+    }
+  return FALSE;
+}
+
+/* Return a copy of ht */
+hash_table hash_table_copy(region r, hash_table ht)
+{
+  int i;
+  hash_table result;
+  bucket cur, newbucket, *prev;
+
+  result = make_hash_table(r, ht->size, ht->hash, ht->cmp,ht->internal_rgn);
+  result->elts = ht->elts;
+
+  for (i = 0; i < ht->size; i++)
+    {
+      prev = &result->table[i];
+      scan_bucket(ht->table[i], cur)
+	{
+	  newbucket = ralloc(result->r, struct bucket);
+	  newbucket->key = cur->key;
+	  newbucket->data = cur->data;
+	  newbucket->next = NULL;
+	  assert(!*prev);
+	  *prev = newbucket;
+	  prev = &newbucket->next;
+	}
+    }
+  return result;
+  /*
+  hash_table result;
+  hash_table_scanner hts;
+  hash_key k;
+  hash_data d;
+
+  result = make_hash_table(r, ht->size, ht->hash, ht->cmp);
+  hash_table_scan(ht, &hts);
+  while (hash_table_next(&hts, &k, &d))
+    insist(hash_table_insert(result, k, d));
+
+  return result;
+  */
+}
+
+/* Increase size of ht (double it) and reinsert all the elements */
+static void rehash(hash_table ht) deletes
+{
+  int old_table_size, i;
+  bucket *old_table, cur;
+  region old_region;
+
+#ifdef DEBUG
+  printf("Rehash table size=%d, elts=%d\n", ht->size, ht->elts);
+#endif
+
+  old_table_size = ht->size;
+  old_table = ht->table;
+  old_region = ht->r;
+
+  if (ht->internal_rgn)
+    ht->r = newregion();
+
+  ht->size = ht->size*2;
+  ht->elts = 0;
+  ht->table = rarrayalloc(ht->r, ht->size, bucket);
+
+  for (i = 0; i < old_table_size; i++)
+    scan_bucket(old_table[i], cur)
+      insist(hash_table_insert(ht, cur->key, cur->data));
+
+  if (ht->internal_rgn)
+    deleteregion(old_region);
+}
+
+/* Begin scanning ht */
+void hash_table_scan(hash_table ht, hash_table_scanner *hts)
+{
+  hts->ht = ht;
+  hts->i = 0;
+  hts->cur = hts->ht->table[0];
+}
+
+/* Get next elt in table, storing the elt in *k and *d if k and d are
+   non-NULL, respectively. Returns TRUE if there is a next elt, FALSE
+   otherwise.
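+
+   A full scan therefore looks like this (illustrative sketch):
+
+     hash_table_scanner s;
+     hash_key k;
+     hash_data d;
+
+     hash_table_scan(ht, &s);
+     while (hash_table_next(&s, &k, &d))
+       ... process k and d ...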
*/ +bool hash_table_next(hash_table_scanner *hts, hash_key *k, hash_data *d) +{ + while (hts->cur == NULL) + { + hts->i++; + if (hts->i < hts->ht->size) + hts->cur = hts->ht->table[hts->i]; + else + break; + } + + if (hts->i == hts->ht->size) + { + return FALSE; + } + else + { + if (k) + *k = hts->cur->key; + if (d) + *d = hts->cur->data; + hts->cur = hts->cur->next; + } + return TRUE; +} + +/* Apply f to all elements of ht, in some arbitrary order */ +void hash_table_apply(hash_table ht, hash_apply_fn f, void *arg) +{ + int i; + bucket cur; + + for (i = 0; i < ht->size; i++) + scan_bucket(ht->table[i], cur) + f(cur->key, cur->data, arg); +} + +/* Map f to all elements on ht, creating a new hash table */ +hash_table hash_table_map(hash_table ht, hash_map_fn f, void *arg) +{ + int i; + hash_table result; + bucket cur, newbucket, *prev; + + result = make_hash_table(ht->r, ht->size, ht->hash, ht->cmp,ht->internal_rgn); + result->elts = ht->elts; + + for (i = 0; i < ht->size; i++) + { + prev = &result->table[i]; + scan_bucket(ht->table[i], cur) + { + newbucket = ralloc(ht->r, struct bucket); + newbucket->key = cur->key; + newbucket->data = f(cur->key, cur->data, arg); + newbucket->next = NULL; + assert(!*prev); + *prev = newbucket; + prev = &newbucket->next; + } + } + return result; + /* + hash_table result; + int i; + bucket cur; + + result = make_hash_table(ht->r, ht->size, ht->hash, ht->cmp); + for (i = 0; i < ht->size; i++) + scan_bucket(ht->table[i], cur) + insist(hash_table_insert(result, cur->key, f(cur->key, cur->data, arg))); + return result; + */ +} + +static keycmp_fn cur_cmp = NULL; + +static int entry_cmp(const void *a, const void *b) +{ + struct sorted_entry *ae = (struct sorted_entry *) a; + struct sorted_entry *be = (struct sorted_entry *) b; + return cur_cmp(ae->k, be->k); +} + +/* Begin scanning ht in sorted order according to f */ +void hash_table_scan_sorted(hash_table ht, keycmp_fn f, + hash_table_scanner_sorted *htss) +{ + hash_table_scanner hts; + int i; + + htss->r = newregion(); + htss->size = hash_table_size(ht); + htss->entries = rarrayalloc(htss->r, htss->size, struct sorted_entry); + htss->i = 0; + + hash_table_scan(ht, &hts); + i = 0; + while (hash_table_next(&hts, &htss->entries[i].k, + &htss->entries[i].d)) + i++; + assert(i == htss->size); + cur_cmp = f; + qsort(htss->entries, htss->size, sizeof(struct sorted_entry), entry_cmp); + cur_cmp = NULL; +} + +/* Just like hash_table_next, but scans in sorted order */ +bool hash_table_next_sorted(hash_table_scanner_sorted *htss, hash_key *k, + hash_data *d) deletes +{ + if (htss->i < htss->size) + { + *k = htss->entries[htss->i].k; + *d = htss->entries[htss->i].d; + htss->i++; + return TRUE; + } + else + { + deleteregion(htss->r); + htss->r = NULL; + return FALSE; + } +} diff --git a/libbanshee/engine/hash.h b/libbanshee/engine/hash.h new file mode 100644 index 00000000000..eb93ac88b31 --- /dev/null +++ b/libbanshee/engine/hash.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef HASH_H +#define HASH_H + +#include <regions.h> +#include "bool.h" +/*#include "hash_info.h"*/ /* Includes hash_key, hash_data typedef */ +#include "linkage.h" + +EXTERN_C_BEGIN + +typedef void *hash_key; +typedef void *hash_data; + +/* Function to hash a key */ +typedef int (*hash_fn)(hash_key k); + +/* Function returning true iff k1 and k2 are equal */ +typedef bool (*keyeq_fn)(hash_key k1, hash_key k2); + +/* Function applied to elts in the hash table */ +typedef void (*hash_apply_fn)(hash_key k, hash_data d, void *arg); + +/* Function mapped to elts in the hash table */ +typedef hash_data (*hash_map_fn)(hash_key k, hash_data d, void *arg); + +typedef struct Hash_table *hash_table; + +/* Make a new hash table, with size buckets initially. */ +hash_table make_hash_table(region rhash, int size, hash_fn hash, + keyeq_fn cmp, bool internal_rgn); + +/* Make a hash table for strings. */ +hash_table make_string_hash_table(region rhash, int size, bool internal_rgn); + +/* Zero out ht. Doesn't reclaim bucket space. */ +void hash_table_reset(hash_table ht) deletes; + +/* Delete ht and internal memory associated with it. The top level pointer + must still be deleted. */ +void hash_table_delete(hash_table ht) deletes; + +/* Return the number of entries in ht */ +int hash_table_size(hash_table ht); + + +/* Lookup k in ht. If d is not NULL, returns corresponding data in *d. + Function result is TRUE if the k was in ht, false otherwise. */ +bool hash_table_lookup(hash_table ht, hash_key k, hash_data *d); + +/* Add k:d to ht. If k was already in ht, replace old entry by k:d. + Rehash if necessary. Returns TRUE if k was not already in ht. */ +bool hash_table_insert(hash_table ht, hash_key k, hash_data d) deletes; + +/* Remove mapping for k in ht. Returns TRUE if k was in ht. 
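+   Buckets are region-allocated, so removal only unlinks the entry;
+   the memory itself is reclaimed when the table's region is deleted.
+   Illustrative sketch:
+
+     hash_table_insert(ht, key, data);
+     ...
+     if (hash_table_remove(ht, key))
+       ... key was present and is now unmapped ...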
*/
+bool hash_table_remove(hash_table ht, hash_key k);
+
+/* Return a copy of ht, allocated in rhash */
+hash_table hash_table_copy(region rhash, hash_table ht);
+
+/* Apply f to all elements of ht, in some arbitrary order */
+void hash_table_apply(hash_table ht, hash_apply_fn f, void *arg);
+
+/* Map f to all elements of ht, creating a new hash table */
+hash_table hash_table_map(hash_table ht, hash_map_fn f, void *arg);
+
+typedef struct bucket *bucket;
+typedef struct
+{
+  hash_table ht;
+  int i;
+  bucket cur;
+} hash_table_scanner; /* Opaque type! Do not modify fields. */
+
+/* Begin scanning ht */
+void hash_table_scan(hash_table ht, hash_table_scanner *);
+
+/* Get next elt in table, storing the elt in *k and *d if k and d are
+   non-NULL, respectively. Returns TRUE if there is a next elt, FALSE
+   otherwise. */
+bool hash_table_next(hash_table_scanner *, hash_key *k, hash_data *d);
+
+/* Total order on hash table keys, only used for hash_table_scan_sorted */
+typedef int (*keycmp_fn)(hash_key k1, hash_key k2);
+
+struct sorted_entry
+{
+  hash_key k;
+  hash_data d;
+};
+
+typedef struct
+{
+  region r;
+  int i;
+  int size;
+  struct sorted_entry *entries;
+} hash_table_scanner_sorted;
+
+/* Begin scanning ht in sorted order according to f */
+void hash_table_scan_sorted(hash_table ht, keycmp_fn f,
+			    hash_table_scanner_sorted *htss);
+
+/* Just like hash_table_next, but scans in sorted order */
+bool hash_table_next_sorted(hash_table_scanner_sorted *htss, hash_key *k,
+			    hash_data *d) deletes;
+
+
+EXTERN_C_END
+
+#endif /* HASH_H */
diff --git a/libbanshee/engine/hashset.c b/libbanshee/engine/hashset.c
new file mode 100644
index 00000000000..27195e767bd
--- /dev/null
+++ b/libbanshee/engine/hashset.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2000-2001
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * + */ + +#include <stdlib.h> +#include <assert.h> +#include <math.h> + +#include "hashset.h" +#include "util.h" +#define INIT_TABLE_SIZE 2 +#define EMPTY_KEY 0 +#define UB(n) ((1<<n)-1) /* 2^n-1 */ +#define CAP(n) (1<<n) /* 2^n */ + +struct hash_set +{ + int *traditional table; + unsigned int ub; + unsigned int capacity; + unsigned int inserts; + unsigned int size; +}; + +static const int prime_1 = 83; +static const int prime_2 = 5189; +static const int init_table_size = INIT_TABLE_SIZE; +static const int empty_key = EMPTY_KEY; + +hash_set hs_create(region r) +{ + + hash_set hs = ralloc(r, struct hash_set); + + hs->ub = UB(init_table_size); + hs->size = init_table_size; + hs->capacity = CAP(init_table_size); + hs->table = (int *)calloc(hs->capacity, sizeof(int)); + hs->inserts = 0; + return hs; +} + +int hs_num_items(hash_set hs) +{ + return hs->inserts; +} + +int *hs_list_items(hash_set hs) +{ + return hs->table; +} + +static bool member(int *table, int ub, int i, int value) +{ + while (table[i] != empty_key) + { + if (table[i] == value) + return TRUE; + + else + i = ub & (i + prime_2); + } + return FALSE; +} + +static inline void reinsert(int *table, int ub, int value) +{ + int i; + + i = ub & (prime_1 * value); + + while (table[i] != empty_key) + { + /* possibly the value is already present */ + if (table[i] == value) + return; + + else + i = ub & (i + prime_2); + } + + table[i] = value; +} + +static bool member_or_insert(int *table, int ub, int i, int value) +{ + while (table[i] != empty_key) + { + if (table[i] == value) + return TRUE; + + else + i = ub & (i + prime_2); + } + table[i] = value; + return FALSE; +} + +static void rehash(hash_set hs) +{ + int *old_table; + int old_capacity, i; + + old_table = hs->table; + old_capacity = hs->capacity; + hs->capacity *= 2; + hs->ub = UB(++hs->size); + hs->table = (int *)calloc(hs->capacity, sizeof(int)); + assert(hs->table); + + + for (i = 0; i < old_capacity; i++) + { + reinsert(hs->table, hs->ub, old_table[i]); + } + + free(old_table); +} +/* +static void post_insert(hash_set hs) +{ + float percent_full; + + int capacity = hs->capacity; + int inserts = ++hs->inserts; + + printf("%d,%d->%f\n",inserts,capacity,percent_full); + assert(capacity); + percent_full = (float) inserts / capacity; + + + if (percent_full != percent_full) + { + assert (0); + } + + if (percent_full >= .85) + rehash(hs); +} +*/ + +static void post_insert(hash_set hs) +{ + int capacity = hs->capacity; + int inserts = ++hs->inserts; + + float percent_capacity = capacity * .85; + + /* + printf("%d,%d->%f\n",inserts,capacity,percent_capacity); + */ + + if ( (float) inserts >= percent_capacity) + { + rehash(hs); + } + +} + + +bool hs_query(hash_set hs, int entry) +{ + int hash; + int ub = hs->ub; + + hash = ub & (prime_1 * abs(entry)); + return member(hs->table, ub, hash, entry); +} + +bool hs_member(hash_set hs, int entry) +{ + int hash; + int ub = hs->ub; + + hash = ub & (prime_1 * abs(entry)); + if (member_or_insert(hs->table, ub, hash, entry)) + return TRUE; + + else + { + post_insert(hs); + return FALSE; + } +} + +void hs_delete(hash_set hs) +{ + free(hs->table); +} + + diff --git a/libbanshee/engine/hashset.h b/libbanshee/engine/hashset.h new file mode 100644 index 00000000000..0b06cd0a657 --- /dev/null +++ b/libbanshee/engine/hashset.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef HASHSET_H +#define HASHSET_H + +#include "linkage.h" +#include "banshee.h" + +EXTERN_C_BEGIN + +typedef struct hash_set *hash_set; + +hash_set hs_create(region r); +void hs_delete(hash_set); +bool hs_member(hash_set,int); /* adds the entry if not present */ +bool hs_query(hash_set,int); /* query only */ +int *hs_list_items(hash_set); +int hs_num_items(hash_set); + +EXTERN_C_END + +#endif /* HASHSET_H */ diff --git a/libbanshee/engine/jcollection.c b/libbanshee/engine/jcollection.c new file mode 100644 index 00000000000..8df072c8c8d --- /dev/null +++ b/libbanshee/engine/jcollection.c @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <assert.h> +#include "jcollection.h" +#include "hashset.h" +#include "termhash.h" + + +/* + static term_hash jcoll_hash; + */ + +struct jcoll_dict +{ + region r; + term_hash hash; + get_stamp_fn_ptr get_stamp; +}; + +enum jcoll_type +{ + j_single, + j_chain, + j_join +}; + +/* generic jcoll type */ +struct jcoll +{ + enum jcoll_type type; + stamp st; +}; + +struct jcoll_single +{ + enum jcoll_type type; + stamp st; + gen_e entry; +}; + +struct jcoll_chain +{ + enum jcoll_type type; + stamp st; + gen_e_list sameregion entries; +}; + +struct jcoll_join +{ + enum jcoll_type type; + stamp st; + jcoll_list sameregion joins; + gen_e_list sameregion cache; +}; + +typedef struct jcoll_single *jcoll_single; +typedef struct jcoll_chain *jcoll_chain; +typedef struct jcoll_join *jcoll_join; + +DEFINE_LIST(jcoll_list,jcoll) + + + +jcoll jcoll_new(jcoll_dict d, gen_e e) +{ + jcoll_single result = ralloc(d->r, struct jcoll_single); + result->type = j_single; + result->st = stamp_fresh(); + result->entry = e; + return (jcoll)result; +} + +jcoll jcoll_jjoin(jcoll_dict d,jcoll_list list) +{ + + if (jcoll_list_empty(list)) + return NULL; + else if (jcoll_list_length(list) == 1) + return jcoll_list_head(list); + + else + { + int i = 0, + length = jcoll_list_length(list) + 1; + stamp sts[length]; + jcoll_join result; + + jcoll_list_scanner scan; + jcoll temp; + + sts[i++] = j_join; + + jcoll_list_scan(list,&scan); + while (jcoll_list_next(&scan,&temp)) + { + stamp st = temp ? temp->st : 0; + sts[i++] = st; + } + qsort(&sts[1],length-1,sizeof(int),ptr_cmp); + + if ( NULL == (result = (jcoll_join)term_hash_find(d->hash,sts,length)) ) + { + result = ralloc(d->r,struct jcoll_join); + + result->type = j_join; + result->st = stamp_fresh(); + result->joins = list; + result->cache = new_gen_e_list(d->r); + term_hash_insert(d->hash,(gen_e)result,sts,length); + } + return (jcoll)result; + } + +} + +/* + Hash chains + */ +jcoll jcoll_create_chain(jcoll_dict d, gen_e_list elems) +{ + int i = 0, + length = gen_e_list_length(elems) + 1; + stamp sts[length]; + gen_e_list_scanner scan; + gen_e temp; + jcoll_chain result; + + sts[i++] = j_chain; + + gen_e_list_scan(elems,&scan); + while (gen_e_list_next(&scan,&temp)) + { + sts[i++] = d->get_stamp(temp); + } + qsort(&sts[1],length-1,sizeof(int),ptr_cmp); /* FIX, first pos should always be chain */ + + if ( NULL == (result = (jcoll_chain)term_hash_find(d->hash,sts,length)) ) + { + result = ralloc(d->r,struct jcoll_chain); + result->type = j_chain; + result->st = stamp_fresh(); + result->entries = elems; + term_hash_insert(d->hash,(gen_e)result,sts, + length); + } + return (jcoll)result; +} + +typedef void (*japp_fn_ptr) (void *, void *); + +static void app_aux(hash_set h, get_stamp_fn_ptr get_stamp, japp_fn_ptr app, + jcoll j, void *data) +{ + if (! j) + return; + + switch(j->type) + { + case j_single: + { + jcoll_single single = (jcoll_single) j; + + if (! 
hs_member(h,get_stamp(single->entry)) ) + app(single->entry, data); + } + break; + case j_chain: + { + jcoll_chain chain = (jcoll_chain) j; + + if (! hs_member(h,chain->st) ) + { + gen_e_list_scanner scan; + gen_e entry; + + gen_e_list_scan(chain->entries, &scan); + while (gen_e_list_next(&scan, &entry)) + { + if (! hs_member(h, get_stamp(entry)) ) + app(entry, data); + } + + } + + } + break; + case j_join: + { + jcoll_join join = (jcoll_join) j; + + if (! hs_member(h, join->st)) + { + if (! gen_e_list_empty(join->cache)) + { + gen_e_list_scanner scan; + gen_e entry; + + gen_e_list_scan(join->cache, &scan); + while (gen_e_list_next(&scan, &entry)) + { + if (! hs_member(h, get_stamp(entry)) ) + app(entry, data); + } + } + else + { + jcoll_list_scanner scan; + jcoll temp; + + jcoll_list_scan(join->joins, &scan); + while (jcoll_list_next(&scan,&temp)) + { + app_aux(h,get_stamp,app,temp, data); + } + + } + } + + } + break; + } + +} + +static void jcoll_app(jcoll_dict d, japp_fn_ptr app, jcoll j, void *data) deletes +{ + region scratch_rgn = newregion(); + hash_set hash = hs_create(scratch_rgn); + app_aux(hash,d->get_stamp, app, j, data); + hs_delete(hash); + deleteregion(scratch_rgn); +} + static void jcoll_accum(void *e, void *accum) + { + gen_e_list_cons((gen_e) e, (gen_e_list) accum); + } + +gen_e_list jcoll_flatten(jcoll_dict d, jcoll j) deletes +{ + + gen_e_list accum = NULL; + + + if (j == NULL) + return new_gen_e_list(d->r); + + switch (j->type) + { + case j_single: + { + jcoll_single single = (jcoll_single)j; + + accum = new_gen_e_list(d->r); + gen_e_list_cons(single->entry,accum); + } + break; + case j_chain: + { + jcoll_chain chain = (jcoll_chain)j; + /* accum = gen_e_list_copy(r,chain->entries); */ + accum = chain->entries; + } + break; + case j_join: + { + jcoll_join join = (jcoll_join)j; + + if (! gen_e_list_empty(join->cache)) + return join->cache; + else + { + accum = new_gen_e_list(d->r); + jcoll_app(d, jcoll_accum,j, accum); + + gen_e_list_append(join->cache,accum /* gen_e_list_copy(r,accum)*/); + } + } + break; + } + + return accum; +} + +jcoll_dict jcoll_create_dict(region r,get_stamp_fn_ptr get_stamp) +{ + jcoll_dict result = ralloc(r,struct jcoll_dict); + + result->r = r; + result->hash = make_term_hash(r); + result->get_stamp = get_stamp; + return result; +} + + +void jcoll_delete_dict(jcoll_dict d) +{ + term_hash_delete(d->hash); +} diff --git a/libbanshee/engine/jcollection.h b/libbanshee/engine/jcollection.h new file mode 100644 index 00000000000..b1f0e717d4e --- /dev/null +++ b/libbanshee/engine/jcollection.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef JCOLLECTION_H +#define JCOLLECTION_H + +#include "linkage.h" +#include "banshee.h" + +EXTERN_C_BEGIN + +typedef struct jcoll *jcoll; + +typedef struct jcoll_dict *jcoll_dict; + +DECLARE_LIST(jcoll_list,jcoll) + +jcoll jcoll_new(jcoll_dict d, gen_e e); +jcoll jcoll_jjoin(jcoll_dict d, jcoll_list list); +gen_e_list jcoll_flatten(jcoll_dict d, jcoll j) deletes; +jcoll jcoll_create_chain(jcoll_dict d, gen_e_list elems); + +jcoll_dict jcoll_create_dict(region r,get_stamp_fn_ptr get_stamp); +void jcoll_delete_dict(jcoll_dict d); + +EXTERN_C_END + +#endif /* JCOLLECTION_H */ + + + + + + + + diff --git a/libbanshee/engine/linkage.h b/libbanshee/engine/linkage.h new file mode 100644 index 00000000000..ef70bbe2468 --- /dev/null +++ b/libbanshee/engine/linkage.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + +#ifndef LINKAGE_H +#define LINKAGE_H + +#ifdef __cplusplus +# define EXTERN_C extern "C" +# define EXTERN_C_BEGIN extern "C" { +# define EXTERN_C_END } +#else +# define EXTERN_C +# define EXTERN_C_BEGIN +# define EXTERN_C_END +#endif + +#endif diff --git a/libbanshee/engine/list.c b/libbanshee/engine/list.c new file mode 100644 index 00000000000..3972e2dd904 --- /dev/null +++ b/libbanshee/engine/list.c @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ *
+ */
+
+#include <assert.h>
+#include "list.h"
+#include "util.h"
+
+struct list_node
+{
+  void *data;
+  struct list_node *sameregion next;
+};
+
+#define scan_node(b,var) for (var = b; var; var = var->next)
+
+struct list
+{
+  region sameregion r;
+  int length;
+  list_node sameregion head;
+};
+
+struct list *new_list(region r)
+{
+  struct list *result;
+
+  assert(r);
+
+  result = ralloc(r,struct list);
+  result->r = r;
+  result->length = 0;
+  result->head = NULL;
+
+  return result;
+}
+
+int list_size(struct list *l)
+{
+  return l->length;
+}
+
+struct list *list_cons(void *data, struct list *l)
+{
+  list_node newnode = ralloc(l->r, struct list_node);
+  newnode->next = l->head;
+  newnode->data = data;
+
+  l->head = newnode;
+  l->length++;
+
+  return l;
+}
+
+struct list *list_reverse(struct list *l)
+{
+  if (list_empty(l))
+    return l;
+
+  else
+    {
+      list_node temp,reversed = NULL;
+
+      while (l->head)
+        {
+          temp = l->head->next;
+          l->head->next = reversed;
+          reversed = l->head;
+          l->head = temp;
+        }
+
+      l->head = reversed;
+      return l;
+    }
+}
+
+bool list_empty(struct list *l)
+{
+  return (l->head == NULL);
+}
+
+static inline list_node tail(list_node n)
+{
+  if (n == NULL)
+    return NULL;
+  else
+    {
+      list_node temp = NULL,
+        tail = NULL;
+
+      scan_node(n,temp)
+        tail = temp;
+
+      assert(tail && tail->next == NULL);
+
+      return tail;
+    }
+}
+
+struct list *list_append(struct list *a, struct list *b)
+{
+  list_node tl;
+
+  assert( a && b );
+  assert( a != b);
+  assert( ptr_eq(a->r,b->r) );
+
+  tl = tail(a->head);
+
+  if (! tl)
+    {
+      a->head = b->head;
+      a->length = b->length;
+    }
+  else
+    {
+      tl->next = b->head;
+      a->length += b->length;
+    }
+  return a;
+}
+
+struct list *list_app(struct list *l,app_fn app)
+{
+  list_node n = NULL;
+
+  assert(l);
+
+  scan_node(l->head,n)
+    {
+      app(n->data);
+    }
+  return l;
+}
+
+void *list_find(struct list *l,eq_fn eq)
+{
+  list_node n = NULL;
+  assert(l);
+
+  scan_node(l->head,n)
+    {
+      if (eq(n->data))
+        return n->data;   /* return the element itself, not the node */
+    }
+
+  return NULL;
+}
+
+struct list *list_tail(struct list *l)
+{
+  l->length--;
+  l->head = l->head->next;
+  return l;
+}
+
+void *list_head(struct list *l)
+{
+  return l->head->data;
+}
+
+struct list *list_filter(region r,struct list *l,eq_fn eq)
+{
+  struct list *result;
+  list_node n = NULL;
+  assert(l);
+
+  result = new_list(r);
+
+  scan_node(l->head,n)
+    {
+      if (eq(n->data))
+        list_cons(n->data,result);
+    }
+
+  return result;
+}
+
+struct list *list_keep(struct list *l, eq_fn eq)
+{
+  list_node prev, n;
+  assert(l);
+
+  /* Drop leading nodes that fail the predicate, keeping the length
+     field in sync. */
+  while (l->head && !eq(l->head->data))
+    {
+      l->head = l->head->next;
+      l->length--;
+    }
+
+  if (l->head == NULL)  /* every node was dropped */
+    return l;
+
+  prev = l->head;
+  scan_node(l->head->next,n)
+    {
+      if (!eq(n->data))
+        {
+          prev->next = n->next;
+          l->length--;
+        }
+      else prev = n;
+    }
+  return l;
+}
+
+struct list *list_filter2(struct list *l,eq_fn eq)
+{
+  return list_filter(l->r,l,eq);
+}
+
+struct list *list_copy(region r, struct list *l)
+{
+  struct list *result;
+  list_node n = NULL;
+#ifndef NDEBUG
+  int count = 0;
+#endif
+  assert(l);
+
+  result = new_list(r);
+
+  scan_node(l->head,n)
+    {
+      list_cons(n->data,result);
+      assert(++count <= l->length);
+    }
+
+  return list_reverse(result);
+}
+/* A Linked-List Memory Sort
+   by Philip J. Erdelsky
+   pje@acm.org
+   http://www.alumni.caltech.edu/~pje/
+*/
+
+#include <stdio.h>
+
+/* Generic in-place merge sort for singly linked records.  Each record
+   is treated as an array of pointers; `index' selects the pointer-sized
+   slot that holds the next-link.  `data' is passed through to the
+   comparator unchanged. */
+static void *sort_linked_list(void *p, unsigned index,
+                              int (*compare)(const void *,const void *, comparator_fn), long *pcount, comparator_fn data)
+{
+  unsigned base;
+  unsigned long block_size;
+
+  struct record
+  {
+    struct record *next[1];
+    /* other members not directly accessed by this function */
+  };
+
+  struct tape
+  {
+    struct record *first, *last;
+    unsigned long count;
+  } tape[4];
+
+  /* Distribute the records alternately to tape[0] and tape[1]. */
+
+  tape[0].count = tape[1].count = 0L;
+  tape[0].first = NULL;
+  base = 0;
+  while (p != NULL)
+    {
+      struct record *next = ((struct record *)p)->next[index];
+      ((struct record *)p)->next[index] = tape[base].first;
+      tape[base].first = ((struct record *)p);
+      tape[base].count++;
+      p = next;
+      base ^= 1;
+    }
+
+  /* If the list is empty or contains only a single record, then */
+  /* tape[1].count == 0L and this part is vacuous.               */
+
+  for (base = 0, block_size = 1L; tape[base+1].count != 0L;
+       base ^= 2, block_size <<= 1)
+    {
+      int dest;
+      struct tape *tape0, *tape1;
+      tape0 = tape + base;
+      tape1 = tape + base + 1;
+      dest = base ^ 2;
+      tape[dest].count = tape[dest+1].count = 0;
+      for (; tape0->count != 0; dest ^= 1)
+        {
+          unsigned long n0, n1;
+          struct tape *output_tape = tape + dest;
+          n0 = n1 = block_size;
+          while (1)
+            {
+              struct record *chosen_record;
+              struct tape *chosen_tape;
+              if (n0 == 0 || tape0->count == 0)
+                {
+                  if (n1 == 0 || tape1->count == 0)
+                    break;
+                  chosen_tape = tape1;
+                  n1--;
+                }
+              else if (n1 == 0 || tape1->count == 0)
+                {
+                  chosen_tape = tape0;
+                  n0--;
+                }
+              else if ((*compare)(tape0->first, tape1->first, data) > 0)
+                {
+                  chosen_tape = tape1;
+                  n1--;
+                }
+              else
+                {
+                  chosen_tape = tape0;
+                  n0--;
+                }
+              chosen_tape->count--;
+              chosen_record = chosen_tape->first;
+              chosen_tape->first = chosen_record->next[index];
+              if (output_tape->count == 0)
+                output_tape->first = chosen_record;
+              else
+                output_tape->last->next[index] = chosen_record;
+              output_tape->last = chosen_record;
+              output_tape->count++;
+            }
+        }
+    }
+
+  if (tape[base].count > 1L)
+    tape[base].last->next[index] = NULL;
+  if (pcount != NULL)
+    *pcount = tape[base].count;
+  return tape[base].first;
+}
+
+static int compare(const void *node1, const void *node2, comparator_fn data)
+{
+  comparator_fn cmp = (comparator_fn) data;
+  return cmp(((struct list_node *)node1)->data,
+             ((struct list_node *)node2)->data);
+}
+
+struct list *list_sort(struct list *l, comparator_fn cmp)
+{
+  long pcount;
+  /* index 1: the next-link is the second pointer-sized field of
+     struct list_node (data is the first). */
+  l->head = sort_linked_list(l->head,1,compare,&pcount, cmp);
+  assert(pcount == l->length);
+  return l;
+}
+
+struct list *list_merge(struct list *a,struct list *b, comparator_fn cmp)
+{
+  return list_sort( list_append(a,b),cmp);
+}
+
+void list_scan(struct list *a,struct list_scanner *scan)
+{
+  scan->l = a;
+  scan->cur = a->head;
+}
+
+bool list_next(struct list_scanner *scan, void **data)
+{
+  if (!scan->cur)
+    return FALSE;
+  else
+    {
+      if (data)
+        *data = scan->cur->data;
+      scan->cur = scan->cur->next;
+      return TRUE;
+    }
+}
+
+void list_clear(struct list *l)
+{
+  l->head = NULL;
+  l->length = 0;
+}
+
+bool list_member(struct list *l,void *data)
+{
+  list_node n = NULL;
+  scan_node(l->head,n)
+    {
+      if (n->data == data)
+        return TRUE;
+    }
+  return FALSE;
+}
+
+struct list *list_from_array(region r,void **data, int length)
+{
+  struct list *result = new_list(r);
+  int i;
+
+  for (i = length -1; i >= 0; i--)
+    {
+      list_cons(data[i],result);
+    }
+  return result;
+}
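
A minimal usage sketch of the list module above (an illustrative
fragment; it assumes only the declarations in list.h, which follows,
and regions.h's newregion/deleteregion, already used by jcollection.c;
the comparator and the integer payloads are invented for the example):

#include <regions.h>
#include "list.h"

/* Illustrative comparator: order elements by the small integers
   stored directly in the pointer slots. */
static int int_cmp(const void *a, const void *b)
{
  return (int)(long)a - (int)(long)b;
}

static void list_demo(void)
{
  region r = newregion();
  struct list *l = new_list(r);
  struct list_scanner scan;
  void *item;

  /* list_cons pushes on the front, so this builds the list 3,1,2. */
  list_cons((void *)2, l);
  list_cons((void *)1, l);
  list_cons((void *)3, l);

  l = list_sort(l, int_cmp);    /* in-place merge sort: 1,2,3 */

  list_scan(l, &scan);
  while (list_next(&scan, &item))
    {
      /* visit each element in ascending order */
    }

  deleteregion(r);              /* reclaims every node at once */
}

The DECLARE_LIST/DEFINE_LIST macros in list.h wrap this untyped
interface in a per-element-type one; jcollection.c instantiates it as
DEFINE_LIST(jcoll_list,jcoll).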
+ + diff --git a/libbanshee/engine/list.h b/libbanshee/engine/list.h new file mode 100644 index 00000000000..1dfae50c0f1 --- /dev/null +++ b/libbanshee/engine/list.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef LIST_H +#define LIST_H + +#include <regions.h> +#include "bool.h" + +typedef void *list_data; +typedef void (*app_fn) (void *); +typedef bool (*eq_fn)(const void *); +typedef int (*comparator_fn)(const void *,const void *); + +struct list; + +typedef struct list_node *list_node; + +struct list_scanner +{ + struct list *l; + list_node cur; +}; /* Opaque type. 
Do not modify fields */ + + +struct list *new_list(region r); +int list_size(struct list *a); +struct list *list_cons(void *data, struct list *a); +struct list *list_append(struct list *a, struct list *b); +struct list *list_app(struct list *a,app_fn app); +void *list_find(struct list *a,eq_fn eq); +void *list_head(struct list *a); +struct list *list_tail(struct list *a); +struct list *list_filter(region r,struct list *a,eq_fn eq); +struct list *list_filter2(struct list *a,eq_fn eq); +struct list *list_keep(struct list *a,eq_fn eq); +struct list *list_copy(region r, struct list *a); +struct list *list_sort(struct list *a, comparator_fn cmp); +struct list *list_merge(struct list *a,struct list *b, comparator_fn cmp); +void list_scan(struct list *a,struct list_scanner *scan); +bool list_next(struct list_scanner *scan, void **data); +bool list_empty(struct list *a); +bool list_member(struct list *a, void *data); +void list_clear(struct list *a); +struct list *list_reverse(struct list *a); +struct list *list_from_array(region r,void **data, int length); + +#define DECLARE_OPAQUE_LIST(name,type) \ +typedef struct list_scanner name ## _scanner; \ +typedef void (* name ## _app_fn) (type); \ +typedef bool (* name ## _eq_fn) (const type); \ +typedef int (* name ## _comparator_fn)(const type,const type); \ +name new_ ## name(region r); \ +int name ## _length(name a); \ +name name ## _cons(type data, name a); \ +name name ## _append(name a, name b); \ +name name ## _app(name a, name ## _app_fn app); \ +type name ## _find(name a, name ## _eq_fn eq); \ +type name ## _head(name a); \ +name name ## _tail(name a); \ +name name ## _filter(region r,name a, name ## _eq_fn eq); \ +name name ## _filter2(name a, name ## _eq_fn eq); \ +name name ## _keep(name a, name ## _eq_fn eq); \ +name name ## _copy(region r, name a); \ +name name ## _sort(name a, name ## _comparator_fn cmp); \ +name name ## _merge(name a,name b, name ## _comparator_fn cmp); \ +void name ## _scan(name a, name ##_scanner *scan); \ +bool name ## _next(name ##_scanner *scan, type *data); \ +bool name ## _empty(name a); \ +void name ## _clear(name a); \ +bool name ## _member(name a, type data); \ +name name ## _reverse(name a); \ +name name ## _from_array(region r,type data[], int length); + +#define DECLARE_LIST(name,type) \ +typedef struct name ## _a *name; \ +typedef struct list_scanner name ## _scanner; \ +typedef void (* name ## _app_fn) (type); \ +typedef bool (* name ## _eq_fn) (const type); \ +typedef int (* name ## _comparator_fn)(const type,const type); \ +name new_ ## name(region r); \ +int name ## _length(name a); \ +name name ## _cons(type data, name a); \ +name name ## _append(name a, name b); \ +name name ## _app(name a, name ## _app_fn app); \ +type name ## _find(name a, name ## _eq_fn eq); \ +type name ## _head(name a); \ +name name ## _tail(name a); \ +name name ## _filter(region r,name a, name ## _eq_fn eq); \ +name name ## _filter2(name a, name ## _eq_fn eq); \ +name name ## _keep(name a, name ## _eq_fn eq); \ +name name ## _copy(region r, name a); \ +name name ## _sort(name a, name ## _comparator_fn cmp); \ +name name ## _merge(name a,name b, name ## _comparator_fn cmp); \ +void name ## _scan(name a, name ##_scanner *scan); \ +bool name ## _next(name ##_scanner *scan, type *data); \ +bool name ## _empty(name a); \ +void name ## _clear(name a); \ +bool name ## _member(name a, type data); \ +name name ## _reverse(name a); \ +name name ## _from_array(region r,type data[], int length); + +#define DEFINE_LIST(name,type) \ +name 
new_ ## name(region r) \ +{ \ + return (name)new_list(r); \ +} \ +int name ## _length(name a) \ +{ \ + return list_size((struct list *)a); \ +} \ +name name ## _cons(type data, name a) \ +{ \ + return (name)list_cons((void *)data,(struct list *) a ); \ +}\ +name name ## _append(name a, name b) \ +{ \ + return (name)list_append((struct list *)a,(struct list *)b); \ +} \ +name name ## _app(name a, name ## _app_fn app) \ +{ \ + return (name)list_app((struct list *) a, (app_fn) app); \ +} \ +type name ## _find(name a, name ## _eq_fn eq) \ +{ \ + return (type)list_find((struct list *)a, (eq_fn) eq); \ +} \ +name name ## _tail(name a) \ +{\ + return (name)list_tail((struct list *)a);\ +}\ +type name ## _head(name a) \ +{ \ + return (type)list_head((struct list *)a); \ +} \ +name name ## _filter(region r,name a, name ## _eq_fn eq) \ +{ \ + return (name)list_filter(r,(struct list *)a, (eq_fn) eq); \ +} \ +name name ## _keep(name a, name ## _eq_fn eq) \ +{ \ + return (name)list_keep((struct list *)a, (eq_fn) eq); \ +} \ +name name ## _filter2(name a, name ## _eq_fn eq) \ +{ \ + return (name)list_filter2((struct list *)a, (eq_fn) eq); \ +} \ +name name ## _copy(region r, name a) \ +{ \ + return (name)list_copy(r,(struct list *) a); \ +} \ +name name ## _sort(name a, name ## _comparator_fn cmp) \ +{ \ + return (name)list_sort((struct list *)a,(comparator_fn) cmp); \ +} \ +name name ## _merge(name a,name b, name ## _comparator_fn cmp) \ +{ \ + return (name)list_merge((struct list *)a,(struct list *)b,(comparator_fn)cmp); \ +} \ +void name ## _scan(name a, name ##_scanner *scan) \ +{ \ + list_scan((struct list *)a,(struct list_scanner *)scan);\ +}\ +bool name ## _next(name ##_scanner *scan, type *data) \ +{ \ + return list_next((struct list_scanner *)scan, (void **)data); \ +} \ +bool name ## _empty(name a) \ +{ \ + return list_empty((struct list *)a); \ +} \ +void name ## _clear(name a) \ +{ \ + list_clear((struct list *)a); \ +} \ +bool name ## _member(name a, type data) \ +{ \ + return list_member((struct list *)a,(void *)data); \ +} \ +name name ## _reverse(name a) \ +{\ + return (name)list_reverse((struct list *)a);\ +}\ +name name ## _from_array(region r,type data[], int length) \ +{\ + return (name)list_from_array(r,(void **)data,length); \ +}\ + +#endif /* LIST_H */ diff --git a/libbanshee/engine/malloc.c b/libbanshee/engine/malloc.c new file mode 100644 index 00000000000..84facdb0160 --- /dev/null +++ b/libbanshee/engine/malloc.c @@ -0,0 +1,5400 @@ +/* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain. Use, modify, and + redistribute this code without permission or acknowledgement in any + way you wish. Send questions, comments, complaints, performance + data, etc to dl@cs.oswego.edu + +* VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) + + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + +* Quickstart + + This library is all in one file to simplify the most common usage: + ftp it, compile it (-O), and link it into another program. All + of the compile-time options default to reasonable values for use on + most unix platforms. Compile -DWIN32 for reasonable defaults on windows. + You might later want to step through various compile-time and dynamic + tuning options. 
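+
+  As an illustrative minimal build (invented command lines; the
+  USE_DL_PREFIX option is documented further below):
+
+      cc -O2 -c malloc.c                    drop-in replacement malloc
+      cc -O2 -DUSE_DL_PREFIX -c malloc.c    dl-prefixed, coexists with libc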
+ + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.0.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. + +* Why use this malloc? + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. However it is among the fastest + while also being among the most space-conserving, portable and tunable. + Consistent balance across these factors results in a good general-purpose + allocator for malloc-intensive programs. + + The main properties of the algorithms are: + * For large (>= 512 bytes) requests, it is a pure best-fit allocator, + with ties normally decided via FIFO (i.e. least recently used). + * For small (<= 64 bytes by default) requests, it is a caching + allocator, that maintains pools of quickly recycled chunks. + * In between, and for combinations of large and small requests, it does + the best it can trying to meet both goals at once. + * For very large requests (>= 128KB by default), it relies on system + memory mapping facilities, if supported. + + For a longer but slightly out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + + You may already by default be using a C library containing a malloc + that is based on some version of this malloc (for example in + linux). You might still want to use the one in this file in order to + customize settings or to avoid overheads associated with library + versions. + +* Contents, described in more detail in "description of public routines" below. + + Standard (ANSI/SVID/...) functions: + malloc(size_t n); + calloc(size_t n_elements, size_t element_size); + free(Void_t* p); + realloc(Void_t* p, size_t n); + memalign(size_t alignment, size_t n); + valloc(size_t n); + mallinfo() + mallopt(int parameter_number, int parameter_value) + + Additional functions: + independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]); + independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]); + pvalloc(size_t n); + cfree(Void_t* p); + malloc_trim(size_t pad); + malloc_usable_size(Void_t* p); + malloc_stats(); + +* Vital statistics: + + Supported pointer representation: 4 or 8 bytes + Supported size_t representation: 4 or 8 bytes + Note that size_t is allowed to be 4 bytes even if pointers are 8. + You can adjust this by defining INTERNAL_SIZE_T + + Alignment: 2 * sizeof(size_t) (default) + (i.e., 8 byte alignment with 4byte size_t). This suffices for + nearly all current machines and C compilers. However, you can + define MALLOC_ALIGNMENT to be wider than this if necessary. + + Minimum overhead per allocated chunk: 4 or 8 bytes + Each malloced chunk has a hidden word of overhead holding size + and status information. + + Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead) + 8-byte ptrs: 24/32 bytes (including, 4/8 overhead) + + When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte + ptrs but 4 byte size) or 24 (for 8/8) additional bytes are + needed; 4 (8) for a trailing size field and 8 (16) bytes for + free list pointers. Thus, the minimum allocatable size is + 16/24/32 bytes. 
+
+  Even a request for zero bytes (i.e., malloc(0)) returns a
+  pointer to something of the minimum allocatable size.
+
+  The maximum overhead wastage (i.e., number of extra bytes
+  allocated than were requested in malloc) is less than or equal
+  to the minimum size, except for requests >= mmap_threshold that
+  are serviced via mmap(), where the worst case wastage is 2 *
+  sizeof(size_t) bytes plus the remainder from a system page (the
+  minimal mmap unit); typically 4096 or 8192 bytes.
+
+  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
+                           8-byte size_t: 2^64 minus about two pages
+
+  It is assumed that (possibly signed) size_t values suffice to
+  represent chunk sizes. `Possibly signed' is due to the fact
+  that `size_t' may be defined on a system as either a signed or
+  an unsigned type. The ISO C standard says that it must be
+  unsigned, but a few systems are known not to adhere to this.
+  Additionally, even when size_t is unsigned, sbrk (which is by
+  default used to obtain memory from system) accepts signed
+  arguments, and may not be able to handle size_t-wide arguments
+  with negative sign bit.  Generally, values that would
+  appear as negative after accounting for overhead and alignment
+  are supported only via mmap(), which does not have this
+  limitation.
+
+  Requests for sizes outside the allowed range will perform an optional
+  failure action and then return null. (Requests may also
+  fail because a system is out of memory.)
+
+  Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined
+
+       When USE_MALLOC_LOCK is defined, wrappers are created to
+       surround every public call with either a pthread mutex or
+       a win32 spinlock (depending on WIN32). This is not
+       especially fast, and can be a major bottleneck.
+       It is designed only to provide minimal protection
+       in concurrent environments, and to provide a basis for
+       extensions.  If you are using malloc in a concurrent program,
+       you would be far better off obtaining ptmalloc, which is
+       derived from a version of this malloc, and is well-tuned for
+       concurrent programs. (See http://www.malloc.de)
+
+  Compliance: I believe it is compliant with the 1997 Single Unix Specification
+       (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
+       others as well.
+
+* Synopsis of compile-time options:
+
+    People have reported using previous versions of this malloc on all
+    versions of Unix, sometimes by tweaking some of the defines
+    below. It has been tested most extensively on Solaris and
+    Linux. It is also reported to work on WIN32 platforms.
+    People also report using it in stand-alone embedded systems.
+
+    The implementation is in straight, hand-tuned ANSI C.  It is not
+    at all modular. (Sorry!)  It uses a lot of macros.  To be at all
+    usable, this code should be compiled using an optimizing compiler
+    (for example gcc -O3) that can simplify expressions and control
+    paths. (FAQ: some macros import variables as arguments rather than
+    declare locals because people reported that some debuggers
+    otherwise get confused.)
+ + OPTION DEFAULT VALUE + + Compilation Environment options: + + __STD_C derived from C compiler defines + WIN32 NOT defined + HAVE_MEMCPY defined + USE_MEMCPY 1 if HAVE_MEMCPY is defined + HAVE_MMAP defined as 1 + MMAP_CLEARS 1 + HAVE_MREMAP 0 unless linux defined + malloc_getpagesize derived from system #includes, or 4096 if not + HAVE_USR_INCLUDE_MALLOC_H NOT defined + LACKS_UNISTD_H NOT defined unless WIN32 + LACKS_SYS_PARAM_H NOT defined unless WIN32 + LACKS_SYS_MMAN_H NOT defined unless WIN32 + + Changing default word sizes: + + INTERNAL_SIZE_T size_t + MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T) + + Configuration and functionality options: + + USE_DL_PREFIX NOT defined + USE_PUBLIC_MALLOC_WRAPPERS NOT defined + USE_MALLOC_LOCK NOT defined + DEBUG NOT defined + REALLOC_ZERO_BYTES_FREES NOT defined + MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op + TRIM_FASTBINS 0 + + Options for customizing MORECORE: + + MORECORE sbrk + MORECORE_CONTIGUOUS 1 + MORECORE_CANNOT_TRIM NOT defined + MMAP_AS_MORECORE_SIZE (1024 * 1024) + + Tuning options that are also dynamically changeable via mallopt: + + DEFAULT_MXFAST 64 + DEFAULT_TRIM_THRESHOLD 128 * 1024 + DEFAULT_TOP_PAD 0 + DEFAULT_MMAP_THRESHOLD 128 * 1024 + DEFAULT_MMAP_MAX 65536 + + There are several other #defined constants and macros that you + probably don't want to touch unless you are extending or adapting malloc. +*/ + +/* + WIN32 sets up defaults for MS environment and compilers. + Otherwise defaults are for unix. +*/ + +/* #define WIN32 */ + +#ifdef WIN32 + +#define WIN32_LEAN_AND_MEAN +#include <windows.h> + +/* Win32 doesn't supply or need the following headers */ +#define LACKS_UNISTD_H +#define LACKS_SYS_PARAM_H +#define LACKS_SYS_MMAN_H + +/* Use the supplied emulation of sbrk */ +#define MORECORE sbrk +#define MORECORE_CONTIGUOUS 1 +#define MORECORE_FAILURE ((void*)(-1)) + +/* Use the supplied emulation of mmap and munmap */ +#define HAVE_MMAP 1 +#define MUNMAP_FAILURE (-1) +#define MMAP_CLEARS 1 + +/* These values don't really matter in windows mmap emulation */ +#define MAP_PRIVATE 1 +#define MAP_ANONYMOUS 2 +#define PROT_READ 1 +#define PROT_WRITE 2 + +/* Emulation functions defined at the end of this file */ + +/* If USE_MALLOC_LOCK, use supplied critical-section-based lock functions */ +#ifdef USE_MALLOC_LOCK +static int slwait(int *sl); +static int slrelease(int *sl); +#endif + +static long getpagesize(void); +static long getregionsize(void); +static void *sbrk(long size); +static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg); +static long munmap(void *ptr, long size); + +static void vminfo (unsigned long *free, unsigned long *reserved, unsigned long *committed); +static int cpuinfo (int whole, unsigned long *kernel, unsigned long *user); + +#endif + +/* + __STD_C should be nonzero if using ANSI-standard C compiler, a C++ + compiler, or a C compiler sufficiently close to ANSI to get away + with it. +*/ + +#ifndef __STD_C +#if defined(__STDC__) || defined(_cplusplus) +#define __STD_C 1 +#else +#define __STD_C 0 +#endif +#endif /*__STD_C*/ + + +/* + Void_t* is the pointer type that malloc should say it returns +*/ + +#ifndef Void_t +#if (__STD_C || defined(WIN32)) +#define Void_t void +#else +#define Void_t char +#endif +#endif /*Void_t*/ + +#if __STD_C +#include <stddef.h> /* for size_t */ +#else +#include <sys/types.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. 
*/
+
+/* #define LACKS_UNISTD_H */
+
+#ifndef LACKS_UNISTD_H
+#include <unistd.h>
+#endif
+
+/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
+
+/* #define LACKS_SYS_PARAM_H */
+
+
+#include <stdio.h>    /* needed for malloc_stats */
+#include <errno.h>    /* needed for optional MALLOC_FAILURE_ACTION */
+
+
+/*
+  Debugging:
+
+  Because freed chunks may be overwritten with bookkeeping fields, this
+  malloc will often die when freed memory is overwritten by user
+  programs.  This can be very effective (albeit in an annoying way)
+  in helping track down dangling pointers.
+
+  If you compile with -DDEBUG, a number of assertion checks are
+  enabled that will catch more memory errors. You probably won't be
+  able to make much sense of the actual assertion errors, but they
+  should help you locate incorrectly overwritten memory.  The
+  checking is fairly extensive, and will slow down execution
+  noticeably. Calling malloc_stats or mallinfo with DEBUG set will
+  attempt to check every non-mmapped allocated and free chunk in the
+  course of computing the summaries. (By nature, mmapped regions
+  cannot be checked very much automatically.)
+
+  Setting DEBUG may also be helpful if you are trying to modify
+  this code. The assertions in the check routines spell out in more
+  detail the assumptions and invariants underlying the algorithms.
+
+  Setting DEBUG does NOT provide an automated mechanism for checking
+  that all accesses to malloced memory stay within their
+  bounds. However, there are several add-ons and adaptations of this
+  or other mallocs available that do this.
+*/
+
+#if DEBUG
+#include <assert.h>
+#else
+#define assert(x) ((void)0)
+#endif
+
+
+/*
+  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
+  of chunk sizes.
+
+  The default version is the same as size_t.
+
+  While not strictly necessary, it is best to define this as an
+  unsigned type, even if size_t is a signed type. This may avoid some
+  artificial size limitations on some systems.
+
+  On a 64-bit machine, you may be able to reduce malloc overhead by
+  defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
+  expense of not being able to handle more than 2^32 of malloced
+  space. If this limitation is acceptable, you are encouraged to set
+  this unless you are on a platform requiring 16byte alignments. In
+  this case the alignment requirements turn out to negate any
+  potential advantages of decreasing size_t word size.
+
+  Implementors: Beware of the possible combinations of:
+     - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
+       and might be the same width as int or as long
+     - size_t might have different width and signedness than INTERNAL_SIZE_T
+     - int and long might be 32 or 64 bits, and might be the same width
+  To deal with this, most comparisons and difference computations
+  among INTERNAL_SIZE_Ts should cast them to unsigned long, being
+  aware of the fact that casting an unsigned int to a wider long does
+  not sign-extend. (This also makes checking for negative numbers
+  awkward.) Some of these casts result in harmless compiler warnings
+  on some systems.
+*/
+
+#ifndef INTERNAL_SIZE_T
+#define INTERNAL_SIZE_T size_t
+#endif
+
+/* The corresponding word size */
+#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
+
+
+/*
+  MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
+  It must be a power of two at least 2 * SIZE_SZ, even on machines
+  for which smaller alignments would suffice. It may be defined as
+  larger than this though.
Note however that code and data structures + are optimized for the case of 8-byte alignment. +*/ + + +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT (2 * SIZE_SZ) +#endif + +/* The corresponding bit mask value */ +#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) + + + +/* + REALLOC_ZERO_BYTES_FREES should be set if a call to + realloc with zero bytes should be the same as a call to free. + Some people think it should. Otherwise, since this malloc + returns a unique pointer for malloc(0), so does realloc(p, 0). +*/ + +/* #define REALLOC_ZERO_BYTES_FREES */ + +/* + TRIM_FASTBINS controls whether free() of a very small chunk can + immediately lead to trimming. Setting to true (1) can reduce memory + footprint, but will almost always slow down programs that use a lot + of small chunks. + + Define this only if you are willing to give up some speed to more + aggressively reduce system-level memory footprint when releasing + memory in programs that use many small chunks. You can get + essentially the same effect by setting MXFAST to 0, but this can + lead to even greater slowdowns in programs using many small chunks. + TRIM_FASTBINS is an in-between compile-time option, that disables + only those chunks bordering topmost memory from being placed in + fastbins. +*/ + +#ifndef TRIM_FASTBINS +#define TRIM_FASTBINS 0 +#endif + + +/* + USE_DL_PREFIX will prefix all public routines with the string 'dl'. + This is necessary when you only want to use this malloc in one part + of a program, using your regular system malloc elsewhere. +*/ + +/* #define USE_DL_PREFIX */ + + +/* + USE_MALLOC_LOCK causes wrapper functions to surround each + callable routine with pthread mutex lock/unlock. + + USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined +*/ + + +/* #define USE_MALLOC_LOCK */ + + +/* + If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is + actually a wrapper function that first calls MALLOC_PREACTION, then + calls the internal routine, and follows it with + MALLOC_POSTACTION. This is needed for locking, but you can also use + this, without USE_MALLOC_LOCK, for purposes of interception, + instrumentation, etc. It is a sad fact that using wrappers often + noticeably degrades performance of malloc-intensive programs. +*/ + +#ifdef USE_MALLOC_LOCK +#define USE_PUBLIC_MALLOC_WRAPPERS +#else +/* #define USE_PUBLIC_MALLOC_WRAPPERS */ +#endif + + +/* + Two-phase name translation. + All of the actual routines are given mangled names. + When wrappers are used, they become the public callable versions. + When DL_PREFIX is used, the callable names are prefixed. 
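+
+  For example, with USE_DL_PREFIX defined and wrappers disabled, the
+  routine written below as mALLOc is compiled under the name
+  mALLOc -> public_mALLOc -> dlmalloc, so client code calls
+  dlmalloc() while the C library's malloc stays untouched (see the
+  #defines that follow).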
+*/ + +#ifndef USE_PUBLIC_MALLOC_WRAPPERS +#define cALLOc public_cALLOc +#define fREe public_fREe +#define cFREe public_cFREe +#define mALLOc public_mALLOc +#define mEMALIGn public_mEMALIGn +#define rEALLOc public_rEALLOc +#define vALLOc public_vALLOc +#define pVALLOc public_pVALLOc +#define mALLINFo public_mALLINFo +#define mALLOPt public_mALLOPt +#define mTRIm public_mTRIm +#define mSTATs public_mSTATs +#define mUSABLe public_mUSABLe +#define iCALLOc public_iCALLOc +#define iCOMALLOc public_iCOMALLOc +#endif + +#ifdef USE_DL_PREFIX +#define public_cALLOc dlcalloc +#define public_fREe dlfree +#define public_cFREe dlcfree +#define public_mALLOc dlmalloc +#define public_mEMALIGn dlmemalign +#define public_rEALLOc dlrealloc +#define public_vALLOc dlvalloc +#define public_pVALLOc dlpvalloc +#define public_mALLINFo dlmallinfo +#define public_mALLOPt dlmallopt +#define public_mTRIm dlmalloc_trim +#define public_mSTATs dlmalloc_stats +#define public_mUSABLe dlmalloc_usable_size +#define public_iCALLOc dlindependent_calloc +#define public_iCOMALLOc dlindependent_comalloc +#else /* USE_DL_PREFIX */ +#define public_cALLOc calloc +#define public_fREe free +#define public_cFREe cfree +#define public_mALLOc malloc +#define public_mEMALIGn memalign +#define public_rEALLOc realloc +#define public_vALLOc valloc +#define public_pVALLOc pvalloc +#define public_mALLINFo mallinfo +#define public_mALLOPt mallopt +#define public_mTRIm malloc_trim +#define public_mSTATs malloc_stats +#define public_mUSABLe malloc_usable_size +#define public_iCALLOc independent_calloc +#define public_iCOMALLOc independent_comalloc +#endif /* USE_DL_PREFIX */ + + +/* + HAVE_MEMCPY should be defined if you are not otherwise using + ANSI STD C, but still have memcpy and memset in your C library + and want to use them in calloc and realloc. Otherwise simple + macro versions are defined below. + + USE_MEMCPY should be defined as 1 if you actually want to + have memset and memcpy called. People report that the macro + versions are faster than libc versions on some systems. + + Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks + (of <= 36 bytes) are manually unrolled in realloc and calloc. +*/ + +#define HAVE_MEMCPY + +#ifndef USE_MEMCPY +#ifdef HAVE_MEMCPY +#define USE_MEMCPY 1 +#else +#define USE_MEMCPY 0 +#endif +#endif + + +#if (__STD_C || defined(HAVE_MEMCPY)) + +#ifdef WIN32 +/* On Win32 memset and memcpy are already declared in windows.h */ +#else +#if __STD_C +void* memset(void*, int, size_t); +void* memcpy(void*, const void*, size_t); +#else +Void_t* memset(); +Void_t* memcpy(); +#endif +#endif +#endif + +/* + MALLOC_FAILURE_ACTION is the action to take before "return 0" when + malloc fails to be able to return memory, either because memory is + exhausted or because of illegal arguments. + + By default, sets errno if running on STD_C platform, else does nothing. +*/ + +#ifndef MALLOC_FAILURE_ACTION +#if __STD_C +#define MALLOC_FAILURE_ACTION \ + errno = ENOMEM; + +#else +#define MALLOC_FAILURE_ACTION +#endif +#endif + +/* + MORECORE-related declarations. By default, rely on sbrk +*/ + + +#ifdef LACKS_UNISTD_H +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +#if __STD_C +extern Void_t* sbrk(ptrdiff_t); +#else +extern Void_t* sbrk(); +#endif +#endif +#endif + +/* + MORECORE is the name of the routine to call to obtain more memory + from the system. 
See below for general guidance on writing + alternative MORECORE functions, as well as a version for WIN32 and a + sample version for pre-OSX macos. +*/ + +#ifndef MORECORE +#define MORECORE sbrk +#endif + +/* + MORECORE_FAILURE is the value returned upon failure of MORECORE + as well as mmap. Since it cannot be an otherwise valid memory address, + and must reflect values of standard sys calls, you probably ought not + try to redefine it. +*/ + +#ifndef MORECORE_FAILURE +#define MORECORE_FAILURE (-1) +#endif + +/* + If MORECORE_CONTIGUOUS is true, take advantage of fact that + consecutive calls to MORECORE with positive arguments always return + contiguous increasing addresses. This is true of unix sbrk. Even + if not defined, when regions happen to be contiguous, malloc will + permit allocations spanning regions obtained from different + calls. But defining this when applicable enables some stronger + consistency checks and space efficiencies. +*/ + +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif + +/* + Define MORECORE_CANNOT_TRIM if your version of MORECORE + cannot release space back to the system when given negative + arguments. This is generally necessary only if you are using + a hand-crafted MORECORE function that cannot handle negative arguments. +*/ + +/* #define MORECORE_CANNOT_TRIM */ + + +/* + Define HAVE_MMAP as true to optionally make malloc() use mmap() to + allocate very large blocks. These will be returned to the + operating system immediately after a free(). Also, if mmap + is available, it is used as a backup strategy in cases where + MORECORE fails to provide space from system. + + This malloc is best tuned to work with mmap for large requests. + If you do not have mmap, operations involving very large chunks (1MB + or so) may be slower than you'd like. +*/ + +#ifndef HAVE_MMAP +#define HAVE_MMAP 1 + +/* + Standard unix mmap using /dev/zero clears memory so calloc doesn't + need to. +*/ + +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 1 +#endif + +#else /* no mmap */ +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 0 +#endif +#endif + + +/* + MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if + sbrk fails, and mmap is used as a backup (which is done only if + HAVE_MMAP). The value must be a multiple of page size. This + backup strategy generally applies only when systems have "holes" in + address space, so sbrk cannot perform contiguous expansion, but + there is still space available on system. On systems for which + this is known to be useful (i.e. most linux kernels), this occurs + only when programs allocate huge amounts of memory. Between this, + and the fact that mmap regions tend to be limited, the size should + be large, to avoid too many mmap calls and thus avoid running out + of kernel resources. +*/ + +#ifndef MMAP_AS_MORECORE_SIZE +#define MMAP_AS_MORECORE_SIZE (1024 * 1024) +#endif + +/* + Define HAVE_MREMAP to make realloc() use mremap() to re-allocate + large blocks. This is currently only possible on Linux with + kernel versions newer than 1.3.77. +*/ + +#ifndef HAVE_MREMAP +#ifdef linux +#define HAVE_MREMAP 1 +#else +#define HAVE_MREMAP 0 +#endif + +#endif /* HAVE_MMAP */ + + +/* + The system page size. To the extent possible, this malloc manages + memory from the system in page-size units. Note that this value is + cached during initialization into a field of malloc_state. So even + if malloc_getpagesize is a function, it is only called once. + + The following mechanics for getpagesize were adapted from bsd/gnu + getpagesize.h. 
If none of the system-probes here apply, a value of
+  4096 is used, which should be OK: If they don't apply, then using
+  the actual value probably doesn't impact performance.
+*/
+
+
+#ifndef malloc_getpagesize
+
+#ifndef LACKS_UNISTD_H
+#  include <unistd.h>
+#endif
+
+#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
+#    ifndef _SC_PAGE_SIZE
+#      define _SC_PAGE_SIZE _SC_PAGESIZE
+#    endif
+#  endif
+
+#  ifdef _SC_PAGE_SIZE
+#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+#  else
+#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+       extern size_t getpagesize();
+#      define malloc_getpagesize getpagesize()
+#    else
+#      ifdef WIN32 /* use supplied emulation of getpagesize */
+#        define malloc_getpagesize getpagesize()
+#      else
+#        ifndef LACKS_SYS_PARAM_H
+#          include <sys/param.h>
+#        endif
+#        ifdef EXEC_PAGESIZE
+#          define malloc_getpagesize EXEC_PAGESIZE
+#        else
+#          ifdef NBPG
+#            ifndef CLSIZE
+#              define malloc_getpagesize NBPG
+#            else
+#              define malloc_getpagesize (NBPG * CLSIZE)
+#            endif
+#          else
+#            ifdef NBPC
+#              define malloc_getpagesize NBPC
+#            else
+#              ifdef PAGESIZE
+#                define malloc_getpagesize PAGESIZE
+#              else /* just guess */
+#                define malloc_getpagesize (4096)
+#              endif
+#            endif
+#          endif
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+
+/*
+  This version of malloc supports the standard SVID/XPG mallinfo
+  routine that returns a struct containing usage properties and
+  statistics. It should work on any SVID/XPG compliant system that has
+  a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
+  install such a thing yourself, cut out the preliminary declarations
+  as described above and below and save them in a malloc.h file. But
+  there's no compelling reason to bother to do this.)
+
+  The main declaration needed is the mallinfo struct that is returned
+  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
+  bunch of fields that are not even meaningful in this version of
+  malloc.  These fields are instead filled by mallinfo() with
+  other numbers that might be of interest.
+
+  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
+  /usr/include/malloc.h file that includes a declaration of struct
+  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
+  version is declared below.  These must be precisely the same for
+  mallinfo() to work.  The original SVID version of this struct,
+  defined on most systems with mallinfo, declares all fields as
+  ints. But some others define as unsigned long. If your system
+  defines the fields using a type of different width than listed here,
+  you must #include your system version and #define
+  HAVE_USR_INCLUDE_MALLOC_H.
+*/
+
+/* #define HAVE_USR_INCLUDE_MALLOC_H */
+
+#ifdef HAVE_USR_INCLUDE_MALLOC_H
+#include "/usr/include/malloc.h"
+#else
+
+/* SVID2/XPG mallinfo structure */
+
+struct mallinfo {
+  int arena;    /* non-mmapped space allocated from system */
+  int ordblks;  /* number of free chunks */
+  int smblks;   /* number of fastbin blocks */
+  int hblks;    /* number of mmapped regions */
+  int hblkhd;   /* space in mmapped regions */
+  int usmblks;  /* maximum total allocated space */
+  int fsmblks;  /* space available in freed fastbin blocks */
+  int uordblks; /* total allocated space */
+  int fordblks; /* total free space */
+  int keepcost; /* top-most, releasable (via malloc_trim) space */
+};
+
+/*
+  SVID/XPG defines four standard parameter numbers for mallopt,
+  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
+  in this malloc.  The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
+  so setting them has no effect. But this malloc also supports other
+  options in mallopt described below.
+*/
+#endif
+
+
+/* ---------- description of public routines ------------ */
+
+/*
+  malloc(size_t n)
+  Returns a pointer to a newly allocated chunk of at least n bytes, or null
+  if no space is available. Additionally, on failure, errno is
+  set to ENOMEM on ANSI C systems.
+
+  If n is zero, malloc returns a minimum-sized chunk. (The minimum
+  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
+  systems.)  On most systems, size_t is an unsigned type, so calls
+  with negative arguments are interpreted as requests for huge amounts
+  of space, which will often fail. The maximum supported value of n
+  differs across systems, but is in all cases less than the maximum
+  representable value of a size_t.
+*/
+#if __STD_C
+Void_t*  public_mALLOc(size_t);
+#else
+Void_t*  public_mALLOc();
+#endif
+
+/*
+  free(Void_t* p)
+  Releases the chunk of memory pointed to by p, that had been previously
+  allocated using malloc or a related routine such as realloc.
+  It has no effect if p is null. It can have arbitrary (i.e., bad!)
+  effects if p has already been freed.
+
+  Unless disabled (using mallopt), freeing very large spaces will,
+  when possible, automatically trigger operations that give
+  back unused memory to the system, thus reducing program footprint.
+*/
+#if __STD_C
+void     public_fREe(Void_t*);
+#else
+void     public_fREe();
+#endif
+
+/*
+  calloc(size_t n_elements, size_t element_size);
+  Returns a pointer to n_elements * element_size bytes, with all locations
+  set to zero.
+*/
+#if __STD_C
+Void_t*  public_cALLOc(size_t, size_t);
+#else
+Void_t*  public_cALLOc();
+#endif
+
+/*
+  realloc(Void_t* p, size_t n)
+  Returns a pointer to a chunk of size n that contains the same data
+  as does chunk p up to the minimum of (n, p's size) bytes, or null
+  if no space is available.
+
+  The returned pointer may or may not be the same as p. The algorithm
+  prefers extending p when possible, otherwise it employs the
+  equivalent of a malloc-copy-free sequence.
+
+  If p is null, realloc is equivalent to malloc.
+
+  If space is not available, realloc returns null, errno is set (if on
+  ANSI) and p is NOT freed.
+
+  If n is for fewer bytes than already held by p, the newly unused
+  space is lopped off and freed if possible.  Unless the #define
+  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
+  zero (re)allocates a minimum-sized chunk.
+
+  Large chunks that were internally obtained via mmap will always
+  be reallocated using malloc-copy-free sequences unless
+  the system supports MREMAP (currently only linux).
+
+  The old unix realloc convention of allowing the last-free'd chunk
+  to be used as an argument to realloc is not supported.
+*/
+#if __STD_C
+Void_t*  public_rEALLOc(Void_t*, size_t);
+#else
+Void_t*  public_rEALLOc();
+#endif
+
+/*
+  memalign(size_t alignment, size_t n);
+  Returns a pointer to a newly allocated chunk of n bytes, aligned
+  in accord with the alignment argument.
+
+  The alignment argument should be a power of two. If the argument is
+  not a power of two, the nearest greater power is used.
+  8-byte alignment is guaranteed by normal malloc calls, so don't
+  bother calling memalign with an argument of 8 or less.
+
+  Overreliance on memalign is a sure way to fragment space.
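+
+  For example, to obtain an n-byte buffer aligned on a 4096-byte
+  boundary (an illustrative call to the routine declared below):
+
+    p = memalign(4096, n);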
+*/
+#if __STD_C
+Void_t*  public_mEMALIGn(size_t, size_t);
+#else
+Void_t*  public_mEMALIGn();
+#endif
+
+/*
+  valloc(size_t n);
+  Equivalent to memalign(pagesize, n), where pagesize is the page
+  size of the system. If the pagesize is unknown, 4096 is used.
+*/
+#if __STD_C
+Void_t*  public_vALLOc(size_t);
+#else
+Void_t*  public_vALLOc();
+#endif
+
+
+
+/*
+  mallopt(int parameter_number, int parameter_value)
+  Sets tunable parameters. The format is to provide a
+  (parameter-number, parameter-value) pair.  mallopt then sets the
+  corresponding parameter to the argument value if it can (i.e., so
+  long as the value is meaningful), and returns 1 if successful else
+  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
+  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
+  in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
+  so setting them has no effect. But this malloc also supports four
+  other options in mallopt. See below for details.  Briefly, supported
+  parameters are as follows (listed defaults are for "typical"
+  configurations).
+
+  Symbol            param #   default    allowed param values
+  M_MXFAST          1         64         0-80  (0 disables fastbins)
+  M_TRIM_THRESHOLD  -1        128*1024   any   (-1U disables trimming)
+  M_TOP_PAD         -2        0          any
+  M_MMAP_THRESHOLD  -3        128*1024   any   (or 0 if no MMAP support)
+  M_MMAP_MAX        -4        65536      any   (0 disables use of mmap)
+*/
+#if __STD_C
+int      public_mALLOPt(int, int);
+#else
+int      public_mALLOPt();
+#endif
+
+
+/*
+  mallinfo()
+  Returns (by copy) a struct containing various summary statistics:
+
+  arena:     current total non-mmapped bytes allocated from system
+  ordblks:   the number of free chunks
+  smblks:    the number of fastbin blocks (i.e., small chunks that
+               have been freed but not yet reused or consolidated)
+  hblks:     current number of mmapped regions
+  hblkhd:    total bytes held in mmapped regions
+  usmblks:   the maximum total allocated space. This will be greater
+               than current total if trimming has occurred.
+  fsmblks:   total bytes held in fastbin blocks
+  uordblks:  current total allocated space (normal or mmapped)
+  fordblks:  total free space
+  keepcost:  the maximum number of bytes that could ideally be released
+               back to system via malloc_trim. ("ideally" means that
+               it ignores page restrictions etc.)
+
+  Because these fields are ints, but internal bookkeeping may
+  be kept as longs, the reported values may wrap around zero and
+  thus be inaccurate.
+*/
+#if __STD_C
+struct mallinfo public_mALLINFo(void);
+#else
+struct mallinfo public_mALLINFo();
+#endif
+
+/*
+  independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
+
+  independent_calloc is similar to calloc, but instead of returning a
+  single cleared space, it returns an array of pointers to n_elements
+  independent elements that can hold contents of size elem_size, each
+  of which starts out cleared, and can be independently freed,
+  realloc'ed etc. The elements are guaranteed to be adjacently
+  allocated (this is not guaranteed to occur with multiple callocs or
+  mallocs), which may also improve cache locality in some
+  applications.
+
+  The "chunks" argument is optional (i.e., may be null, which is
+  probably the most typical usage). If it is null, the returned array
+  is itself dynamically allocated and should also be freed when it is
+  no longer needed. Otherwise, the chunks array must be of at least
+  n_elements in length. It is filled in with the pointers to the
+  chunks.
+
+  In either case, independent_calloc returns this pointer array, or
+  null if the allocation failed. If n_elements is zero and "chunks"
+  is null, it returns a chunk representing an array with zero elements
+  (which should be freed if not wanted).
+
+  Each element must be individually freed when it is no longer
+  needed. If you'd like to instead be able to free all at once, you
+  should instead use regular calloc and assign pointers into this
+  space to represent elements. (In this case though, you cannot
+  independently free elements.)
+
+  independent_calloc simplifies and speeds up implementations of many
+  kinds of pools. It may also be useful when constructing large data
+  structures that initially have a fixed number of fixed-sized nodes,
+  but the number is not known at compile time, and some of the nodes
+  may later need to be freed. For example:
+
+  struct Node { int item; struct Node* next; };
+
+  struct Node* build_list() {
+    struct Node** pool;
+    struct Node* first;
+    int i;
+    int n = read_number_of_nodes_needed();
+    if (n <= 0) return 0;
+    pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
+    if (pool == 0) die();
+    // organize into a linked list...
+    first = pool[0];
+    for (i = 0; i < n-1; ++i)
+      pool[i]->next = pool[i+1];
+    free(pool); // Can now free the array (or not, if it is needed later)
+    return first;
+  }
+*/
+#if __STD_C
+Void_t** public_iCALLOc(size_t, size_t, Void_t**);
+#else
+Void_t** public_iCALLOc();
+#endif
+
+/*
+  independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
+
+  independent_comalloc allocates, all at once, a set of n_elements
+  chunks with sizes indicated in the "sizes" array. It returns
+  an array of pointers to these elements, each of which can be
+  independently freed, realloc'ed etc. The elements are guaranteed to
+  be adjacently allocated (this is not guaranteed to occur with
+  multiple callocs or mallocs), which may also improve cache locality
+  in some applications.
+
+  The "chunks" argument is optional (i.e., may be null). If it is null
+  the returned array is itself dynamically allocated and should also
+  be freed when it is no longer needed. Otherwise, the chunks array
+  must be of at least n_elements in length. It is filled in with the
+  pointers to the chunks.
+
+  In either case, independent_comalloc returns this pointer array, or
+  null if the allocation failed. If n_elements is zero and chunks is
+  null, it returns a chunk representing an array with zero elements
+  (which should be freed if not wanted).
+
+  Each element must be individually freed when it is no longer
+  needed. If you'd like to instead be able to free all at once, you
+  should instead use a single regular malloc, and assign pointers at
+  particular offsets in the aggregate space. (In this case though, you
+  cannot independently free elements.)
+
+  independent_comalloc differs from independent_calloc in that each
+  element may have a different size, and also that it does not
+  automatically clear elements.
+
+  independent_comalloc can be used to speed up allocation in cases
+  where several structs or objects must always be allocated at the
+  same time. For example:
+
+  struct Head { ... }
+  struct Foot { ... 
}
+
+  void send_message(char* msg) {
+    int msglen = strlen(msg);
+    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+    void* chunks[3];
+    if (independent_comalloc(3, sizes, chunks) == 0)
+      die();
+    struct Head* head = (struct Head*)(chunks[0]);
+    char* body = (char*)(chunks[1]);
+    struct Foot* foot = (struct Foot*)(chunks[2]);
+    // ...
+  }
+
+  In general though, independent_comalloc is worth using only for
+  larger values of n_elements. For small values, you probably won't
+  detect enough difference from a series of malloc calls to bother.
+
+  Overuse of independent_comalloc can increase overall memory usage,
+  since it cannot reuse existing noncontiguous small chunks that
+  might be available for some of the elements.
+*/
+#if __STD_C
+Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
+#else
+Void_t** public_iCOMALLOc();
+#endif
+
+
+/*
+  pvalloc(size_t n);
+  Equivalent to valloc(minimum-page-that-holds(n)), that is,
+  round up n to nearest pagesize.
+ */
+#if __STD_C
+Void_t* public_pVALLOc(size_t);
+#else
+Void_t* public_pVALLOc();
+#endif
+
+/*
+  cfree(Void_t* p);
+  Equivalent to free(p).
+
+  cfree is needed/defined on some systems that pair it with calloc,
+  for odd historical reasons (such as: cfree is used in example
+  code in the first edition of K&R).
+*/
+#if __STD_C
+void public_cFREe(Void_t*);
+#else
+void public_cFREe();
+#endif
+
+/*
+  malloc_trim(size_t pad);
+
+  If possible, gives memory back to the system (via negative
+  arguments to sbrk) if there is unused memory at the `high' end of
+  the malloc pool. You can call this after freeing large blocks of
+  memory to potentially reduce the system-level memory requirements
+  of a program. However, it cannot guarantee to reduce memory. Under
+  some allocation patterns, some large free blocks of memory will be
+  locked between two used chunks, so they cannot be given back to
+  the system.
+
+  The `pad' argument to malloc_trim represents the amount of free
+  trailing space to leave untrimmed. If this argument is zero,
+  only the minimum amount of memory to maintain internal data
+  structures will be left (one page or less). Non-zero arguments
+  can be supplied to maintain enough trailing space to service
+  future expected allocations without having to re-obtain memory
+  from the system.
+
+  malloc_trim returns 1 if it actually released any memory, else 0.
+  On systems that do not support "negative sbrks", it will always
+  return 0.
+*/
+#if __STD_C
+int public_mTRIm(size_t);
+#else
+int public_mTRIm();
+#endif
+
+/*
+  malloc_usable_size(Void_t* p);
+
+  Returns the number of bytes you can actually use in
+  an allocated chunk, which may be more than you requested (although
+  often not) due to alignment and minimum size constraints.
+  You can use this many bytes without worrying about
+  overwriting other allocated objects. This is not a particularly great
+  programming practice. malloc_usable_size can be more useful in
+  debugging and assertions, for example:
+
+  p = malloc(n);
+  assert(malloc_usable_size(p) >= 256);
+
+*/
+#if __STD_C
+size_t public_mUSABLe(Void_t*);
+#else
+size_t public_mUSABLe();
+#endif
+
+/*
+  malloc_stats();
+  Prints on stderr the amount of space obtained from the system (both
+  via sbrk and mmap), the maximum amount (which may be more than
+  current if malloc_trim and/or munmap got called), and the current
+  number of bytes allocated via malloc (or realloc, etc) but not yet
+  freed. Note that this is the number of bytes allocated, not the
+  number requested. 
It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. + +*/ +#if __STD_C +void public_mSTATs(); +#else +void public_mSTATs(); +#endif + +/* mallopt tuning options */ + +/* + M_MXFAST is the maximum request size used for "fastbins", special bins + that hold returned chunks without consolidating their spaces. This + enables future requests for chunks of the same size to be handled + very quickly, but can increase fragmentation, and thus increase the + overall memory footprint of a program. + + This malloc manages fastbins very conservatively yet still + efficiently, so fragmentation is rarely a problem for values less + than or equal to the default. The maximum supported value of MXFAST + is 80. You wouldn't want it any higher than this anyway. Fastbins + are designed especially for use with many small structs, objects or + strings -- the default handles structs/objects/arrays with sizes up + to 8 4byte fields, or small strings representing words, tokens, + etc. Using fastbins for larger objects normally worsens + fragmentation without improving speed. + + M_MXFAST is set in REQUEST size units. It is internally used in + chunksize units, which adds padding and alignment. You can reduce + M_MXFAST to 0 to disable all use of fastbins. This causes the malloc + algorithm to be a closer approximation of fifo-best-fit in all cases, + not just for larger requests, but will generally cause it to be + slower. +*/ + + +/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */ +#ifndef M_MXFAST +#define M_MXFAST 1 +#endif + +#ifndef DEFAULT_MXFAST +#define DEFAULT_MXFAST 64 +#endif + + +/* + M_TRIM_THRESHOLD is the maximum amount of unused top-most memory + to keep before releasing via malloc_trim in free(). + + Automatic trimming is mainly useful in long-lived programs. + Because trimming via sbrk can be slow on some systems, and can + sometimes be wasteful (in cases where programs immediately + afterward allocate more large chunks) the value should be high + enough so that your overall system performance would improve by + releasing this much memory. + + The trim threshold and the mmap control parameters (see below) + can be traded off with one another. Trimming and mmapping are + two different ways of releasing unused memory back to the + system. Between these two, it is often possible to keep + system-level demands of a long-lived program down to a bare + minimum. For example, in one test suite of sessions measuring + the XF86 X server on Linux, using a trim threshold of 128K and a + mmap threshold of 192K led to near-minimal long term resource + consumption. + + If you are using this malloc in a long-lived program, it should + pay to experiment with these values. As a rough guide, you + might set to a value close to the average size of a process + (program) running on your system. Releasing this much memory + would allow such a process to run in memory. 
Generally, it's
+  worth it to tune for trimming rather than memory mapping when a
+  program undergoes phases where several large chunks are
+  allocated and released in ways that can reuse each other's
+  storage, perhaps mixed with phases where there are no such
+  chunks at all. And in well-behaved long-lived programs,
+  controlling release of large blocks via trimming versus mapping
+  is usually faster.
+
+  However, in most programs, these parameters serve mainly as
+  protection against the system-level effects of carrying around
+  massive amounts of unneeded memory. Since frequent calls to
+  sbrk, mmap, and munmap otherwise degrade performance, the default
+  parameters are set to relatively high values that serve only as
+  safeguards.
+
+  The trim value must be greater than page size to have any useful
+  effect. To disable trimming completely, you can set it to
+  (unsigned long)(-1).
+
+  Trim settings interact with fastbin (MXFAST) settings: Unless
+  TRIM_FASTBINS is defined, automatic trimming never takes place upon
+  freeing a chunk with size less than or equal to MXFAST. Trimming is
+  instead delayed until subsequent freeing of larger chunks. However,
+  you can still force an attempted trim by calling malloc_trim.
+
+  Also, trimming is not generally possible in cases where
+  the main arena is obtained via mmap.
+
+  Note that the trick some people use of mallocing a huge space and
+  then freeing it at program startup, in an attempt to reserve system
+  memory, doesn't have the intended effect under automatic trimming,
+  since that memory will immediately be returned to the system.
+*/
+
+#define M_TRIM_THRESHOLD       -1
+
+#ifndef DEFAULT_TRIM_THRESHOLD
+#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
+#endif
+
+/*
+  M_TOP_PAD is the amount of extra `padding' space to allocate or
+  retain whenever sbrk is called. It is used in two ways internally:
+
+  * When sbrk is called to extend the top of the arena to satisfy
+    a new malloc request, this much padding is added to the sbrk
+    request.
+
+  * When malloc_trim is called automatically from free(),
+    it is used as the `pad' argument.
+
+  In both cases, the actual amount of padding is rounded
+  so that the end of the arena is always a system page boundary.
+
+  The main reason for using padding is to avoid calling sbrk so
+  often. Having even a small pad greatly reduces the likelihood
+  that nearly every malloc request during program start-up (or
+  after trimming) will invoke sbrk, which needlessly wastes
+  time.
+
+  Automatic rounding-up to page-size units is normally sufficient
+  to avoid measurable overhead, so the default is 0. However, in
+  systems where sbrk is relatively slow, it can pay to increase
+  this value, at the expense of carrying around more memory than
+  the program needs.
+*/
+
+#define M_TOP_PAD              -2
+
+#ifndef DEFAULT_TOP_PAD
+#define DEFAULT_TOP_PAD        (0)
+#endif
+
+/*
+  M_MMAP_THRESHOLD is the request size threshold for using mmap()
+  to service a request. Requests of at least this size that cannot
+  be allocated using already-existing space will be serviced via mmap.
+  (If enough normal freed space already exists it is used instead.)
+
+  Using mmap segregates relatively large chunks of memory so that
+  they can be individually obtained and released from the host
+  system. A request serviced through mmap is never reused by any
+  other request (at least not directly; the system may just so
+  happen to remap successive requests to the same locations).
+
+  Segregating space in this way has the benefits that:
+
+  1. 
Mmapped space can ALWAYS be individually released back
+     to the system, which helps keep the system level memory
+     demands of a long-lived program low.
+  2. Mapped memory can never become `locked' between
+     other chunks, as can happen with normally allocated chunks, which
+     means that even trimming via malloc_trim would not release them.
+  3. On some systems with "holes" in address spaces, mmap can obtain
+     memory that sbrk cannot.
+
+  However, it has the disadvantages that:
+
+  1. The space cannot be reclaimed, consolidated, and then
+     used to service later requests, as happens with normal chunks.
+  2. It can lead to more wastage because of mmap page alignment
+     requirements.
+  3. It causes malloc performance to be more dependent on host
+     system memory management support routines which may vary in
+     implementation quality and may impose arbitrary
+     limitations. Generally, servicing a request via normal
+     malloc steps is faster than going through a system's mmap.
+
+  The advantages of mmap nearly always outweigh disadvantages for
+  "large" chunks, but the value of "large" varies across systems. The
+  default is an empirically derived value that works well in most
+  systems.
+*/
+
+#define M_MMAP_THRESHOLD       -3
+
+#ifndef DEFAULT_MMAP_THRESHOLD
+#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
+#endif
+
+/*
+  M_MMAP_MAX is the maximum number of requests to simultaneously
+  service using mmap. This parameter exists because some systems
+  have a limited number of internal tables for use by mmap, and
+  using more than a few of them may degrade performance.
+
+  The default is set to a value that serves only as a safeguard.
+  Setting to 0 disables use of mmap for servicing large requests. If
+  HAVE_MMAP is not set, the default value is 0, and attempts to set it
+  to non-zero values in mallopt will fail.
+*/
+
+#define M_MMAP_MAX             -4
+
+#ifndef DEFAULT_MMAP_MAX
+#if HAVE_MMAP
+#define DEFAULT_MMAP_MAX       (65536)
+#else
+#define DEFAULT_MMAP_MAX       (0)
+#endif
+#endif
+
+#ifdef __cplusplus
+}  /* end of extern "C" */
+#endif
+
+/*
+  ========================================================================
+  To make a fully customizable malloc.h header file, cut everything
+  above this line, put into file malloc.h, edit to suit, and #include it
+  on the next line, as well as in programs that use this malloc. 
+ ======================================================================== +*/ + +/* #include "malloc.h" */ + +/* --------------------- public wrappers ---------------------- */ + +#ifdef USE_PUBLIC_MALLOC_WRAPPERS + +/* Declare all routines as internal */ +#if __STD_C +static Void_t* mALLOc(size_t); +static void fREe(Void_t*); +static Void_t* rEALLOc(Void_t*, size_t); +static Void_t* mEMALIGn(size_t, size_t); +static Void_t* vALLOc(size_t); +static Void_t* pVALLOc(size_t); +static Void_t* cALLOc(size_t, size_t); +static Void_t** iCALLOc(size_t, size_t, Void_t**); +static Void_t** iCOMALLOc(size_t, size_t*, Void_t**); +static void cFREe(Void_t*); +static int mTRIm(size_t); +static size_t mUSABLe(Void_t*); +static void mSTATs(); +static int mALLOPt(int, int); +static struct mallinfo mALLINFo(void); +#else +static Void_t* mALLOc(); +static void fREe(); +static Void_t* rEALLOc(); +static Void_t* mEMALIGn(); +static Void_t* vALLOc(); +static Void_t* pVALLOc(); +static Void_t* cALLOc(); +static Void_t** iCALLOc(); +static Void_t** iCOMALLOc(); +static void cFREe(); +static int mTRIm(); +static size_t mUSABLe(); +static void mSTATs(); +static int mALLOPt(); +static struct mallinfo mALLINFo(); +#endif + +/* + MALLOC_PREACTION and MALLOC_POSTACTION should be + defined to return 0 on success, and nonzero on failure. + The return value of MALLOC_POSTACTION is currently ignored + in wrapper functions since there is no reasonable default + action to take on failure. +*/ + + +#ifdef USE_MALLOC_LOCK + +#ifdef WIN32 + +static int mALLOC_MUTEx; +#define MALLOC_PREACTION slwait(&mALLOC_MUTEx) +#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx) + +#else + +#include <pthread.h> + +static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER; + +#define MALLOC_PREACTION pthread_mutex_lock(&mALLOC_MUTEx) +#define MALLOC_POSTACTION pthread_mutex_unlock(&mALLOC_MUTEx) + +#endif /* USE_MALLOC_LOCK */ + +#else + +/* Substitute anything you like for these */ + +#define MALLOC_PREACTION (0) +#define MALLOC_POSTACTION (0) + +#endif + +Void_t* public_mALLOc(size_t bytes) { + Void_t* m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = mALLOc(bytes); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +void public_fREe(Void_t* m) { + if (MALLOC_PREACTION != 0) { + return; + } + fREe(m); + if (MALLOC_POSTACTION != 0) { + } +} + +Void_t* public_rEALLOc(Void_t* m, size_t bytes) { + if (MALLOC_PREACTION != 0) { + return 0; + } + m = rEALLOc(m, bytes); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +Void_t* public_mEMALIGn(size_t alignment, size_t bytes) { + Void_t* m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = mEMALIGn(alignment, bytes); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +Void_t* public_vALLOc(size_t bytes) { + Void_t* m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = vALLOc(bytes); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +Void_t* public_pVALLOc(size_t bytes) { + Void_t* m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = pVALLOc(bytes); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +Void_t* public_cALLOc(size_t n, size_t elem_size) { + Void_t* m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = cALLOc(n, elem_size); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + + +Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) { + Void_t** m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = iCALLOc(n, elem_size, chunks); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +Void_t** public_iCOMALLOc(size_t n, size_t sizes[], 
Void_t** chunks) {
+  Void_t** m;
+  if (MALLOC_PREACTION != 0) {
+    return 0;
+  }
+  m = iCOMALLOc(n, sizes, chunks);
+  if (MALLOC_POSTACTION != 0) {
+  }
+  return m;
+}
+
+void public_cFREe(Void_t* m) {
+  if (MALLOC_PREACTION != 0) {
+    return;
+  }
+  cFREe(m);
+  if (MALLOC_POSTACTION != 0) {
+  }
+}
+
+int public_mTRIm(size_t s) {
+  int result;
+  if (MALLOC_PREACTION != 0) {
+    return 0;
+  }
+  result = mTRIm(s);
+  if (MALLOC_POSTACTION != 0) {
+  }
+  return result;
+}
+
+size_t public_mUSABLe(Void_t* m) {
+  size_t result;
+  if (MALLOC_PREACTION != 0) {
+    return 0;
+  }
+  result = mUSABLe(m);
+  if (MALLOC_POSTACTION != 0) {
+  }
+  return result;
+}
+
+void public_mSTATs() {
+  if (MALLOC_PREACTION != 0) {
+    return;
+  }
+  mSTATs();
+  if (MALLOC_POSTACTION != 0) {
+  }
+}
+
+struct mallinfo public_mALLINFo() {
+  struct mallinfo m;
+  if (MALLOC_PREACTION != 0) {
+    struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    return nm;
+  }
+  m = mALLINFo();
+  if (MALLOC_POSTACTION != 0) {
+  }
+  return m;
+}
+
+int public_mALLOPt(int p, int v) {
+  int result;
+  if (MALLOC_PREACTION != 0) {
+    return 0;
+  }
+  result = mALLOPt(p, v);
+  if (MALLOC_POSTACTION != 0) {
+  }
+  return result;
+}
+
+#endif
+
+
+
+/* ------------- Optional versions of memcopy ---------------- */
+
+
+#if USE_MEMCPY
+
+/*
+  Note: memcpy is ONLY invoked with non-overlapping regions,
+  so the (usually slower) memmove is not needed.
+*/
+
+#define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
+#define MALLOC_ZERO(dest, nbytes)       memset(dest, 0, nbytes)
+
+#else /* !USE_MEMCPY */
+
+/* Use Duff's device for good zeroing/copying performance. */
+
+#define MALLOC_ZERO(charp, nbytes)                                        \
+do {                                                                      \
+  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                       \
+  unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                 \
+  long mcn;                                                               \
+  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }         \
+  switch (mctmp) {                                                        \
+    case 0: for(;;) { *mzp++ = 0;                                         \
+    case 7:           *mzp++ = 0;                                         \
+    case 6:           *mzp++ = 0;                                         \
+    case 5:           *mzp++ = 0;                                         \
+    case 4:           *mzp++ = 0;                                         \
+    case 3:           *mzp++ = 0;                                         \
+    case 2:           *mzp++ = 0;                                         \
+    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }            \
+  }                                                                       \
+} while(0)
+
+#define MALLOC_COPY(dest,src,nbytes)                                      \
+do {                                                                      \
+  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                        \
+  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                       \
+  unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                 \
+  long mcn;                                                               \
+  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }         \
+  switch (mctmp) {                                                        \
+    case 0: for(;;) { *mcdst++ = *mcsrc++;                                \
+    case 7:           *mcdst++ = *mcsrc++;                                \
+    case 6:           *mcdst++ = *mcsrc++;                                \
+    case 5:           *mcdst++ = *mcsrc++;                                \
+    case 4:           *mcdst++ = *mcsrc++;                                \
+    case 3:           *mcdst++ = *mcsrc++;                                \
+    case 2:           *mcdst++ = *mcsrc++;                                \
+    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }   \
+  }                                                                       \
+} while(0)
+
+#endif
+
+/* ------------------ MMAP support ------------------ */
+
+
+#if HAVE_MMAP
+
+#include <fcntl.h>
+#ifndef LACKS_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+/*
+  Nearly all versions of mmap support MAP_ANONYMOUS,
+  so the following is unlikely to be needed, but is
+  supplied just in case.
+*/
+
+#ifndef MAP_ANONYMOUS
+
+static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+
+#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? 
\ + (dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \ + mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) + +#else + +#define MMAP(addr, size, prot, flags) \ + (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0)) + +#endif + + +#endif /* HAVE_MMAP */ + + +/* + ----------------------- Chunk representations ----------------------- +*/ + + +/* + This struct declaration is misleading (but accurate and necessary). + It declares a "view" into memory allowing access to necessary + fields at known offsets from a given base. See explanation below. +*/ + +struct malloc_chunk { + + INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ + INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ + + struct malloc_chunk* fd; /* double links -- used only if free. */ + struct malloc_chunk* bk; +}; + + +typedef struct malloc_chunk* mchunkptr; + +/* + malloc_chunk details: + + (The following includes lightly edited explanations by Colin Plumb.) + + Chunks of memory are maintained using a `boundary tag' method as + described in e.g., Knuth or Standish. (See the paper by Paul + Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a + survey of such techniques.) Sizes of free chunks are stored both + in the front of each chunk and at the end. This makes + consolidating fragmented chunks into bigger chunks very fast. The + size fields also hold bits representing whether chunks are free or + in use. + + An allocated chunk looks like this: + + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk, if allocated | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | User data starts here... . + . . + . (malloc_usable_space() bytes) . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + + Where "chunk" is the front of the chunk for the purpose of most of + the malloc code, but "mem" is the pointer that is returned to the + user. "Nextchunk" is the beginning of the next contiguous chunk. + + Chunks always begin on even word boundries, so the mem portion + (which is returned to the user) is also on an even word boundary, and + thus at least double-word aligned. + + Free chunks are stored in circular doubly-linked lists, and look like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + The P (PREV_INUSE) bit, stored in the unused low-order bit of the + chunk size (which is always a multiple of two words), is an in-use + bit for the *previous* chunk. 
If that bit is *clear*, then the
+  word before the current chunk size contains the previous chunk
+  size, and can be used to find the front of the previous chunk.
+  The very first chunk allocated always has this bit set,
+  preventing access to non-existent (or non-owned) memory. If
+  prev_inuse is set for any given chunk, then you CANNOT determine
+  the size of the previous chunk, and might even get a memory
+  addressing fault when trying to do so.
+
+  Note that the `foot' of the current chunk is actually represented
+  as the prev_size of the NEXT chunk. This makes it easier to
+  deal with alignments etc but can be very confusing when trying
+  to extend or adapt this code.
+
+  The two exceptions to all this are
+
+  1. The special chunk `top' doesn't bother using the
+     trailing size field since there is no next contiguous chunk
+     that would have to index off it. After initialization, `top'
+     is forced to always exist. If it would become less than
+     MINSIZE bytes long, it is replenished.
+
+  2. Chunks allocated via mmap, which have the second-lowest-order
+     bit (IS_MMAPPED) set in their size fields. Because they are
+     allocated one-by-one, each must contain its own trailing size field.
+
+*/
+
+/*
+  ---------- Size and alignment checks and conversions ----------
+*/
+
+/* conversion from malloc headers to user pointers, and back */
+
+#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
+#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
+
+/* The smallest possible chunk */
+#define MIN_CHUNK_SIZE  (sizeof(struct malloc_chunk))
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+
+#define MINSIZE  \
+  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
+
+/* Check if m has acceptable alignment */
+
+#define aligned_OK(m)  (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
+
+
+/*
+   Check if a request is so large that it would wrap around zero when
+   padded and aligned. To simplify some other code, the bound is made
+   low enough so that adding MINSIZE will also not wrap around zero.
+*/
+
+#define REQUEST_OUT_OF_RANGE(req)   \
+  ((unsigned long)(req) >=          \
+   (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
+
+/* pad request bytes into a usable size -- internal version */
+
+#define request2size(req)                                   \
+  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?       \
+   MINSIZE :                                                \
+   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
+
+/*  Same, except also perform argument check */
+
+#define checked_request2size(req, sz)  \
+  if (REQUEST_OUT_OF_RANGE(req)) {     \
+    MALLOC_FAILURE_ACTION;             \
+    return 0;                          \
+  }                                    \
+  (sz) = request2size(req);
+
+/*
+  --------------- Physical chunk operations ---------------
+*/
+
+
+/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
+#define PREV_INUSE 0x1
+
+/* extract inuse bit of previous chunk */
+#define prev_inuse(p)       ((p)->size & PREV_INUSE)
+
+
+/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
+#define IS_MMAPPED 0x2
+
+/* check for mmap()'ed chunk */
+#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
+
+/*
+  Bits to mask off when extracting size
+
+  Note: IS_MMAPPED is intentionally not masked off from size field in
+  macros for which mmapped chunks should never be seen. This should
+  cause helpful core dumps to occur if it is tried by accident by
+  people extending or adapting this malloc. 
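+
+  As a worked instance (sizes assumed for illustration): an in-use,
+  non-mmapped 24-byte chunk whose predecessor is also in use carries
+  size field 24|PREV_INUSE == 0x19; chunksize() below masks off both
+  flag bits and recovers 24.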
+*/
+#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
+
+/* Get size, ignoring use bits */
+#define chunksize(p)         ((p)->size & ~(SIZE_BITS))
+
+
+/* Ptr to next physical malloc_chunk. */
+#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
+
+/* Ptr to previous physical malloc_chunk */
+#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
+
+/* Treat space at ptr + offset as a chunk */
+#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
+
+/* extract p's inuse bit */
+#define inuse(p)\
+((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
+
+/* set/clear chunk as being inuse without otherwise disturbing */
+#define set_inuse(p)\
+((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
+
+#define clear_inuse(p)\
+((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
+
+
+/* check/set/clear inuse bits in known places */
+#define inuse_bit_at_offset(p, s)\
+ (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
+
+#define set_inuse_bit_at_offset(p, s)\
+ (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
+
+#define clear_inuse_bit_at_offset(p, s)\
+ (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
+
+
+/* Set size at head, without disturbing its use bit */
+#define set_head_size(p, s)  ((p)->size = (((p)->size & PREV_INUSE) | (s)))
+
+/* Set size/use field */
+#define set_head(p, s)       ((p)->size = (s))
+
+/* Set size at footer (only when chunk is not in use) */
+#define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
+
+
+/*
+  -------------------- Internal data structures --------------------
+
+   All internal state is held in an instance of malloc_state defined
+   below. There are no other static variables, except in two optional
+   cases:
+   * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
+   * If HAVE_MMAP is true, but mmap doesn't support
+     MAP_ANONYMOUS, a dummy file descriptor for mmap.
+
+   Beware of lots of tricks that minimize the total bookkeeping space
+   requirements. The result is a little over 1K bytes (for 4byte
+   pointers and size_t.)
+*/
+
+/*
+  Bins
+
+    An array of bin headers for free chunks. Each bin is doubly
+    linked. The bins are approximately proportionally (log) spaced.
+    There are a lot of these bins (128). This may look excessive, but
+    works very well in practice. Most bins hold sizes that are
+    unusual as malloc request sizes, but are more usual for fragments
+    and consolidated sets of chunks, which is what these bins hold, so
+    they can be found quickly. All procedures maintain the invariant
+    that no consolidated chunk physically borders another one, so each
+    chunk in a list is known to be preceded and followed by either
+    inuse chunks or the ends of memory.
+
+    Chunks in bins are kept in size order, with ties going to the
+    approximately least recently used chunk. Ordering isn't needed
+    for the small bins, which all contain the same-sized chunks, but
+    facilitates best-fit allocation for larger chunks. These lists
+    are just sequential. Keeping them in order almost never requires
+    enough traversal to warrant using fancier ordered data
+    structures.
+
+    Chunks of the same size are linked with the most
+    recently freed at the front, and allocations are taken from the
+    back. This results in LRU (FIFO) allocation order, which tends
+    to give each chunk an equal opportunity to be consolidated with
+    adjacent freed chunks, resulting in larger free chunks and less
+    fragmentation. 
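+
+    As a concrete (illustrative) instance of the spacing described
+    under "Indexing" below: with 8-byte spacing, every free 40-byte
+    chunk sits in small bin 40>>3 == 5, linked with all other
+    40-byte chunks.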
+ + To simplify use in double-linked lists, each bin header acts + as a malloc_chunk. This avoids special-casing for headers. + But to conserve space and improve locality, we allocate + only the fd/bk pointers of bins, and then use repositioning tricks + to treat these as the fields of a malloc_chunk*. +*/ + +typedef struct malloc_chunk* mbinptr; + +/* addressing -- note that bin_at(0) does not exist */ +#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1))) + +/* analog of ++bin */ +#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1))) + +/* Reminders about list directionality within bins */ +#define first(b) ((b)->fd) +#define last(b) ((b)->bk) + +/* Take a chunk off a bin list */ +#define unlink(P, BK, FD) { \ + FD = P->fd; \ + BK = P->bk; \ + FD->bk = BK; \ + BK->fd = FD; \ +} + +/* + Indexing + + Bins for sizes < 512 bytes contain chunks of all the same size, spaced + 8 bytes apart. Larger bins are approximately logarithmically spaced: + + 64 bins of size 8 + 32 bins of size 64 + 16 bins of size 512 + 8 bins of size 4096 + 4 bins of size 32768 + 2 bins of size 262144 + 1 bin of size what's left + + There is actually a little bit of slop in the numbers in bin_index + for the sake of speed. This makes no difference elsewhere. + + The bins top out around 1MB because we expect to service large + requests via mmap. +*/ + +#define NBINS 128 +#define NSMALLBINS 64 +#define SMALLBIN_WIDTH 8 +#define MIN_LARGE_SIZE 512 + +#define in_smallbin_range(sz) \ + ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE) + +#define smallbin_index(sz) (((unsigned)(sz)) >> 3) + +#define largebin_index(sz) \ +(((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \ + ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \ + ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \ + ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \ + ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \ + 126) + +#define bin_index(sz) \ + ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz)) + + +/* + Unsorted chunks + + All remainders from chunk splits, as well as all returned chunks, + are first placed in the "unsorted" bin. They are then placed + in regular bins after malloc gives them ONE chance to be used before + binning. So, basically, the unsorted_chunks list acts as a queue, + with chunks being placed on it in free (and malloc_consolidate), + and taken off (to be either used or placed in bins) in malloc. +*/ + +/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */ +#define unsorted_chunks(M) (bin_at(M, 1)) + +/* + Top + + The top-most available chunk (i.e., the one bordering the end of + available memory) is treated specially. It is never included in + any bin, is used only if no other chunk is available, and is + released back to the system if it is very large (see + M_TRIM_THRESHOLD). Because top initially + points to its own bin with initial zero size, thus forcing + extension on the first malloc request, we avoid having any special + code in malloc to check whether it even exists yet. But we still + need to do so when getting memory from system, so we make + initial_top treat the bin as a legal but unusable chunk during the + interval between initialization and the first call to + sYSMALLOc. (This is somewhat delicate, since it relies on + the 2 preceding words to be zero during this interval as well.) 
+*/
+
+/* Conveniently, the unsorted bin can be used as dummy top on first call */
+#define initial_top(M)              (unsorted_chunks(M))
+
+/*
+  Binmap
+
+    To help compensate for the large number of bins, a one-level index
+    structure is used for bin-by-bin searching.  `binmap' is a
+    bitvector recording whether bins are definitely empty so they can
+    be skipped over during traversals. The bits are NOT always
+    cleared as soon as bins are empty, but instead only
+    when they are noticed to be empty during traversal in malloc.
+*/
+
+/* Conservatively use 32 bits per map word, even if on 64bit system */
+#define BINMAPSHIFT      5
+#define BITSPERMAP       (1U << BINMAPSHIFT)
+#define BINMAPSIZE       (NBINS / BITSPERMAP)
+
+#define idx2block(i)     ((i) >> BINMAPSHIFT)
+#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
+
+#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
+#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
+#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
+
+/*
+  Fastbins
+
+    An array of lists holding recently freed small chunks.  Fastbins
+    are not doubly linked.  It is faster to single-link them, and
+    since chunks are never removed from the middles of these lists,
+    double linking is not necessary. Also, unlike regular bins, they
+    are not even processed in FIFO order (they use faster LIFO) since
+    ordering doesn't much matter in the transient contexts in which
+    fastbins are normally used.
+
+    Chunks in fastbins keep their inuse bit set, so they cannot
+    be consolidated with other free chunks. malloc_consolidate
+    releases all chunks in fastbins and consolidates them with
+    other free chunks.
+*/
+
+typedef struct malloc_chunk* mfastbinptr;
+
+/* offset 2 to use otherwise unindexable first 2 bins */
+#define fastbin_index(sz)        ((((unsigned int)(sz)) >> 3) - 2)
+
+/* The maximum fastbin request size we support */
+#define MAX_FAST_SIZE     80
+
+#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
+
+/*
+  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
+  that triggers automatic consolidation of possibly-surrounding
+  fastbin chunks. This is a heuristic, so the exact value should not
+  matter too much. It is defined at half the default trim threshold as a
+  compromise heuristic to only attempt consolidation if it is likely
+  to lead to trimming. However, it is not dynamically tunable, since
+  consolidation reduces fragmentation surrounding large chunks even
+  if trimming is not used.
+*/
+
+#define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
+
+/*
+  Since the lowest 2 bits in max_fast don't matter in size comparisons,
+  they are used as flags.
+*/
+
+/*
+  FASTCHUNKS_BIT held in max_fast indicates that there are probably
+  some fastbin chunks. It is set true on entering a chunk into any
+  fastbin, and cleared only in malloc_consolidate.
+
+  The truth value is inverted so that have_fastchunks will be true
+  upon startup (since statics are zero-filled), simplifying
+  initialization checks.
+*/
+
+#define FASTCHUNKS_BIT       (1U)
+
+#define have_fastchunks(M)   (((M)->max_fast &  FASTCHUNKS_BIT) == 0)
+#define clear_fastchunks(M)  ((M)->max_fast |=  FASTCHUNKS_BIT)
+#define set_fastchunks(M)    ((M)->max_fast &= ~FASTCHUNKS_BIT)
+
+/*
+  NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
+  regions.  Otherwise, contiguity is exploited in merging together,
+  when possible, results from consecutive MORECORE calls. 
+
+  The initial value comes from MORECORE_CONTIGUOUS, but is
+  changed dynamically if mmap is ever used as an sbrk substitute.
+*/
+
+#define NONCONTIGUOUS_BIT     (2U)
+
+#define contiguous(M)          (((M)->max_fast &  NONCONTIGUOUS_BIT) == 0)
+#define noncontiguous(M)       (((M)->max_fast &  NONCONTIGUOUS_BIT) != 0)
+#define set_noncontiguous(M)   ((M)->max_fast |=  NONCONTIGUOUS_BIT)
+#define set_contiguous(M)      ((M)->max_fast &= ~NONCONTIGUOUS_BIT)
+
+/*
+   Set value of max_fast.
+   Use impossibly small value if 0.
+   Precondition: there are no existing fastbin chunks.
+   Setting the value clears fastchunk bit but preserves noncontiguous bit.
+*/
+
+#define set_max_fast(M, s) \
+  (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
+  FASTCHUNKS_BIT | \
+  ((M)->max_fast &  NONCONTIGUOUS_BIT)
+
+
+/*
+   ----------- Internal state representation and initialization -----------
+*/
+
+struct malloc_state {
+
+  /* The maximum chunk size to be eligible for fastbin */
+  INTERNAL_SIZE_T  max_fast;   /* low 2 bits used as flags */
+
+  /* Fastbins */
+  mfastbinptr      fastbins[NFASTBINS];
+
+  /* Base of the topmost chunk -- not otherwise kept in a bin */
+  mchunkptr        top;
+
+  /* The remainder from the most recent split of a small request */
+  mchunkptr        last_remainder;
+
+  /* Normal bins packed as described above */
+  mchunkptr        bins[NBINS * 2];
+
+  /* Bitmap of bins */
+  unsigned int     binmap[BINMAPSIZE];
+
+  /* Tunable parameters */
+  unsigned long    trim_threshold;
+  INTERNAL_SIZE_T  top_pad;
+  INTERNAL_SIZE_T  mmap_threshold;
+
+  /* Memory map support */
+  int              n_mmaps;
+  int              n_mmaps_max;
+  int              max_n_mmaps;
+
+  /* Cache malloc_getpagesize */
+  unsigned int     pagesize;
+
+  /* Statistics */
+  INTERNAL_SIZE_T  mmapped_mem;
+  INTERNAL_SIZE_T  sbrked_mem;
+  INTERNAL_SIZE_T  max_sbrked_mem;
+  INTERNAL_SIZE_T  max_mmapped_mem;
+  INTERNAL_SIZE_T  max_total_mem;
+};
+
+typedef struct malloc_state *mstate;
+
+/*
+   There is exactly one instance of this struct in this malloc.
+   If you are adapting this malloc in a way that does NOT use a static
+   malloc_state, you MUST explicitly zero-fill it before using. This
+   malloc relies on the property that malloc_state is initialized to
+   all zeroes (as is true of C statics).
+*/
+
+static struct malloc_state av_;  /* never directly referenced */
+
+/*
+   All uses of av_ are via get_malloc_state().
+   At most one "call" to get_malloc_state is made per invocation of
+   the public versions of malloc and free, but other routines
+   that in turn invoke malloc and/or free may call more than once.
+   Also, it is called in check* routines if DEBUG is set.
+*/
+
+#define get_malloc_state() (&(av_))
+
+/*
+  Initialize a malloc_state struct.
+
+  This is called only from within malloc_consolidate, which needs
+  to be called in the same contexts anyway.  It is never called directly
+  outside of malloc_consolidate because some optimizing compilers try
+  to inline it at all call points, which turns out not to be an
+  optimization at all. (Inlining it in malloc_consolidate is fine though.) 
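+
+  If you do adapt this code to use a non-static malloc_state (see the
+  note above av_), a minimal sketch of the required zero-fill, with
+  memset assumed to be available from <string.h>, is:
+
+    struct malloc_state st;
+    memset(&st, 0, sizeof(st));  // mimic zero-initialization of statics
+    // ... then make get_malloc_state() resolve to &st before first use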
+*/ + +#if __STD_C +static void malloc_init_state(mstate av) +#else +static void malloc_init_state(av) mstate av; +#endif +{ + int i; + mbinptr bin; + + /* Establish circular links for normal bins */ + for (i = 1; i < NBINS; ++i) { + bin = bin_at(av,i); + bin->fd = bin->bk = bin; + } + + av->top_pad = DEFAULT_TOP_PAD; + av->n_mmaps_max = DEFAULT_MMAP_MAX; + av->mmap_threshold = DEFAULT_MMAP_THRESHOLD; + av->trim_threshold = DEFAULT_TRIM_THRESHOLD; + +#if !MORECORE_CONTIGUOUS + set_noncontiguous(av); +#endif + + set_max_fast(av, DEFAULT_MXFAST); + + av->top = initial_top(av); + av->pagesize = malloc_getpagesize; +} + +/* + Other internal utilities operating on mstates +*/ + +#if __STD_C +static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate); +static int sYSTRIm(size_t, mstate); +static void malloc_consolidate(mstate); +static Void_t** iALLOc(size_t, size_t*, int, Void_t**); +#else +static Void_t* sYSMALLOc(); +static int sYSTRIm(); +static void malloc_consolidate(); +static Void_t** iALLOc(); +#endif + +/* + Debugging support + + These routines make a number of assertions about the states + of data structures that should be true at all times. If any + are not true, it's very likely that a user program has somehow + trashed memory. (It's also possible that there is a coding error + in malloc. In which case, please report it!) +*/ + +#if ! DEBUG + +#define check_chunk(P) +#define check_free_chunk(P) +#define check_inuse_chunk(P) +#define check_remalloced_chunk(P,N) +#define check_malloced_chunk(P,N) +#define check_malloc_state() + +#else +#define check_chunk(P) do_check_chunk(P) +#define check_free_chunk(P) do_check_free_chunk(P) +#define check_inuse_chunk(P) do_check_inuse_chunk(P) +#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N) +#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N) +#define check_malloc_state() do_check_malloc_state() + +/* + Properties of all chunks +*/ + +#if __STD_C +static void do_check_chunk(mchunkptr p) +#else +static void do_check_chunk(p) mchunkptr p; +#endif +{ + mstate av = get_malloc_state(); + unsigned long sz = chunksize(p); + /* min and max possible addresses assuming contiguous allocation */ + char* max_address = (char*)(av->top) + chunksize(av->top); + char* min_address = max_address - av->sbrked_mem; + + if (!chunk_is_mmapped(p)) { + + /* Has legal address ... */ + if (p != av->top) { + if (contiguous(av)) { + assert(((char*)p) >= min_address); + assert(((char*)p + sz) <= ((char*)(av->top))); + } + } + else { + /* top size is always at least MINSIZE */ + assert((unsigned long)(sz) >= MINSIZE); + /* top predecessor always marked inuse */ + assert(prev_inuse(p)); + } + + } + else { +#if HAVE_MMAP + /* address is outside main heap */ + if (contiguous(av) && av->top != initial_top(av)) { + assert(((char*)p) < min_address || ((char*)p) > max_address); + } + /* chunk is page-aligned */ + assert(((p->prev_size + sz) & (av->pagesize-1)) == 0); + /* mem is aligned */ + assert(aligned_OK(chunk2mem(p))); +#else + /* force an appropriate assert violation if debug set */ + assert(!chunk_is_mmapped(p)); +#endif + } +} + +/* + Properties of free chunks +*/ + +#if __STD_C +static void do_check_free_chunk(mchunkptr p) +#else +static void do_check_free_chunk(p) mchunkptr p; +#endif +{ + mstate av = get_malloc_state(); + + INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; + mchunkptr next = chunk_at_offset(p, sz); + + do_check_chunk(p); + + /* Chunk must claim to be free ... 
*/
+  assert(!inuse(p));
+  assert (!chunk_is_mmapped(p));
+
+  /* Unless a special marker, must have OK fields */
+  if ((unsigned long)(sz) >= MINSIZE)
+  {
+    assert((sz & MALLOC_ALIGN_MASK) == 0);
+    assert(aligned_OK(chunk2mem(p)));
+    /* ... matching footer field */
+    assert(next->prev_size == sz);
+    /* ... and is fully consolidated */
+    assert(prev_inuse(p));
+    assert (next == av->top || inuse(next));
+
+    /* ... and has minimally sane links */
+    assert(p->fd->bk == p);
+    assert(p->bk->fd == p);
+  }
+  else /* markers are always of size SIZE_SZ */
+    assert(sz == SIZE_SZ);
+}
+
+/*
+  Properties of inuse chunks
+*/
+
+#if __STD_C
+static void do_check_inuse_chunk(mchunkptr p)
+#else
+static void do_check_inuse_chunk(p) mchunkptr p;
+#endif
+{
+  mstate av = get_malloc_state();
+  mchunkptr next;
+  do_check_chunk(p);
+
+  if (chunk_is_mmapped(p))
+    return; /* mmapped chunks have no next/prev */
+
+  /* Check whether it claims to be in use ... */
+  assert(inuse(p));
+
+  next = next_chunk(p);
+
+  /* ... and is surrounded by OK chunks.
+    Since more things can be checked with free chunks than inuse ones,
+    if an inuse chunk borders them and debug is on, it's worth doing them.
+  */
+  if (!prev_inuse(p))  {
+    /* Note that we cannot even look at prev unless it is not inuse */
+    mchunkptr prv = prev_chunk(p);
+    assert(next_chunk(prv) == p);
+    do_check_free_chunk(prv);
+  }
+
+  if (next == av->top) {
+    assert(prev_inuse(next));
+    assert(chunksize(next) >= MINSIZE);
+  }
+  else if (!inuse(next))
+    do_check_free_chunk(next);
+}
+
+/*
+  Properties of chunks recycled from fastbins
+*/
+
+#if __STD_C
+static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
+#else
+static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
+#endif
+{
+  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
+
+  do_check_inuse_chunk(p);
+
+  /* Legal size ... */
+  assert((sz & MALLOC_ALIGN_MASK) == 0);
+  assert((unsigned long)(sz) >= MINSIZE);
+  /* ... and alignment */
+  assert(aligned_OK(chunk2mem(p)));
+  /* chunk is less than MINSIZE more than request */
+  assert((long)(sz) - (long)(s) >= 0);
+  assert((long)(sz) - (long)(s + MINSIZE) < 0);
+}
+
+/*
+  Properties of nonrecycled chunks at the point they are malloced
+*/
+
+#if __STD_C
+static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
+#else
+static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
+#endif
+{
+  /* same as recycled case ... */
+  do_check_remalloced_chunk(p, s);
+
+  /*
+    ... plus, must obey implementation invariant that prev_inuse is
+    always true of any allocated chunk; i.e., that each allocated
+    chunk borders either a previously allocated and still in-use
+    chunk, or the base of its memory arena. This is ensured
+    by making all allocations from the `lowest' part of any found
+    chunk. This does not necessarily hold however for chunks
+    recycled via fastbins.
+  */
+
+  assert(prev_inuse(p));
+}
+
+
+/*
+  Properties of malloc_state.
+
+  This may be useful for debugging malloc, as well as detecting user
+  programmer errors that somehow write into malloc_state.
+
+  If you are extending or experimenting with this malloc, you can
+  probably figure out how to hack this routine to print out or
+  display chunk addresses, sizes, bins, and other instrumentation. 
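+
+  A minimal sketch of such instrumentation (fprintf from <stdio.h> is
+  assumed; everything else is defined in this file) might be:
+
+    static void dump_bins(mstate av) {
+      int i;
+      mchunkptr p;
+      for (i = 1; i < NBINS; ++i) {
+        mbinptr b = bin_at(av, i);
+        for (p = last(b); p != b; p = p->bk)
+          fprintf(stderr, "bin %d: chunk %p size %lu\n",
+                  i, (void*)p, (unsigned long)chunksize(p));
+      }
+    }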
+*/ + +static void do_check_malloc_state() +{ + mstate av = get_malloc_state(); + int i; + mchunkptr p; + mchunkptr q; + mbinptr b; + unsigned int binbit; + int empty; + unsigned int idx; + INTERNAL_SIZE_T size; + unsigned long total = 0; + int max_fast_bin; + + /* internal size_t must be no wider than pointer type */ + assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*)); + + /* alignment is a power of 2 */ + assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0); + + /* cannot run remaining checks until fully initialized */ + if (av->top == 0 || av->top == initial_top(av)) + return; + + /* pagesize is a power of 2 */ + assert((av->pagesize & (av->pagesize-1)) == 0); + + /* properties of fastbins */ + + /* max_fast is in allowed range */ + assert((av->max_fast & ~1) <= request2size(MAX_FAST_SIZE)); + + max_fast_bin = fastbin_index(av->max_fast); + + for (i = 0; i < NFASTBINS; ++i) { + p = av->fastbins[i]; + + /* all bins past max_fast are empty */ + if (i > max_fast_bin) + assert(p == 0); + + while (p != 0) { + /* each chunk claims to be inuse */ + do_check_inuse_chunk(p); + total += chunksize(p); + /* chunk belongs in this bin */ + assert(fastbin_index(chunksize(p)) == i); + p = p->fd; + } + } + + if (total != 0) + assert(have_fastchunks(av)); + else if (!have_fastchunks(av)) + assert(total == 0); + + /* check normal bins */ + for (i = 1; i < NBINS; ++i) { + b = bin_at(av,i); + + /* binmap is accurate (except for bin 1 == unsorted_chunks) */ + if (i >= 2) { + binbit = get_binmap(av,i); + empty = last(b) == b; + if (!binbit) + assert(empty); + else if (!empty) + assert(binbit); + } + + for (p = last(b); p != b; p = p->bk) { + /* each chunk claims to be free */ + do_check_free_chunk(p); + size = chunksize(p); + total += size; + if (i >= 2) { + /* chunk belongs in bin */ + idx = bin_index(size); + assert(idx == i); + /* lists are sorted */ + assert(p->bk == b || + (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p)); + } + /* chunk is followed by a legal chain of inuse chunks */ + for (q = next_chunk(p); + (q != av->top && inuse(q) && + (unsigned long)(chunksize(q)) >= MINSIZE); + q = next_chunk(q)) + do_check_inuse_chunk(q); + } + } + + /* top chunk is OK */ + check_chunk(av->top); + + /* sanity checks for statistics */ + + assert(total <= (unsigned long)(av->max_total_mem)); + assert(av->n_mmaps >= 0); + assert(av->n_mmaps <= av->n_mmaps_max); + assert(av->n_mmaps <= av->max_n_mmaps); + + assert((unsigned long)(av->sbrked_mem) <= + (unsigned long)(av->max_sbrked_mem)); + + assert((unsigned long)(av->mmapped_mem) <= + (unsigned long)(av->max_mmapped_mem)); + + assert((unsigned long)(av->max_total_mem) >= + (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem)); +} +#endif + + +/* ----------- Routines dealing with system allocation -------------- */ + +/* + sysmalloc handles malloc cases requiring more memory from the system. + On entry, it is assumed that av->top does not have enough + space to service request for nb bytes, thus requiring that av->top + be extended or replaced. 
+*/ + +#if __STD_C +static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av) +#else +static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av; +#endif +{ + mchunkptr old_top; /* incoming value of av->top */ + INTERNAL_SIZE_T old_size; /* its size */ + char* old_end; /* its end address */ + + long size; /* arg to first MORECORE or mmap call */ + char* brk; /* return value from MORECORE */ + + long correction; /* arg to 2nd MORECORE call */ + char* snd_brk; /* 2nd return val */ + + INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ + INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */ + char* aligned_brk; /* aligned offset into brk */ + + mchunkptr p; /* the allocated/returned chunk */ + mchunkptr remainder; /* remainder from allocation */ + unsigned long remainder_size; /* its size */ + + unsigned long sum; /* for updating stats */ + + size_t pagemask = av->pagesize - 1; + + +#if HAVE_MMAP + + /* + If have mmap, and the request size meets the mmap threshold, and + the system supports mmap, and there are few enough currently + allocated mmapped regions, try to directly map this request + rather than expanding top. + */ + + if ((unsigned long)(nb) >= (unsigned long)(av->mmap_threshold) && + (av->n_mmaps < av->n_mmaps_max)) { + + char* mm; /* return value from mmap call*/ + + /* + Round up size to nearest page. For mmapped chunks, the overhead + is one SIZE_SZ unit larger than for normal chunks, because there + is no following chunk whose prev_size field could be used. + */ + size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask; + + /* Don't try if size wraps around 0 */ + if ((unsigned long)(size) > (unsigned long)(nb)) { + + mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE)); + + if (mm != (char*)(MORECORE_FAILURE)) { + + /* + The offset to the start of the mmapped region is stored + in the prev_size field of the chunk. This allows us to adjust + returned start address to meet alignment requirements here + and in memalign(), and still be able to compute proper + address argument for later munmap in free() and realloc(). + */ + + front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) { + correction = MALLOC_ALIGNMENT - front_misalign; + p = (mchunkptr)(mm + correction); + p->prev_size = correction; + set_head(p, (size - correction) |IS_MMAPPED); + } + else { + p = (mchunkptr)mm; + set_head(p, size|IS_MMAPPED); + } + + /* update statistics */ + + if (++av->n_mmaps > av->max_n_mmaps) + av->max_n_mmaps = av->n_mmaps; + + sum = av->mmapped_mem += size; + if (sum > (unsigned long)(av->max_mmapped_mem)) + av->max_mmapped_mem = sum; + sum += av->sbrked_mem; + if (sum > (unsigned long)(av->max_total_mem)) + av->max_total_mem = sum; + + check_chunk(p); + + return chunk2mem(p); + } + } + } +#endif + + /* Record incoming configuration of top */ + + old_top = av->top; + old_size = chunksize(old_top); + old_end = (char*)(chunk_at_offset(old_top, old_size)); + + brk = snd_brk = (char*)(MORECORE_FAILURE); + + /* + If not the first time through, we require old_size to be + at least MINSIZE and to have prev_inuse set. 
+ */ + + assert((old_top == initial_top(av) && old_size == 0) || + ((unsigned long) (old_size) >= MINSIZE && + prev_inuse(old_top))); + + /* Precondition: not enough current space to satisfy nb request */ + assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE)); + + /* Precondition: all fastbins are consolidated */ + assert(!have_fastchunks(av)); + + + /* Request enough space for nb + pad + overhead */ + + size = nb + av->top_pad + MINSIZE; + + /* + If contiguous, we can subtract out existing space that we hope to + combine with new space. We add it back later only if + we don't actually get contiguous space. + */ + + if (contiguous(av)) + size -= old_size; + + /* + Round to a multiple of page size. + If MORECORE is not contiguous, this ensures that we only call it + with whole-page arguments. And if MORECORE is contiguous and + this is not first time through, this preserves page-alignment of + previous calls. Otherwise, we correct to page-align below. + */ + + size = (size + pagemask) & ~pagemask; + + /* + Don't try to call MORECORE if argument is so big as to appear + negative. Note that since mmap takes size_t arg, it may succeed + below even if we cannot call MORECORE. + */ + + if (size > 0) + brk = (char*)(MORECORE(size)); + + /* + If have mmap, try using it as a backup when MORECORE fails or + cannot be used. This is worth doing on systems that have "holes" in + address space, so sbrk cannot extend to give contiguous space, but + space is available elsewhere. Note that we ignore mmap max count + and threshold limits, since the space will not be used as a + segregated mmap region. + */ + +#if HAVE_MMAP + if (brk == (char*)(MORECORE_FAILURE)) { + + /* Cannot merge with old top, so add its size back in */ + if (contiguous(av)) + size = (size + old_size + pagemask) & ~pagemask; + + /* If we are relying on mmap as backup, then use larger units */ + if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE)) + size = MMAP_AS_MORECORE_SIZE; + + /* Don't try if size wraps around 0 */ + if ((unsigned long)(size) > (unsigned long)(nb)) { + + brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE)); + + if (brk != (char*)(MORECORE_FAILURE)) { + + /* We do not need, and cannot use, another sbrk call to find end */ + snd_brk = brk + size; + + /* + Record that we no longer have a contiguous sbrk region. + After the first time mmap is used as backup, we do not + ever rely on contiguous space since this could incorrectly + bridge regions. + */ + set_noncontiguous(av); + } + } + } +#endif + + if (brk != (char*)(MORECORE_FAILURE)) { + av->sbrked_mem += size; + + /* + If MORECORE extends previous space, we can likewise extend top size. + */ + + if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) { + set_head(old_top, (size + old_size) | PREV_INUSE); + } + + /* + Otherwise, make adjustments: + + * If the first time through or noncontiguous, we need to call sbrk + just to find out where the end of memory lies. + + * We need to ensure that all returned chunks from malloc will meet + MALLOC_ALIGNMENT + + * If there was an intervening foreign sbrk, we need to adjust sbrk + request size to account for fact that we will not be able to + combine new space with existing space in old_top. + + * Almost all systems internally allocate whole pages at a time, in + which case we might as well use the whole last page of request. + So we allocate enough more memory to hit a page boundary now, + which in turn causes future contiguous calls to page-align. 
+ */ + + else { + front_misalign = 0; + end_misalign = 0; + correction = 0; + aligned_brk = brk; + + /* handle contiguous cases */ + if (contiguous(av)) { + + /* Guarantee alignment of first new chunk made from this space */ + + front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) { + + /* + Skip over some bytes to arrive at an aligned position. + We don't need to specially mark these wasted front bytes. + They will never be accessed anyway because + prev_inuse of av->top (and any chunk created from its start) + is always true after initialization. + */ + + correction = MALLOC_ALIGNMENT - front_misalign; + aligned_brk += correction; + } + + /* + If this isn't adjacent to existing space, then we will not + be able to merge with old_top space, so must add to 2nd request. + */ + + correction += old_size; + + /* Extend the end address to hit a page boundary */ + end_misalign = (INTERNAL_SIZE_T)(brk + size + correction); + correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign; + + assert(correction >= 0); + snd_brk = (char*)(MORECORE(correction)); + + /* + If can't allocate correction, try to at least find out current + brk. It might be enough to proceed without failing. + + Note that if second sbrk did NOT fail, we assume that space + is contiguous with first sbrk. This is a safe assumption unless + program is multithreaded but doesn't use locks and a foreign sbrk + occurred between our first and second calls. + */ + + if (snd_brk == (char*)(MORECORE_FAILURE)) { + correction = 0; + snd_brk = (char*)(MORECORE(0)); + } + } + + /* handle non-contiguous cases */ + else { + /* MORECORE/mmap must correctly align */ + assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0); + + /* Find out current end of memory */ + if (snd_brk == (char*)(MORECORE_FAILURE)) { + snd_brk = (char*)(MORECORE(0)); + } + } + + /* Adjust top based on results of second sbrk */ + if (snd_brk != (char*)(MORECORE_FAILURE)) { + av->top = (mchunkptr)aligned_brk; + set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); + av->sbrked_mem += correction; + + /* + If not the first time through, we either have a + gap due to foreign sbrk or a non-contiguous region. Insert a + double fencepost at old_top to prevent consolidation with space + we don't own. These fenceposts are artificial chunks that are + marked as inuse and are in any case too small to use. We need + two to make sizes and alignments work out. + */ + + if (old_size != 0) { + /* + Shrink old_top to insert fenceposts, keeping size a + multiple of MALLOC_ALIGNMENT. We know there is at least + enough space in old_top to do this. + */ + old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK; + set_head(old_top, old_size | PREV_INUSE); + + /* + Note that the following assignments completely overwrite + old_top when old_size was previously MINSIZE. This is + intentional. We need the fencepost, even if old_top otherwise gets + lost. + */ + chunk_at_offset(old_top, old_size )->size = + SIZE_SZ|PREV_INUSE; + + chunk_at_offset(old_top, old_size + SIZE_SZ)->size = + SIZE_SZ|PREV_INUSE; + + /* If possible, release the rest. 
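+             (The fenceposts written above make old_top look like an
+             ordinary inuse chunk, so it can safely be handed to fREe();
+             chunks smaller than MINSIZE are not valid to free, hence
+             the size check.)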
*/
+          if (old_size >= MINSIZE) {
+            fREe(chunk2mem(old_top));
+          }
+
+        }
+      }
+    }
+
+    /* Update statistics */
+    sum = av->sbrked_mem;
+    if (sum > (unsigned long)(av->max_sbrked_mem))
+      av->max_sbrked_mem = sum;
+
+    sum += av->mmapped_mem;
+    if (sum > (unsigned long)(av->max_total_mem))
+      av->max_total_mem = sum;
+
+    check_malloc_state();
+
+    /* finally, do the allocation */
+    p = av->top;
+    size = chunksize(p);
+
+    /* check that one of the above allocation paths succeeded */
+    if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
+      remainder_size = size - nb;
+      remainder = chunk_at_offset(p, nb);
+      av->top = remainder;
+      set_head(p, nb | PREV_INUSE);
+      set_head(remainder, remainder_size | PREV_INUSE);
+      check_malloced_chunk(p, nb);
+      return chunk2mem(p);
+    }
+  }
+
+  /* catch all failure paths */
+  MALLOC_FAILURE_ACTION;
+  return 0;
+}
+
+
+/*
+  sYSTRIm is an inverse of sorts to sYSMALLOc.  It gives memory back
+  to the system (via negative arguments to sbrk) if there is unused
+  memory at the `high' end of the malloc pool. It is called
+  automatically by free() when top space exceeds the trim
+  threshold. It is also called by the public malloc_trim routine.  It
+  returns 1 if it actually released any memory, else 0.
+*/
+
+#if __STD_C
+static int sYSTRIm(size_t pad, mstate av)
+#else
+static int sYSTRIm(pad, av) size_t pad; mstate av;
+#endif
+{
+  long  top_size;        /* Amount of top-most memory */
+  long  extra;           /* Amount to release */
+  long  released;        /* Amount actually released */
+  char* current_brk;     /* address returned by pre-check sbrk call */
+  char* new_brk;         /* address returned by post-check sbrk call */
+  size_t pagesz;
+
+  pagesz = av->pagesize;
+  top_size = chunksize(av->top);
+
+  /* Release in pagesize units, keeping at least one page */
+  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
+
+  if (extra > 0) {
+
+    /*
+      Only proceed if end of memory is where we last set it.
+      This avoids problems if there were foreign sbrk calls.
+    */
+    current_brk = (char*)(MORECORE(0));
+    if (current_brk == (char*)(av->top) + top_size) {
+
+      /*
+        Attempt to release memory. We ignore MORECORE return value,
+        and instead call again to find out where new end of memory is.
+        This avoids problems if first call releases less than we asked,
+        or if failure somehow altered brk value. (We could still
+        encounter problems if it altered brk in some very bad way,
+        but the only thing we can do is adjust anyway, which will cause
+        some downstream failure.)
+      */
+
+      MORECORE(-extra);
+      new_brk = (char*)(MORECORE(0));
+
+      if (new_brk != (char*)MORECORE_FAILURE) {
+        released = (long)(current_brk - new_brk);
+
+        if (released != 0) {
+          /* Success. Adjust top.
*/ + av->sbrked_mem -= released; + set_head(av->top, (top_size - released) | PREV_INUSE); + check_malloc_state(); + return 1; + } + } + } + } + return 0; +} + +/* + ------------------------------ malloc ------------------------------ +*/ + +#if __STD_C +Void_t* mALLOc(size_t bytes) +#else + Void_t* mALLOc(bytes) size_t bytes; +#endif +{ + mstate av = get_malloc_state(); + + INTERNAL_SIZE_T nb; /* normalized request size */ + unsigned int idx; /* associated bin index */ + mbinptr bin; /* associated bin */ + mfastbinptr* fb; /* associated fastbin */ + + mchunkptr victim; /* inspected/selected chunk */ + INTERNAL_SIZE_T size; /* its size */ + int victim_index; /* its bin index */ + + mchunkptr remainder; /* remainder from a split */ + unsigned long remainder_size; /* its size */ + + unsigned int block; /* bit map traverser */ + unsigned int bit; /* bit map traverser */ + unsigned int map; /* current word of binmap */ + + mchunkptr fwd; /* misc temp for linking */ + mchunkptr bck; /* misc temp for linking */ + + /* + Convert request size to internal form by adding SIZE_SZ bytes + overhead plus possibly more to obtain necessary alignment and/or + to obtain a size of at least MINSIZE, the smallest allocatable + size. Also, checked_request2size traps (returning 0) request sizes + that are so large that they wrap around zero when padded and + aligned. + */ + + checked_request2size(bytes, nb); + + /* + If the size qualifies as a fastbin, first check corresponding bin. + This code is safe to execute even if av is not yet initialized, so we + can try it without checking, which saves some time on this fast path. + */ + + if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) { + fb = &(av->fastbins[(fastbin_index(nb))]); + if ( (victim = *fb) != 0) { + *fb = victim->fd; + check_remalloced_chunk(victim, nb); + return chunk2mem(victim); + } + } + + /* + If a small request, check regular bin. Since these "smallbins" + hold one size each, no searching within bins is necessary. + (For a large request, we need to wait until unsorted chunks are + processed to find best fit. But for small ones, fits are exact + anyway, so we can check now, which is faster.) + */ + + if (in_smallbin_range(nb)) { + idx = smallbin_index(nb); + bin = bin_at(av,idx); + + if ( (victim = last(bin)) != bin) { + if (victim == 0) /* initialization check */ + malloc_consolidate(av); + else { + bck = victim->bk; + set_inuse_bit_at_offset(victim, nb); + bin->bk = bck; + bck->fd = bin; + + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + } + } + + /* + If this is a large request, consolidate fastbins before continuing. + While it might look excessive to kill all fastbins before + even seeing if there is space available, this avoids + fragmentation problems normally associated with fastbins. + Also, in practice, programs tend to have runs of either small or + large requests, but less often mixtures, so consolidation is not + invoked all that often in most programs. And the programs that + it is called frequently in otherwise tend to fragment. + */ + + else { + idx = largebin_index(nb); + if (have_fastchunks(av)) + malloc_consolidate(av); + } + + /* + Process recently freed or remaindered chunks, taking one only if + it is exact fit, or, if this a small request, the chunk is remainder from + the most recent non-exact fit. Place other traversed chunks in + bins. Note that this step is the only place in any routine where + chunks are placed in bins. 
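+
+    Schematically (a sketch only; the code below also implements the
+    last_remainder shortcut described above):
+
+      while the unsorted bin is non-empty:
+        victim = oldest unsorted chunk
+        if chunksize(victim) == nb:  take it
+        else:                        file it into its small/large bin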
+ + The outer loop here is needed because we might not realize until + near the end of malloc that we should have consolidated, so must + do so and retry. This happens at most once, and only when we would + otherwise need to expand memory to service a "small" request. + */ + + for(;;) { + + while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) { + bck = victim->bk; + size = chunksize(victim); + + /* + If a small request, try to use last remainder if it is the + only chunk in unsorted bin. This helps promote locality for + runs of consecutive small requests. This is the only + exception to best-fit, and applies only when there is + no exact fit for a small chunk. + */ + + if (in_smallbin_range(nb) && + bck == unsorted_chunks(av) && + victim == av->last_remainder && + (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) { + + /* split and reattach remainder */ + remainder_size = size - nb; + remainder = chunk_at_offset(victim, nb); + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; + av->last_remainder = remainder; + remainder->bk = remainder->fd = unsorted_chunks(av); + + set_head(victim, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + + /* remove from unsorted list */ + unsorted_chunks(av)->bk = bck; + bck->fd = unsorted_chunks(av); + + /* Take now instead of binning if exact fit */ + + if (size == nb) { + set_inuse_bit_at_offset(victim, size); + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + + /* place chunk in bin */ + + if (in_smallbin_range(size)) { + victim_index = smallbin_index(size); + bck = bin_at(av, victim_index); + fwd = bck->fd; + } + else { + victim_index = largebin_index(size); + bck = bin_at(av, victim_index); + fwd = bck->fd; + + /* maintain large bins in sorted order */ + if (fwd != bck) { + size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */ + /* if smaller than smallest, bypass loop below */ + if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) { + fwd = bck; + bck = bck->bk; + } + else { + while ((unsigned long)(size) < (unsigned long)(fwd->size)) + fwd = fwd->fd; + bck = fwd->bk; + } + } + } + + mark_bin(av, victim_index); + victim->bk = bck; + victim->fd = fwd; + fwd->bk = victim; + bck->fd = victim; + } + + /* + If a large request, scan through the chunks of current bin in + sorted order to find smallest that fits. This is the only step + where an unbounded number of chunks might be scanned without doing + anything useful with them. However the lists tend to be short. 
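+
+      (Large bins are kept sorted by decreasing size, so last(bin) is
+      the smallest chunk; the backward scan via ->bk below moves toward
+      larger chunks and stops at the smallest one whose size is still
+      >= nb -- a best fit.)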
+ */ + + if (!in_smallbin_range(nb)) { + bin = bin_at(av, idx); + + /* skip scan if empty or largest chunk is too small */ + if ((victim = last(bin)) != bin && + (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) { + + while (((unsigned long)(size = chunksize(victim)) < + (unsigned long)(nb))) + victim = victim->bk; + + remainder_size = size - nb; + unlink(victim, bck, fwd); + + /* Exhaust */ + if (remainder_size < MINSIZE) { + set_inuse_bit_at_offset(victim, size); + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + /* Split */ + else { + remainder = chunk_at_offset(victim, nb); + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; + remainder->bk = remainder->fd = unsorted_chunks(av); + set_head(victim, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + } + } + + /* + Search for a chunk by scanning bins, starting with next largest + bin. This search is strictly by best-fit; i.e., the smallest + (with ties going to approximately the least recently used) chunk + that fits is selected. + + The bitmap avoids needing to check that most blocks are nonempty. + The particular case of skipping all bins during warm-up phases + when no chunks have been returned yet is faster than it might look. + */ + + ++idx; + bin = bin_at(av,idx); + block = idx2block(idx); + map = av->binmap[block]; + bit = idx2bit(idx); + + for (;;) { + + /* Skip rest of block if there are no more set bits in this block. */ + if (bit > map || bit == 0) { + do { + if (++block >= BINMAPSIZE) /* out of bins */ + goto use_top; + } while ( (map = av->binmap[block]) == 0); + + bin = bin_at(av, (block << BINMAPSHIFT)); + bit = 1; + } + + /* Advance to bin with set bit. There must be one. */ + while ((bit & map) == 0) { + bin = next_bin(bin); + bit <<= 1; + assert(bit != 0); + } + + /* Inspect the bin. It is likely to be non-empty */ + victim = last(bin); + + /* If a false alarm (empty bin), clear the bit. */ + if (victim == bin) { + av->binmap[block] = map &= ~bit; /* Write through */ + bin = next_bin(bin); + bit <<= 1; + } + + else { + size = chunksize(victim); + + /* We know the first chunk in this bin is big enough to use. */ + assert((unsigned long)(size) >= (unsigned long)(nb)); + + remainder_size = size - nb; + + /* unlink */ + bck = victim->bk; + bin->bk = bck; + bck->fd = bin; + + /* Exhaust */ + if (remainder_size < MINSIZE) { + set_inuse_bit_at_offset(victim, size); + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + + /* Split */ + else { + remainder = chunk_at_offset(victim, nb); + + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; + remainder->bk = remainder->fd = unsorted_chunks(av); + /* advertise as last remainder */ + if (in_smallbin_range(nb)) + av->last_remainder = remainder; + + set_head(victim, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + } + } + + use_top: + /* + If large enough, split off the chunk bordering the end of memory + (held in av->top). Note that this is in accord with the best-fit + search rule. In effect, av->top is treated as larger (and thus + less well fitting) than any other available chunk since it can + be extended to be as large as necessary (up to system + limitations). 
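+
+      (When top is still too small, control falls through below: first
+      to a fastbin-consolidation retry where possible, and finally to
+      sYSMALLOc, which grows top.)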
+ + We require that av->top always exists (i.e., has size >= + MINSIZE) after initialization, so if it would otherwise be + exhuasted by current request, it is replenished. (The main + reason for ensuring it exists is that we may need MINSIZE space + to put in fenceposts in sysmalloc.) + */ + + victim = av->top; + size = chunksize(victim); + + if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) { + remainder_size = size - nb; + remainder = chunk_at_offset(victim, nb); + av->top = remainder; + set_head(victim, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + + /* + If there is space available in fastbins, consolidate and retry, + to possibly avoid expanding memory. This can occur only if nb is + in smallbin range so we didn't consolidate upon entry. + */ + + else if (have_fastchunks(av)) { + assert(in_smallbin_range(nb)); + malloc_consolidate(av); + idx = smallbin_index(nb); /* restore original bin index */ + } + + /* + Otherwise, relay to handle system-dependent cases + */ + else + return sYSMALLOc(nb, av); + } +} + +/* + ------------------------------ free ------------------------------ +*/ + +#if __STD_C +void fREe(Void_t* mem) +#else +void fREe(mem) Void_t* mem; +#endif +{ + mstate av = get_malloc_state(); + + mchunkptr p; /* chunk corresponding to mem */ + INTERNAL_SIZE_T size; /* its size */ + mfastbinptr* fb; /* associated fastbin */ + mchunkptr nextchunk; /* next contiguous chunk */ + INTERNAL_SIZE_T nextsize; /* its size */ + int nextinuse; /* true if nextchunk is used */ + INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */ + mchunkptr bck; /* misc temp for linking */ + mchunkptr fwd; /* misc temp for linking */ + + + /* free(0) has no effect */ + if (mem != 0) { + p = mem2chunk(mem); + size = chunksize(p); + + check_inuse_chunk(p); + + /* + If eligible, place chunk on a fastbin so it can be found + and used quickly in malloc. + */ + + if ((unsigned long)(size) <= (unsigned long)(av->max_fast) + +#if TRIM_FASTBINS + /* + If TRIM_FASTBINS set, don't place chunks + bordering top into fastbins + */ + && (chunk_at_offset(p, size) != av->top) +#endif + ) { + + set_fastchunks(av); + fb = &(av->fastbins[fastbin_index(size)]); + p->fd = *fb; + *fb = p; + } + + /* + Consolidate other non-mmapped chunks as they arrive. + */ + + else if (!chunk_is_mmapped(p)) { + nextchunk = chunk_at_offset(p, size); + nextsize = chunksize(nextchunk); + + /* consolidate backward */ + if (!prev_inuse(p)) { + prevsize = p->prev_size; + size += prevsize; + p = chunk_at_offset(p, -((long) prevsize)); + unlink(p, bck, fwd); + } + + if (nextchunk != av->top) { + /* get and clear inuse bit */ + nextinuse = inuse_bit_at_offset(nextchunk, nextsize); + set_head(nextchunk, nextsize); + + /* consolidate forward */ + if (!nextinuse) { + unlink(nextchunk, bck, fwd); + size += nextsize; + } + + /* + Place the chunk in unsorted chunk list. Chunks are + not placed into regular bins until after they have + been given one chance to be used in malloc. + */ + + bck = unsorted_chunks(av); + fwd = bck->fd; + p->bk = bck; + p->fd = fwd; + bck->fd = p; + fwd->bk = p; + + set_head(p, size | PREV_INUSE); + set_foot(p, size); + + check_free_chunk(p); + } + + /* + If the chunk borders the current high end of memory, + consolidate into top + */ + + else { + size += nextsize; + set_head(p, size | PREV_INUSE); + av->top = p; + check_chunk(p); + } + + /* + If freeing a large space, consolidate possibly-surrounding + chunks. 
Then, if the total unused topmost memory exceeds trim + threshold, ask malloc_trim to reduce top. + + Unless max_fast is 0, we don't know if there are fastbins + bordering top, so we cannot tell for sure whether threshold + has been reached unless fastbins are consolidated. But we + don't want to consolidate on each free. As a compromise, + consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD + is reached. + */ + + if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { + if (have_fastchunks(av)) + malloc_consolidate(av); + +#ifndef MORECORE_CANNOT_TRIM + if ((unsigned long)(chunksize(av->top)) >= + (unsigned long)(av->trim_threshold)) + sYSTRIm(av->top_pad, av); +#endif + } + + } + /* + If the chunk was allocated via mmap, release via munmap() + Note that if HAVE_MMAP is false but chunk_is_mmapped is + true, then user must have overwritten memory. There's nothing + we can do to catch this error unless DEBUG is set, in which case + check_inuse_chunk (above) will have triggered error. + */ + + else { +#if HAVE_MMAP + int ret; + INTERNAL_SIZE_T offset = p->prev_size; + av->n_mmaps--; + av->mmapped_mem -= (size + offset); + ret = munmap((char*)p - offset, size + offset); + /* munmap returns non-zero on failure */ + assert(ret == 0); +#endif + } + } +} + +/* + ------------------------- malloc_consolidate ------------------------- + + malloc_consolidate is a specialized version of free() that tears + down chunks held in fastbins. Free itself cannot be used for this + purpose since, among other things, it might place chunks back onto + fastbins. So, instead, we need to use a minor variant of the same + code. + + Also, because this routine needs to be called the first time through + malloc anyway, it turns out to be the perfect place to trigger + initialization code. +*/ + +#if __STD_C +static void malloc_consolidate(mstate av) +#else +static void malloc_consolidate(av) mstate av; +#endif +{ + mfastbinptr* fb; /* current fastbin being consolidated */ + mfastbinptr* maxfb; /* last fastbin (for loop control) */ + mchunkptr p; /* current chunk being consolidated */ + mchunkptr nextp; /* next chunk to consolidate */ + mchunkptr unsorted_bin; /* bin header */ + mchunkptr first_unsorted; /* chunk to link to */ + + /* These have same use as in free() */ + mchunkptr nextchunk; + INTERNAL_SIZE_T size; + INTERNAL_SIZE_T nextsize; + INTERNAL_SIZE_T prevsize; + int nextinuse; + mchunkptr bck; + mchunkptr fwd; + + /* + If max_fast is 0, we know that av hasn't + yet been initialized, in which case do so below + */ + + if (av->max_fast != 0) { + clear_fastchunks(av); + + unsorted_bin = unsorted_chunks(av); + + /* + Remove each chunk from fast bin and consolidate it, placing it + then in unsorted bin. Among other reasons for doing this, + placing in unsorted bin avoids needing to calculate actual bins + until malloc is sure that chunks aren't immediately going to be + reused anyway. 
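+
+      In outline (a sketch of the loop below):
+
+        for each fastbin from fastbins[0] to the bin for max_fast:
+          for each chunk p on that list:
+            merge p with any free neighbors, then either place the
+            result on the unsorted bin or absorb it into top.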
+ */ + + maxfb = &(av->fastbins[fastbin_index(av->max_fast)]); + fb = &(av->fastbins[0]); + do { + if ( (p = *fb) != 0) { + *fb = 0; + + do { + check_inuse_chunk(p); + nextp = p->fd; + + /* Slightly streamlined version of consolidation code in free() */ + size = p->size & ~PREV_INUSE; + nextchunk = chunk_at_offset(p, size); + nextsize = chunksize(nextchunk); + + if (!prev_inuse(p)) { + prevsize = p->prev_size; + size += prevsize; + p = chunk_at_offset(p, -((long) prevsize)); + unlink(p, bck, fwd); + } + + if (nextchunk != av->top) { + nextinuse = inuse_bit_at_offset(nextchunk, nextsize); + set_head(nextchunk, nextsize); + + if (!nextinuse) { + size += nextsize; + unlink(nextchunk, bck, fwd); + } + + first_unsorted = unsorted_bin->fd; + unsorted_bin->fd = p; + first_unsorted->bk = p; + + set_head(p, size | PREV_INUSE); + p->bk = unsorted_bin; + p->fd = first_unsorted; + set_foot(p, size); + } + + else { + size += nextsize; + set_head(p, size | PREV_INUSE); + av->top = p; + } + + } while ( (p = nextp) != 0); + + } + } while (fb++ != maxfb); + } + else { + malloc_init_state(av); + check_malloc_state(); + } +} + +/* + ------------------------------ realloc ------------------------------ +*/ + + +#if __STD_C +Void_t* rEALLOc(Void_t* oldmem, size_t bytes) +#else +Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; +#endif +{ + mstate av = get_malloc_state(); + + INTERNAL_SIZE_T nb; /* padded request size */ + + mchunkptr oldp; /* chunk corresponding to oldmem */ + INTERNAL_SIZE_T oldsize; /* its size */ + + mchunkptr newp; /* chunk to return */ + INTERNAL_SIZE_T newsize; /* its size */ + Void_t* newmem; /* corresponding user mem */ + + mchunkptr next; /* next contiguous chunk after oldp */ + + mchunkptr remainder; /* extra space at end of newp */ + unsigned long remainder_size; /* its size */ + + mchunkptr bck; /* misc temp for linking */ + mchunkptr fwd; /* misc temp for linking */ + + unsigned long copysize; /* bytes to copy */ + unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */ + INTERNAL_SIZE_T* s; /* copy source */ + INTERNAL_SIZE_T* d; /* copy destination */ + + +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + fREe(oldmem); + return 0; + } +#endif + + /* realloc of null is supposed to be same as malloc */ + if (oldmem == 0) return mALLOc(bytes); + + checked_request2size(bytes, nb); + + oldp = mem2chunk(oldmem); + oldsize = chunksize(oldp); + + check_inuse_chunk(oldp); + + if (!chunk_is_mmapped(oldp)) { + + if ((unsigned long)(oldsize) >= (unsigned long)(nb)) { + /* already big enough; split below */ + newp = oldp; + newsize = oldsize; + } + + else { + next = chunk_at_offset(oldp, oldsize); + + /* Try to expand forward into top */ + if (next == av->top && + (unsigned long)(newsize = oldsize + chunksize(next)) >= + (unsigned long)(nb + MINSIZE)) { + set_head_size(oldp, nb); + av->top = chunk_at_offset(oldp, nb); + set_head(av->top, (newsize - nb) | PREV_INUSE); + return chunk2mem(oldp); + } + + /* Try to expand forward into next chunk; split off remainder below */ + else if (next != av->top && + !inuse(next) && + (unsigned long)(newsize = oldsize + chunksize(next)) >= + (unsigned long)(nb)) { + newp = oldp; + unlink(next, bck, fwd); + } + + /* allocate, copy, free */ + else { + newmem = mALLOc(nb - MALLOC_ALIGN_MASK); + if (newmem == 0) + return 0; /* propagate failure */ + + newp = mem2chunk(newmem); + newsize = chunksize(newp); + + /* + Avoid copy if newp is next chunk after oldp. 
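+            (This occurs e.g. when next bordered av->top and mALLOc
+            carved the new chunk out of top, which then begins exactly
+            at next; the two pieces are merged rather than copied.)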
+ */ + if (newp == next) { + newsize += oldsize; + newp = oldp; + } + else { + /* + Unroll copy of <= 36 bytes (72 if 8byte sizes) + We know that contents have an odd number of + INTERNAL_SIZE_T-sized words; minimally 3. + */ + + copysize = oldsize - SIZE_SZ; + s = (INTERNAL_SIZE_T*)(oldmem); + d = (INTERNAL_SIZE_T*)(newmem); + ncopies = copysize / sizeof(INTERNAL_SIZE_T); + assert(ncopies >= 3); + + if (ncopies > 9) + MALLOC_COPY(d, s, copysize); + + else { + *(d+0) = *(s+0); + *(d+1) = *(s+1); + *(d+2) = *(s+2); + if (ncopies > 4) { + *(d+3) = *(s+3); + *(d+4) = *(s+4); + if (ncopies > 6) { + *(d+5) = *(s+5); + *(d+6) = *(s+6); + if (ncopies > 8) { + *(d+7) = *(s+7); + *(d+8) = *(s+8); + } + } + } + } + + fREe(oldmem); + check_inuse_chunk(newp); + return chunk2mem(newp); + } + } + } + + /* If possible, free extra space in old or extended chunk */ + + assert((unsigned long)(newsize) >= (unsigned long)(nb)); + + remainder_size = newsize - nb; + + if (remainder_size < MINSIZE) { /* not enough extra to split off */ + set_head_size(newp, newsize); + set_inuse_bit_at_offset(newp, newsize); + } + else { /* split remainder */ + remainder = chunk_at_offset(newp, nb); + set_head_size(newp, nb); + set_head(remainder, remainder_size | PREV_INUSE); + /* Mark remainder as inuse so free() won't complain */ + set_inuse_bit_at_offset(remainder, remainder_size); + fREe(chunk2mem(remainder)); + } + + check_inuse_chunk(newp); + return chunk2mem(newp); + } + + /* + Handle mmap cases + */ + + else { +#if HAVE_MMAP + +#if HAVE_MREMAP + INTERNAL_SIZE_T offset = oldp->prev_size; + size_t pagemask = av->pagesize - 1; + char *cp; + unsigned long sum; + + /* Note the extra SIZE_SZ overhead */ + newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask; + + /* don't need to remap if still within same page */ + if (oldsize == newsize - offset) + return oldmem; + + cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1); + + if (cp != (char*)MORECORE_FAILURE) { + + newp = (mchunkptr)(cp + offset); + set_head(newp, (newsize - offset)|IS_MMAPPED); + + assert(aligned_OK(chunk2mem(newp))); + assert((newp->prev_size == offset)); + + /* update statistics */ + sum = av->mmapped_mem += newsize - oldsize; + if (sum > (unsigned long)(av->max_mmapped_mem)) + av->max_mmapped_mem = sum; + sum += av->sbrked_mem; + if (sum > (unsigned long)(av->max_total_mem)) + av->max_total_mem = sum; + + return chunk2mem(newp); + } +#endif + + /* Note the extra SIZE_SZ overhead. */ + if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ)) + newmem = oldmem; /* do nothing */ + else { + /* Must alloc, copy, free. 
*/ + newmem = mALLOc(nb - MALLOC_ALIGN_MASK); + if (newmem != 0) { + MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); + fREe(oldmem); + } + } + return newmem; + +#else + /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */ + check_malloc_state(); + MALLOC_FAILURE_ACTION; + return 0; +#endif + } +} + +/* + ------------------------------ memalign ------------------------------ +*/ + +#if __STD_C +Void_t* mEMALIGn(size_t alignment, size_t bytes) +#else +Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes; +#endif +{ + INTERNAL_SIZE_T nb; /* padded request size */ + char* m; /* memory returned by malloc call */ + mchunkptr p; /* corresponding chunk */ + char* brk; /* alignment point within p */ + mchunkptr newp; /* chunk to return */ + INTERNAL_SIZE_T newsize; /* its size */ + INTERNAL_SIZE_T leadsize; /* leading space before alignment point */ + mchunkptr remainder; /* spare room at end to split off */ + unsigned long remainder_size; /* its size */ + INTERNAL_SIZE_T size; + + /* If need less alignment than we give anyway, just relay to malloc */ + + if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes); + + /* Otherwise, ensure that it is at least a minimum chunk size */ + + if (alignment < MINSIZE) alignment = MINSIZE; + + /* Make sure alignment is power of 2 (in case MINSIZE is not). */ + if ((alignment & (alignment - 1)) != 0) { + size_t a = MALLOC_ALIGNMENT * 2; + while ((unsigned long)a < (unsigned long)alignment) a <<= 1; + alignment = a; + } + + checked_request2size(bytes, nb); + + /* + Strategy: find a spot within that chunk that meets the alignment + request, and then possibly free the leading and trailing space. + */ + + + /* Call malloc with worst case padding to hit alignment. */ + + m = (char*)(mALLOc(nb + alignment + MINSIZE)); + + if (m == 0) return 0; /* propagate failure */ + + p = mem2chunk(m); + + if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */ + + /* + Find an aligned spot inside chunk. Since we need to give back + leading space in a chunk of at least MINSIZE, if the first + calculation places us at a spot with less than MINSIZE leader, + we can move to the next aligned spot -- we've allocated enough + total room so that this is always possible. 
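+
+      A worked sketch of the computation below, assuming
+      alignment == 64 and a malloc result of m == 0x1009:
+
+        (m + 64 - 1) & -64  ==  0x1040
+
+      the first 64-aligned user address at or above m; mem2chunk of
+      that address is the header of the chunk to be carved out.  If
+      fewer than MINSIZE leading bytes would remain, brk is simply
+      bumped by another 64 bytes.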
+ */ + + brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & + -((signed long) alignment)); + if ((unsigned long)(brk - (char*)(p)) < MINSIZE) + brk += alignment; + + newp = (mchunkptr)brk; + leadsize = brk - (char*)(p); + newsize = chunksize(p) - leadsize; + + /* For mmapped chunks, just adjust offset */ + if (chunk_is_mmapped(p)) { + newp->prev_size = p->prev_size + leadsize; + set_head(newp, newsize|IS_MMAPPED); + return chunk2mem(newp); + } + + /* Otherwise, give back leader, use the rest */ + set_head(newp, newsize | PREV_INUSE); + set_inuse_bit_at_offset(newp, newsize); + set_head_size(p, leadsize); + fREe(chunk2mem(p)); + p = newp; + + assert (newsize >= nb && + (((unsigned long)(chunk2mem(p))) % alignment) == 0); + } + + /* Also give back spare room at the end */ + if (!chunk_is_mmapped(p)) { + size = chunksize(p); + if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) { + remainder_size = size - nb; + remainder = chunk_at_offset(p, nb); + set_head(remainder, remainder_size | PREV_INUSE); + set_head_size(p, nb); + fREe(chunk2mem(remainder)); + } + } + + check_inuse_chunk(p); + return chunk2mem(p); +} + +/* + ------------------------------ calloc ------------------------------ +*/ + +#if __STD_C +Void_t* cALLOc(size_t n_elements, size_t elem_size) +#else +Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size; +#endif +{ + mchunkptr p; + unsigned long clearsize; + unsigned long nclears; + INTERNAL_SIZE_T* d; + + Void_t* mem = mALLOc(n_elements * elem_size); + + if (mem != 0) { + p = mem2chunk(mem); + +#if MMAP_CLEARS + if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */ +#endif + { + /* + Unroll clear of <= 36 bytes (72 if 8byte sizes) + We know that contents have an odd number of + INTERNAL_SIZE_T-sized words; minimally 3. + */ + + d = (INTERNAL_SIZE_T*)mem; + clearsize = chunksize(p) - SIZE_SZ; + nclears = clearsize / sizeof(INTERNAL_SIZE_T); + assert(nclears >= 3); + + if (nclears > 9) + MALLOC_ZERO(d, clearsize); + + else { + *(d+0) = 0; + *(d+1) = 0; + *(d+2) = 0; + if (nclears > 4) { + *(d+3) = 0; + *(d+4) = 0; + if (nclears > 6) { + *(d+5) = 0; + *(d+6) = 0; + if (nclears > 8) { + *(d+7) = 0; + *(d+8) = 0; + } + } + } + } + } + } + return mem; +} + +/* + ------------------------------ cfree ------------------------------ +*/ + +#if __STD_C +void cFREe(Void_t *mem) +#else +void cFREe(mem) Void_t *mem; +#endif +{ + fREe(mem); +} + +/* + ------------------------- independent_calloc ------------------------- +*/ + +#if __STD_C +Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[]) +#else +Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[]; +#endif +{ + size_t sz = elem_size; /* serves as 1-element array */ + /* opts arg of 3 means all elements are same size, and should be cleared */ + return iALLOc(n_elements, &sz, 3, chunks); +} + +/* + ------------------------- independent_comalloc ------------------------- +*/ + +#if __STD_C +Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[]) +#else +Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[]; +#endif +{ + return iALLOc(n_elements, sizes, 0, chunks); +} + + +/* + ------------------------------ ialloc ------------------------------ + ialloc provides common support for independent_X routines, handling all of + the combinations that can result. 
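+
+  (iCALLOc above relays here with opts == 3 -- all elements the same
+  size and cleared -- and iCOMALLOc with opts == 0.)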
+ + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed +*/ + + +#if __STD_C +static Void_t** iALLOc(size_t n_elements, + size_t* sizes, + int opts, + Void_t* chunks[]) +#else +static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[]; +#endif +{ + mstate av = get_malloc_state(); + INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */ + INTERNAL_SIZE_T contents_size; /* total size of elements */ + INTERNAL_SIZE_T array_size; /* request size of pointer array */ + Void_t* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */ + Void_t** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + int mmx; /* to disable mmap */ + INTERNAL_SIZE_T size; + size_t i; + + /* Ensure initialization/consolidation */ + if (have_fastchunks(av)) malloc_consolidate(av); + + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (Void_t**) mALLOc(0); + marray = 0; + array_size = request2size(n_elements * (sizeof(Void_t*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + /* subtract out alignment bytes from total to minimize overallocation */ + size = contents_size + array_size - MALLOC_ALIGN_MASK; + + /* + Allocate the aggregate chunk. + But first disable mmap so malloc won't use it, since + we would not be able to later free/realloc space internal + to a segregated mmap region. 
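+
+    (Setting n_mmaps_max to 0 disables the mmap branch in sYSMALLOc
+    for just the single mALLOc call below; the saved value is restored
+    immediately afterwards.)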
+ */ + mmx = av->n_mmaps_max; /* disable mmap */ + av->n_mmaps_max = 0; + mem = mALLOc(size); + av->n_mmaps_max = mmx; /* reset mmap */ + if (mem == 0) + return 0; + + p = mem2chunk(mem); + assert(!chunk_is_mmapped(p)); + remainder_size = chunksize(p); + + if (opts & 0x2) { /* optionally clear the elements */ + MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + array_chunk = chunk_at_offset(p, contents_size); + marray = (Void_t**) (chunk2mem(array_chunk)); + set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_head(p, size | PREV_INUSE); + p = chunk_at_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_head(p, remainder_size | PREV_INUSE); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) + assert(remainder_size == element_size); + else + assert(remainder_size == request2size(sizes[i])); + check_inuse_chunk(mem2chunk(marray)); + } + + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(mem2chunk(marray[i])); +#endif + + return marray; +} + + +/* + ------------------------------ valloc ------------------------------ +*/ + +#if __STD_C +Void_t* vALLOc(size_t bytes) +#else +Void_t* vALLOc(bytes) size_t bytes; +#endif +{ + /* Ensure initialization/consolidation */ + mstate av = get_malloc_state(); + if (have_fastchunks(av)) malloc_consolidate(av); + return mEMALIGn(av->pagesize, bytes); +} + +/* + ------------------------------ pvalloc ------------------------------ +*/ + + +#if __STD_C +Void_t* pVALLOc(size_t bytes) +#else +Void_t* pVALLOc(bytes) size_t bytes; +#endif +{ + mstate av = get_malloc_state(); + size_t pagesz; + + /* Ensure initialization/consolidation */ + if (have_fastchunks(av)) malloc_consolidate(av); + pagesz = av->pagesize; + return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1)); +} + + +/* + ------------------------------ malloc_trim ------------------------------ +*/ + +#if __STD_C +int mTRIm(size_t pad) +#else +int mTRIm(pad) size_t pad; +#endif +{ + mstate av = get_malloc_state(); + /* Ensure initialization/consolidation */ + malloc_consolidate(av); + +#ifndef MORECORE_CANNOT_TRIM + return sYSTRIm(pad, av); +#else + return 0; +#endif +} + + +/* + ------------------------- malloc_usable_size ------------------------- +*/ + +#if __STD_C +size_t mUSABLe(Void_t* mem) +#else +size_t mUSABLe(mem) Void_t* mem; +#endif +{ + mchunkptr p; + if (mem != 0) { + p = mem2chunk(mem); + if (chunk_is_mmapped(p)) + return chunksize(p) - 2*SIZE_SZ; + else if (inuse(p)) + return chunksize(p) - SIZE_SZ; + } + return 0; +} + +/* + ------------------------------ mallinfo ------------------------------ +*/ + +struct mallinfo mALLINFo() +{ + mstate av = get_malloc_state(); + struct mallinfo mi; + int i; + mbinptr b; + mchunkptr p; + INTERNAL_SIZE_T avail; + INTERNAL_SIZE_T fastavail; + int nblocks; + int nfastblocks; + + /* Ensure initialization */ + if (av->top == 0) malloc_consolidate(av); + + check_malloc_state(); + + /* Account for top */ + avail = chunksize(av->top); + nblocks = 1; /* top always exists */ + + /* traverse fastbins */ + nfastblocks = 0; + fastavail = 0; + + 
for (i = 0; i < NFASTBINS; ++i) { + for (p = av->fastbins[i]; p != 0; p = p->fd) { + ++nfastblocks; + fastavail += chunksize(p); + } + } + + avail += fastavail; + + /* traverse regular bins */ + for (i = 1; i < NBINS; ++i) { + b = bin_at(av, i); + for (p = last(b); p != b; p = p->bk) { + ++nblocks; + avail += chunksize(p); + } + } + + mi.smblks = nfastblocks; + mi.ordblks = nblocks; + mi.fordblks = avail; + mi.uordblks = av->sbrked_mem - avail; + mi.arena = av->sbrked_mem; + mi.hblks = av->n_mmaps; + mi.hblkhd = av->mmapped_mem; + mi.fsmblks = fastavail; + mi.keepcost = chunksize(av->top); + mi.usmblks = av->max_total_mem; + return mi; +} + +/* + ------------------------------ malloc_stats ------------------------------ +*/ + +void mSTATs() +{ + struct mallinfo mi = mALLINFo(); + +#ifdef WIN32 + { + unsigned long free, reserved, committed; + vminfo (&free, &reserved, &committed); + fprintf(stderr, "free bytes = %10lu\n", + free); + fprintf(stderr, "reserved bytes = %10lu\n", + reserved); + fprintf(stderr, "committed bytes = %10lu\n", + committed); + } +#endif + + + fprintf(stderr, "max system bytes = %10lu\n", + (unsigned long)(mi.usmblks)); + fprintf(stderr, "system bytes = %10lu\n", + (unsigned long)(mi.arena + mi.hblkhd)); + fprintf(stderr, "in use bytes = %10lu\n", + (unsigned long)(mi.uordblks + mi.hblkhd)); + + +#ifdef WIN32 + { + unsigned long kernel, user; + if (cpuinfo (TRUE, &kernel, &user)) { + fprintf(stderr, "kernel ms = %10lu\n", + kernel); + fprintf(stderr, "user ms = %10lu\n", + user); + } + } +#endif +} + + +/* + ------------------------------ mallopt ------------------------------ +*/ + +#if __STD_C +int mALLOPt(int param_number, int value) +#else +int mALLOPt(param_number, value) int param_number; int value; +#endif +{ + mstate av = get_malloc_state(); + /* Ensure initialization/consolidation */ + malloc_consolidate(av); + + switch(param_number) { + case M_MXFAST: + if (value >= 0 && value <= MAX_FAST_SIZE) { + set_max_fast(av, value); + return 1; + } + else + return 0; + + case M_TRIM_THRESHOLD: + av->trim_threshold = value; + return 1; + + case M_TOP_PAD: + av->top_pad = value; + return 1; + + case M_MMAP_THRESHOLD: + av->mmap_threshold = value; + return 1; + + case M_MMAP_MAX: +#if !HAVE_MMAP + if (value != 0) + return 0; +#endif + av->n_mmaps_max = value; + return 1; + + default: + return 0; + } +} + + +/* + -------------------- Alternative MORECORE functions -------------------- +*/ + + +/* + General Requirements for MORECORE. + + The MORECORE function must have the following properties: + + If MORECORE_CONTIGUOUS is false: + + * MORECORE must allocate in multiples of pagesize. It will + only be called with arguments that are multiples of pagesize. + + * MORECORE(0) must return an address that is at least + MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.) + + else (i.e. If MORECORE_CONTIGUOUS is true): + + * Consecutive calls to MORECORE with positive arguments + return increasing addresses, indicating that space has been + contiguously extended. + + * MORECORE need not allocate in multiples of pagesize. + Calls to MORECORE need not have args of multiples of pagesize. + + * MORECORE need not page-align. + + In either case: + + * MORECORE may allocate more memory than requested. (Or even less, + but this will generally result in a malloc failure.) + + * MORECORE must not allocate memory when given argument zero, but + instead return one past the end address of memory from previous + nonzero call. 
This malloc does NOT call MORECORE(0) + until at least one call with positive arguments is made, so + the initial value returned is not important. + + * Even though consecutive calls to MORECORE need not return contiguous + addresses, it must be OK for malloc'ed chunks to span multiple + regions in those cases where they do happen to be contiguous. + + * MORECORE need not handle negative arguments -- it may instead + just return MORECORE_FAILURE when given negative arguments. + Negative arguments are always multiples of pagesize. MORECORE + must not misinterpret negative args as large positive unsigned + args. You can suppress all such calls from even occurring by defining + MORECORE_CANNOT_TRIM, + + There is some variation across systems about the type of the + argument to sbrk/MORECORE. If size_t is unsigned, then it cannot + actually be size_t, because sbrk supports negative args, so it is + normally the signed type of the same width as size_t (sometimes + declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much + matter though. Internally, we use "long" as arguments, which should + work across all reasonable possibilities. + + Additionally, if MORECORE ever returns failure for a positive + request, and HAVE_MMAP is true, then mmap is used as a noncontiguous + system allocator. This is a useful backup strategy for systems with + holes in address spaces -- in this case sbrk cannot contiguously + expand the heap, but mmap may be able to map noncontiguous space. + + If you'd like mmap to ALWAYS be used, you can define MORECORE to be + a function that always returns MORECORE_FAILURE. + + If you are using this malloc with something other than sbrk (or its + emulation) to supply memory regions, you probably want to set + MORECORE_CONTIGUOUS as false. As an example, here is a custom + allocator kindly contributed for pre-OSX macOS. It uses virtually + but not necessarily physically contiguous non-paged memory (locked + in, present and won't get swapped out). You can use it by + uncommenting this section, adding some #includes, and setting up the + appropriate defines above: + + #define MORECORE osMoreCore + #define MORECORE_CONTIGUOUS 0 + + There is also a shutdown routine that should somehow be called for + cleanup upon program exit. + + #define MAX_POOL_ENTRIES 100 + #define MINIMUM_MORECORE_SIZE (64 * 1024) + static int next_os_pool; + void *our_os_pools[MAX_POOL_ENTRIES]; + + void *osMoreCore(int size) + { + void *ptr = 0; + static void *sbrk_top = 0; + + if (size > 0) + { + if (size < MINIMUM_MORECORE_SIZE) + size = MINIMUM_MORECORE_SIZE; + if (CurrentExecutionLevel() == kTaskLevel) + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); + if (ptr == 0) + { + return (void *) MORECORE_FAILURE; + } + // save ptrs so they can be freed during cleanup + our_os_pools[next_os_pool] = ptr; + next_os_pool++; + ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); + sbrk_top = (char *) ptr + size; + return ptr; + } + else if (size < 0) + { + // we don't currently support shrink behavior + return (void *) MORECORE_FAILURE; + } + else + { + return sbrk_top; + } + } + + // cleanup any allocated memory pools + // called as last thing before shutting down driver + + void osCleanupMem(void) + { + void **ptr; + + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) + if (*ptr) + { + PoolDeallocate(*ptr); + *ptr = 0; + } + } + +*/ + + +/* + -------------------------------------------------------------- + + Emulation of sbrk for win32. + Donated by J. 
Walter <Walter@GeNeSys-e.de>. + For additional information about this code, and malloc on Win32, see + http://www.genesys-e.de/jwalter/ +*/ + + +#ifdef WIN32 + +#ifdef _DEBUG +/* #define TRACE */ +#endif + +/* Support for USE_MALLOC_LOCK */ +#ifdef USE_MALLOC_LOCK + +/* Wait for spin lock */ +static int slwait (int *sl) { + while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0) + Sleep (0); + return 0; +} + +/* Release spin lock */ +static int slrelease (int *sl) { + InterlockedExchange (sl, 0); + return 0; +} + +#ifdef NEEDED +/* Spin lock for emulation code */ +static int g_sl; +#endif + +#endif /* USE_MALLOC_LOCK */ + +/* getpagesize for windows */ +static long getpagesize (void) { + static long g_pagesize = 0; + if (! g_pagesize) { + SYSTEM_INFO system_info; + GetSystemInfo (&system_info); + g_pagesize = system_info.dwPageSize; + } + return g_pagesize; +} +static long getregionsize (void) { + static long g_regionsize = 0; + if (! g_regionsize) { + SYSTEM_INFO system_info; + GetSystemInfo (&system_info); + g_regionsize = system_info.dwAllocationGranularity; + } + return g_regionsize; +} + +/* A region list entry */ +typedef struct _region_list_entry { + void *top_allocated; + void *top_committed; + void *top_reserved; + long reserve_size; + struct _region_list_entry *previous; +} region_list_entry; + +/* Allocate and link a region entry in the region list */ +static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) { + region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry)); + if (! next) + return FALSE; + next->top_allocated = (char *) base_reserved; + next->top_committed = (char *) base_reserved; + next->top_reserved = (char *) base_reserved + reserve_size; + next->reserve_size = reserve_size; + next->previous = *last; + *last = next; + return TRUE; +} +/* Free and unlink the last region entry from the region list */ +static int region_list_remove (region_list_entry **last) { + region_list_entry *previous = (*last)->previous; + if (! HeapFree (GetProcessHeap (), sizeof (region_list_entry), *last)) + return FALSE; + *last = previous; + return TRUE; +} + +#define CEIL(size,to) (((size)+(to)-1)&~((to)-1)) +#define FLOOR(size,to) ((size)&~((to)-1)) + +#define SBRK_SCALE 0 +/* #define SBRK_SCALE 1 */ +/* #define SBRK_SCALE 2 */ +/* #define SBRK_SCALE 4 */ + +/* sbrk for windows */ +static void *sbrk (long size) { + static long g_pagesize, g_my_pagesize; + static long g_regionsize, g_my_regionsize; + static region_list_entry *g_last; + void *result = (void *) MORECORE_FAILURE; +#ifdef TRACE + printf ("sbrk %d\n", size); +#endif +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Wait for spin lock */ + slwait (&g_sl); +#endif + /* First time initialization */ + if (! g_pagesize) { + g_pagesize = getpagesize (); + g_my_pagesize = g_pagesize << SBRK_SCALE; + } + if (! g_regionsize) { + g_regionsize = getregionsize (); + g_my_regionsize = g_regionsize << SBRK_SCALE; + } + if (! g_last) { + if (! 
region_list_append (&g_last, 0, 0)) + goto sbrk_exit; + } + /* Assert invariants */ + assert (g_last); + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated && + g_last->top_allocated <= g_last->top_committed); + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed && + g_last->top_committed <= g_last->top_reserved && + (unsigned) g_last->top_committed % g_pagesize == 0); + assert ((unsigned) g_last->top_reserved % g_regionsize == 0); + assert ((unsigned) g_last->reserve_size % g_regionsize == 0); + /* Allocation requested? */ + if (size >= 0) { + /* Allocation size is the requested size */ + long allocate_size = size; + /* Compute the size to commit */ + long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed; + /* Do we reach the commit limit? */ + if (to_commit > 0) { + /* Round size to commit */ + long commit_size = CEIL (to_commit, g_my_pagesize); + /* Compute the size to reserve */ + long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved; + /* Do we reach the reserve limit? */ + if (to_reserve > 0) { + /* Compute the remaining size to commit in the current region */ + long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed; + if (remaining_commit_size > 0) { + /* Assert preconditions */ + assert ((unsigned) g_last->top_committed % g_pagesize == 0); + assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); { + /* Commit this */ + void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size, + MEM_COMMIT, PAGE_READWRITE); + /* Check returned pointer for consistency */ + if (base_committed != g_last->top_committed) + goto sbrk_exit; + /* Assert postconditions */ + assert ((unsigned) base_committed % g_pagesize == 0); +#ifdef TRACE + printf ("Commit %p %d\n", base_committed, remaining_commit_size); +#endif + /* Adjust the regions commit top */ + g_last->top_committed = (char *) base_committed + remaining_commit_size; + } + } { + /* Now we are going to search and reserve. */ + int contiguous = -1; + int found = FALSE; + MEMORY_BASIC_INFORMATION memory_info; + void *base_reserved; + long reserve_size; + do { + /* Assume contiguous memory */ + contiguous = TRUE; + /* Round size to reserve */ + reserve_size = CEIL (to_reserve, g_my_regionsize); + /* Start with the current region's top */ + memory_info.BaseAddress = g_last->top_reserved; + /* Assert preconditions */ + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0); + assert (0 < reserve_size && reserve_size % g_regionsize == 0); + while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) { + /* Assert postconditions */ + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0); +#ifdef TRACE + printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize, + memory_info.State == MEM_FREE ? "FREE": + (memory_info.State == MEM_RESERVE ? "RESERVED": + (memory_info.State == MEM_COMMIT ? "COMMITTED": "?"))); +#endif + /* Region is free, well aligned and big enough: we are done */ + if (memory_info.State == MEM_FREE && + (unsigned) memory_info.BaseAddress % g_regionsize == 0 && + memory_info.RegionSize >= (unsigned) reserve_size) { + found = TRUE; + break; + } + /* From now on we can't get contiguous memory! 
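+                   (the size to reserve is recomputed below from
+                   allocate_size alone, and the region eventually found
+                   is linked in as a separate, noncontiguous entry)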
*/ + contiguous = FALSE; + /* Recompute size to reserve */ + reserve_size = CEIL (allocate_size, g_my_regionsize); + memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize; + /* Assert preconditions */ + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0); + assert (0 < reserve_size && reserve_size % g_regionsize == 0); + } + /* Search failed? */ + if (! found) + goto sbrk_exit; + /* Assert preconditions */ + assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0); + assert (0 < reserve_size && reserve_size % g_regionsize == 0); + /* Try to reserve this */ + base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size, + MEM_RESERVE, PAGE_NOACCESS); + if (! base_reserved) { + int rc = GetLastError (); + if (rc != ERROR_INVALID_ADDRESS) + goto sbrk_exit; + } + /* A null pointer signals (hopefully) a race condition with another thread. */ + /* In this case, we try again. */ + } while (! base_reserved); + /* Check returned pointer for consistency */ + if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress) + goto sbrk_exit; + /* Assert postconditions */ + assert ((unsigned) base_reserved % g_regionsize == 0); +#ifdef TRACE + printf ("Reserve %p %d\n", base_reserved, reserve_size); +#endif + /* Did we get contiguous memory? */ + if (contiguous) { + long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated; + /* Adjust allocation size */ + allocate_size -= start_size; + /* Adjust the regions allocation top */ + g_last->top_allocated = g_last->top_committed; + /* Recompute the size to commit */ + to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed; + /* Round size to commit */ + commit_size = CEIL (to_commit, g_my_pagesize); + } + /* Append the new region to the list */ + if (! region_list_append (&g_last, base_reserved, reserve_size)) + goto sbrk_exit; + /* Didn't we get contiguous memory? */ + if (! contiguous) { + /* Recompute the size to commit */ + to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed; + /* Round size to commit */ + commit_size = CEIL (to_commit, g_my_pagesize); + } + } + } + /* Assert preconditions */ + assert ((unsigned) g_last->top_committed % g_pagesize == 0); + assert (0 < commit_size && commit_size % g_pagesize == 0); { + /* Commit this */ + void *base_committed = VirtualAlloc (g_last->top_committed, commit_size, + MEM_COMMIT, PAGE_READWRITE); + /* Check returned pointer for consistency */ + if (base_committed != g_last->top_committed) + goto sbrk_exit; + /* Assert postconditions */ + assert ((unsigned) base_committed % g_pagesize == 0); +#ifdef TRACE + printf ("Commit %p %d\n", base_committed, commit_size); +#endif + /* Adjust the regions commit top */ + g_last->top_committed = (char *) base_committed + commit_size; + } + } + /* Adjust the regions allocation top */ + g_last->top_allocated = (char *) g_last->top_allocated + allocate_size; + result = (char *) g_last->top_allocated - size; + /* Deallocation requested? 
*/ + } else if (size < 0) { + long deallocate_size = - size; + /* As long as we have a region to release */ + while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) { + /* Get the size to release */ + long release_size = g_last->reserve_size; + /* Get the base address */ + void *base_reserved = (char *) g_last->top_reserved - release_size; + /* Assert preconditions */ + assert ((unsigned) base_reserved % g_regionsize == 0); + assert (0 < release_size && release_size % g_regionsize == 0); { + /* Release this */ + int rc = VirtualFree (base_reserved, 0, + MEM_RELEASE); + /* Check returned code for consistency */ + if (! rc) + goto sbrk_exit; +#ifdef TRACE + printf ("Release %p %d\n", base_reserved, release_size); +#endif + } + /* Adjust deallocation size */ + deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved; + /* Remove the old region from the list */ + if (! region_list_remove (&g_last)) + goto sbrk_exit; + } { + /* Compute the size to decommit */ + long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size); + if (to_decommit >= g_my_pagesize) { + /* Compute the size to decommit */ + long decommit_size = FLOOR (to_decommit, g_my_pagesize); + /* Compute the base address */ + void *base_committed = (char *) g_last->top_committed - decommit_size; + /* Assert preconditions */ + assert ((unsigned) base_committed % g_pagesize == 0); + assert (0 < decommit_size && decommit_size % g_pagesize == 0); { + /* Decommit this */ + int rc = VirtualFree ((char *) base_committed, decommit_size, + MEM_DECOMMIT); + /* Check returned code for consistency */ + if (! rc) + goto sbrk_exit; +#ifdef TRACE + printf ("Decommit %p %d\n", base_committed, decommit_size); +#endif + } + /* Adjust deallocation size and regions commit and allocate top */ + deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed; + g_last->top_committed = base_committed; + g_last->top_allocated = base_committed; + } + } + /* Adjust regions allocate top */ + g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size; + /* Check for underflow */ + if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated || + g_last->top_allocated > g_last->top_committed) { + /* Adjust regions allocate top */ + g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size; + goto sbrk_exit; + } + result = g_last->top_allocated; + } + /* Assert invariants */ + assert (g_last); + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated && + g_last->top_allocated <= g_last->top_committed); + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed && + g_last->top_committed <= g_last->top_reserved && + (unsigned) g_last->top_committed % g_pagesize == 0); + assert ((unsigned) g_last->top_reserved % g_regionsize == 0); + assert ((unsigned) g_last->reserve_size % g_regionsize == 0); + +sbrk_exit: +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Release spin lock */ + slrelease (&g_sl); +#endif + return result; +} + +/* mmap for windows */ +static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) { + static long g_pagesize; + static long g_regionsize; +#ifdef TRACE + printf ("mmap %d\n", size); +#endif +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Wait for spin lock */ + slwait (&g_sl); +#endif + /* First time initialization */ + if (! 
g_pagesize) + g_pagesize = getpagesize (); + if (! g_regionsize) + g_regionsize = getregionsize (); + /* Assert preconditions */ + assert ((unsigned) ptr % g_regionsize == 0); + assert (size % g_pagesize == 0); + /* Allocate this */ + ptr = VirtualAlloc (ptr, size, + MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE); + if (! ptr) { + ptr = (void *) MORECORE_FAILURE; + goto mmap_exit; + } + /* Assert postconditions */ + assert ((unsigned) ptr % g_regionsize == 0); +#ifdef TRACE + printf ("Commit %p %d\n", ptr, size); +#endif +mmap_exit: +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Release spin lock */ + slrelease (&g_sl); +#endif + return ptr; +} + +/* munmap for windows */ +static long munmap (void *ptr, long size) { + static long g_pagesize; + static long g_regionsize; + int rc = MUNMAP_FAILURE; +#ifdef TRACE + printf ("munmap %p %d\n", ptr, size); +#endif +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Wait for spin lock */ + slwait (&g_sl); +#endif + /* First time initialization */ + if (! g_pagesize) + g_pagesize = getpagesize (); + if (! g_regionsize) + g_regionsize = getregionsize (); + /* Assert preconditions */ + assert ((unsigned) ptr % g_regionsize == 0); + assert (size % g_pagesize == 0); + /* Free this */ + if (! VirtualFree (ptr, 0, + MEM_RELEASE)) + goto munmap_exit; + rc = 0; +#ifdef TRACE + printf ("Release %p %d\n", ptr, size); +#endif +munmap_exit: +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Release spin lock */ + slrelease (&g_sl); +#endif + return rc; +} + +static void vminfo (unsigned long *free, unsigned long *reserved, unsigned long *committed) { + MEMORY_BASIC_INFORMATION memory_info; + memory_info.BaseAddress = 0; + *free = *reserved = *committed = 0; + while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) { + switch (memory_info.State) { + case MEM_FREE: + *free += memory_info.RegionSize; + break; + case MEM_RESERVE: + *reserved += memory_info.RegionSize; + break; + case MEM_COMMIT: + *committed += memory_info.RegionSize; + break; + } + memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize; + } +} + +static int cpuinfo (int whole, unsigned long *kernel, unsigned long *user) { + if (whole) { + __int64 creation64, exit64, kernel64, user64; + int rc = GetProcessTimes (GetCurrentProcess (), + (FILETIME *) &creation64, + (FILETIME *) &exit64, + (FILETIME *) &kernel64, + (FILETIME *) &user64); + if (! rc) { + *kernel = 0; + *user = 0; + return FALSE; + } + *kernel = (unsigned long) (kernel64 / 10000); + *user = (unsigned long) (user64 / 10000); + return TRUE; + } else { + __int64 creation64, exit64, kernel64, user64; + int rc = GetThreadTimes (GetCurrentThread (), + (FILETIME *) &creation64, + (FILETIME *) &exit64, + (FILETIME *) &kernel64, + (FILETIME *) &user64); + if (! rc) { + *kernel = 0; + *user = 0; + return FALSE; + } + *kernel = (unsigned long) (kernel64 / 10000); + *user = (unsigned long) (user64 / 10000); + return TRUE; + } +} + +#endif /* WIN32 */ + +/* ------------------------------------------------------------ +History: + + V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) + * Introduce independent_comalloc and independent_calloc. + Thanks to Michael Pachos for motivation and help. + * Make optional .h file available + * Allow > 2GB requests on 32bit systems. + * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>. + Thanks also to Andreas Mueller <a.mueller at paradatec.de>, + and Anonymous. 
+    * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
+      helping test this.)
+    * memalign: check alignment arg
+    * realloc: don't try to shift chunks backwards, since this
+      leads to more fragmentation in some programs and doesn't
+      seem to help in any others.
+    * Collect all cases in malloc requiring system memory into sYSMALLOc
+    * Use mmap as backup to sbrk
+    * Place all internal state in malloc_state
+    * Introduce fastbins (although similar to 2.5.1)
+    * Many minor tunings and cosmetic improvements
+    * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
+    * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
+      Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
+    * Include errno.h to support default failure action.
+
+    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
+      * return null for negative arguments
+      * Added several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
+      * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
+        (e.g. WIN32 platforms)
+      * Cleanup header file inclusion for WIN32 platforms
+      * Cleanup code to avoid Microsoft Visual C++ compiler complaints
+      * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
+        memory allocation routines
+      * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
+      * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
+        usage of 'assert' in non-WIN32 code
+      * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
+        avoid infinite loop
+      * Always call 'fREe()' rather than 'free()'
+
+    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
+      * Fixed ordering problem with boundary-stamping
+
+    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
+      * Added pvalloc, as recommended by H.J. Liu
+      * Added 64bit pointer support mainly from Wolfram Gloger
+      * Added anonymously donated WIN32 sbrk emulation
+      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
+      * malloc_extend_top: fix mask error that caused wastage after
+        foreign sbrks
+      * Add linux mremap support code from HJ Liu
+
+    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
+      * Integrated most documentation with the code.
+      * Add support for mmap, with help from
+        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+      * Use last_remainder in more cases.
+      * Pack bins using idea from colin@nyx10.cs.du.edu
+      * Use ordered bins instead of best-fit threshold
+      * Eliminate block-local decls to simplify tracing and debugging.
+      * Support another case of realloc via move into top
+      * Fix error occurring when initial sbrk_base not word-aligned.
+      * Rely on page size for units instead of SBRK_UNIT to
+        avoid surprises about sbrk alignment conventions.
+      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
+        (raymond@es.ele.tue.nl) for the suggestion.
+      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
+      * More precautions for cases where other routines call sbrk,
+        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+      * Added macros etc., allowing use in linux libc from
+        H.J. Lu (hjl@gnu.ai.mit.edu)
+      * Inverted this history list
+
+    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
+      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
+      * Removed all preallocation code since under current scheme
+        the work required to undo bad preallocations exceeds
+        the work saved in good cases for most test programs.
+      * No longer use return list or unconsolidated bins since
+        no scheme using them consistently outperforms those that don't
+        given above changes.
+ * Use best fit for very large chunks to prevent some worst-cases. + * Added some support for debugging + + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) + * Removed footers when chunks are in use. Thanks to + Paul Wilson (wilson@cs.texas.edu) for the suggestion. + + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) + * Added malloc_trim, with help from Wolfram Gloger + (wmglo@Dent.MED.Uni-Muenchen.DE). + + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) + +*/ diff --git a/libbanshee/engine/nonspec.c b/libbanshee/engine/nonspec.c new file mode 100644 index 00000000000..96e0652d2cb --- /dev/null +++ b/libbanshee/engine/nonspec.c @@ -0,0 +1,852 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "banshee.h"
+#include "flowrow-sort.h"
+#include "flowrow-var.h"
+#include "setif-sort.h"
+#include "setif-var.h"
+#include "setst-sort.h"
+#include "setst-var.h"
+#include "term-sort.h"
+#include "term-var.h"
+
+struct constructor
+{
+  sort_kind sort;
+  int type;
+  int arity;
+  char *name;
+  sig_elt *sig;
+};
+
+typedef struct constructor *constructor;
+
+typedef enum
+{
+  vnc_pos,
+  vnc_neg,
+  vnc_non
+} vnc_kind;
+
+struct sig_elt
+{
+  vnc_kind variance;
+  sort_kind sort;
+};
+
+typedef struct sig_elt sig_elt;
+
+typedef struct proj_pat
+{
+  sort_kind sort;
+  int type;
+  stamp st;
+  int i;
+  gen_e exp;
+  vnc_kind variance;
+  constructor c;
+} *proj_pat;
+
+typedef struct cons_expr
+{
+  sort_kind sort;
+  int type;
+  stamp st;
+  int arity;
+  char *name;
+  sig_elt *sig;
+  gen_e *exps;
+} *cons_expr;
+
+/* Forward declarations for functions defined later in this file */
+static region get_sort_region(sort_kind s);
+static term_hash get_sort_hash(sort_kind s);
+static void call_inclusion_fn(gen_e e1, gen_e e2);
+static void call_unify_fn(gen_e e1, gen_e e2);
+void call_setif_inclusion(gen_e e1, gen_e e2);
+void call_setst_inclusion(gen_e e1, gen_e e2);
+
+static int new_type()
+{
+  static int type = 10;
+  int ret = type;
+  if (type > 2000)
+    {
+      fprintf(stderr, "Exceeded maximum number of constructors\n");
+      assert(0);
+    }
+  type += 2;
+  return ret;
+}
+
+/* Only the term and set sorts admit user-defined constructors */
+static bool fixed_sort(sort_kind s)
+{
+  return !(s == term_sort || s == setif_sort || s == setst_sort);
+}
+
+/*
+  Convention : constructor types are even, pats are odd.
+  The smallest specialized type is 10.
+*/
+static bool setif_is_pat(gen_e e)
+{
+  int type = ((setif_term)e)->type;
+  return ( (type & 1) && (type > 10) );
+}
+
+static bool setst_is_pat(gen_e e)
+{
+  int type = ((setst_term)e)->type;
+  return ( (type & 1) && (type > 10) );
+}
+
+static gen_e get_proj_var(sort_kind s, bool large)
+{
+  switch (s)
+    {
+    case setif_sort:
+      {
+        if (large)
+          return (gen_e)sv_fresh_large(get_sort_region(setif_sort),NULL);
+        else return (gen_e)sv_fresh(get_sort_region(setif_sort),NULL);
+      }
+      break;
+    case setst_sort:
+      {
+        if (large)
+          return (gen_e)st_fresh_large(get_sort_region(setst_sort),NULL);
+        else return (gen_e)st_fresh(get_sort_region(setst_sort),NULL);
+      }
+      break;
+    case flowrow_sort:
+      {
+        if (large)
+          return (gen_e)fv_fresh_large(get_sort_region(flowrow_sort),NULL);
+        else return (gen_e)fv_fresh(get_sort_region(flowrow_sort),NULL);
+      }
+      break;
+    case term_sort:
+      {
+        if (large)
+          return (gen_e)tv_fresh_large(get_sort_region(term_sort),NULL);
+        else return (gen_e)tv_fresh(get_sort_region(term_sort),NULL);
+      }
+      break;
+    default:
+      {
+        fail("Unmatched sort in get_proj_var\n");
+        return NULL;
+      }
+      break;
+    }
+
+  return NULL;
+}
+
+static gen_e get_sort_zero(sort_kind s)
+{
+  switch (s)
+    {
+    case setif_sort:
+      return setif_zero();
+    case setst_sort:
+      return setst_zero();
+    case flowrow_sort:
+      return flowrow_zero();
+    case term_sort:
+      return term_zero();
+    default:
+      fail("Unmatched sort in get_sort_zero\n");
+      return NULL;
+    }
+  return NULL;
+}
+
+static gen_e get_sort_one(sort_kind s)
+{
+  switch (s)
+    {
+    case setif_sort:
+      return setif_one();
+    case setst_sort:
+      return setst_one();
+    case flowrow_sort:
+      return flowrow_one();
+    case term_sort:
+      return term_one();
+    default:
+      fail("Unmatched sort in get_sort_one\n");
+      return NULL;
+    }
+  return NULL;
+}
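The matching predicate `pat_match` is used throughout this file but never defined in it; the real definition presumably lives elsewhere in the engine. Under the convention stated above (constructor type codes are even, projection-pattern codes are odd, and a pattern's code is its constructor's code plus one), a minimal sketch of what it must compute — the definition below is an editorial assumption, not part of the commit:

    /* Hypothetical sketch: a projection pattern matches a constructor
       exactly when its (odd) type code is the constructor's (even)
       type code plus one. */
    static bool pat_match(int pat_type, int con_type)
    {
      return pat_type == con_type + 1;
    }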
+static region get_sort_region(sort_kind s)
+{
+  switch (s)
+    {
+    case setif_sort:
+      return setif_region;
+    case setst_sort:
+      return setst_region;
+    case flowrow_sort:
+      return flowrow_region;
+    case term_sort:
+      return term_region;
+    default:
+      fail("Unmatched sort in get_sort_region\n");
+      return NULL;
+    }
+  return NULL;
+}
+
+static term_hash get_sort_hash(sort_kind s)
+{
+  switch (s)
+    {
+    case setif_sort:
+      return setif_hash;
+    case setst_sort:
+      return setst_hash;
+    case flowrow_sort:
+      return flowrow_hash;
+    case term_sort:
+      return term_hash;
+    default:
+      fail("Unmatched sort in get_sort_hash\n");
+      return NULL;
+    }
+  return NULL;
+}
+
+constructor make_constructor(const char *name,sort_kind sort, sig_elt s[],
+                             int arity)
+{
+  constructor c = ralloc(expr_region,struct constructor);
+  sig_elt *sig = rarrayalloc(expr_region,arity,sig_elt);
+
+  c->type = new_type();
+
+  if (arity)
+    {
+      memcpy(sig,s,sizeof(sig_elt)*arity);
+    }
+
+  if ( fixed_sort(sort) )
+    failure("Specified sort does not allow constructor types\n");
+
+  c->sort = sort;
+  c->arity = arity;
+  c->name = rstrdup(expr_region,name);
+  c->sig = sig;
+
+  return c;
+}
+
+gen_e constructor_expr(constructor c, gen_e exps[], int arity)
+{
+  cons_expr result;
+  int i;
+  region sort_region = get_sort_region(c->sort);
+  term_hash sort_hash = get_sort_hash(c->sort);
+
+  stamp *st = rarrayalloc(sort_region,arity + 1,stamp);
+  st[0] = c->type;
+
+  // Dynamic arity check
+  if(arity != c->arity)
+    failure("Signature mismatch\n");
+
+  // Dynamic sort checks
+  for (i = 0; i < arity; i++)
+    {
+      if ( c->sig[i].sort != exps[i]->sort)
+        failure("Signature mismatch\n");
+      st[i+1] = exps[i]->st;
+    }
+
+  // Hash-consing of terms
+  if (!(result = term_hash_find(sort_hash,st,arity+1)) || arity == 0 )
+    {
+      gen_e *e = rarrayalloc(sort_region,arity,gen_e);
+
+      if (arity)
+        memcpy(e,exps,sizeof(gen_e)*arity);
+      else
+        e = NULL;
+
+      result = ralloc(sort_region,struct cons_expr);
+      result->type = st[0];
+      result->st = stamp_fresh();
+      result->sort = c->sort;
+      result->arity = c->arity;
+      result->name = c->name;
+      result->sig = c->sig;
+      result->exps = e;
+
+      term_hash_insert(sort_hash,result,st,arity+1);
+    }
+
+  return (gen_e)result;
+}
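constructor_expr hash-conses constructed terms: the key is the stamp vector (constructor type, stamp of argument 1, ..., stamp of argument n), so building the same constructor over the same arguments a second time returns the identical term. A hypothetical caller — the constructor and variable names here are invented for illustration:

    /* Hypothetical usage sketch: the second constructor_expr call hits
       the term hash, so the two terms are pointer-equal. */
    sig_elt sig[2] = { {vnc_pos, setif_sort}, {vnc_pos, setif_sort} };
    constructor c  = make_constructor("cons", setif_sort, sig, 2);
    gen_e args[2]  = { setif_fresh("x"), setif_fresh("y") };
    gen_e t1 = constructor_expr(c, args, 2);
    gen_e t2 = constructor_expr(c, args, 2);
    assert(t1 == t2);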
+static gen_e make_proj_pat(constructor c, int i, gen_e e)
+{
+  proj_pat pat;
+  region sort_region = get_sort_region(e->sort);
+  term_hash sort_hash = get_sort_hash(e->sort);
+
+  stamp s[3];
+  s[0] = c->type + 1;
+  s[1] = e->st;
+  s[2] = i;
+
+  if (! (pat = term_hash_find(sort_hash,s,3)) )
+    {
+      pat = ralloc(sort_region,struct proj_pat);
+      pat->type = s[0];
+      pat->st = stamp_fresh();
+      pat->sort = c->sort;
+      pat->exp = e;
+      pat->variance = c->sig[i].variance;
+      pat->c = c;
+      pat->i = i;
+      term_hash_insert(sort_hash,pat,s,3);
+    }
+
+  return (gen_e)pat;
+}
+
+gen_e setif_proj_pat(constructor c,int i,gen_e e)
+{
+  return make_proj_pat(c,i,e);
+}
+
+gen_e setst_proj_pat(constructor c, int i, gen_e e)
+{
+  return make_proj_pat(c,i,e);
+}
+
+gen_e setif_proj(constructor c, int i, gen_e e)
+{
+  setif_var v;
+  gen_e proj_var, proj;
+
+  gen_e nonspec_get_proj(gen_e_list arg1)
+  {
+    proj_pat pat;
+    gen_e_list_scanner scan;
+    gen_e temp;
+
+    gen_e_list_scan(arg1,&scan);
+    while (gen_e_list_next(&scan,&temp))
+      {
+        pat = (proj_pat)temp;
+        if ( pat_match(pat->type,c->type) && i == pat->i )
+          return pat->exp;
+      }
+    return NULL;
+  }
+
+  if (e->sort != setif_sort)
+    {
+      failure("Sort check : setif_proj\n");
+    }
+
+  else if (i < 0 || i >= c->arity)
+    {
+      failure("Signature mismatch\n");
+    }
+
+  else if (setif_is_zero(e))
+    return get_sort_zero(c->sig[i].sort);
+
+  else if ( ((setif_term)e)->type == c->type )
+    {
+      cons_expr constructed = (cons_expr)e;
+      return constructed->exps[i];
+    }
+
+  else if (setif_is_var(e))
+    {
+      v = (setif_var)e;
+      if ( (proj = sv_get_ub_proj(v,nonspec_get_proj)) )
+        {
+          return proj;
+        }
+      else
+        {
+          gen_e pat;
+          gen_e_list_scanner scan;
+          gen_e lb;
+          proj_var = get_proj_var(c->sig[i].sort,FALSE);
+          pat = setif_proj_pat(c,i,proj_var);
+          sv_add_ub_proj(v,pat);
+
+          gen_e_list_scan(sv_get_lbs(v),&scan);
+          while (gen_e_list_next(&scan,&lb))
+            {
+              call_setif_inclusion(lb,pat);
+            }
+          return proj_var;
+        }
+    }
+
+  else if (setif_is_union(e))
+    {
+      if( (proj = nonspec_get_proj(setif_get_proj_cache(e))) )
+        return proj;
+      else
+        {
+          gen_e pat;
+          proj_var = get_proj_var(c->sig[i].sort,FALSE);
+          pat = setif_proj_pat(c,i,proj_var);
+
+          setif_set_proj_cache(e,pat);
+
+          call_setif_inclusion(e,pat);
+          return proj_var;
+        }
+    }
+  else
+    {
+      gen_e pat;
+      proj_var = get_proj_var(c->sig[i].sort,FALSE);
+      pat = setif_proj_pat(c,i,proj_var);
+      call_setif_inclusion(e,pat);
+      return proj_var;
+    }
+}
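setif_proj implements field selection over set expressions: projecting a constructed term returns the stored field directly, while projecting a variable or union introduces a fresh projection variable tied to the expression through a projection pattern. A hypothetical use — all names are invented for illustration:

    /* Hypothetical sketch of the two cases handled above. */
    sig_elt pair_sig[2] = { {vnc_pos, setif_sort}, {vnc_pos, setif_sort} };
    constructor pair = make_constructor("pair", setif_sort, pair_sig, 2);
    gen_e args[2] = { setif_constant("A"), setif_constant("B") };

    /* Projecting a constructed term returns the field itself ("A"). */
    gen_e fst = setif_proj(pair, 0, constructor_expr(pair, args, 2));

    /* Projecting a variable x returns a fresh projection variable fv and
       records the pattern x <= projpat(pair,0,fv) as an upper bound. */
    gen_e fv = setif_proj(pair, 0, setif_fresh("x"));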
+gen_e setst_proj(constructor c, int i, gen_e e)
+{
+  setst_var v;
+  gen_e proj_var, proj;
+
+  gen_e nonspec_get_proj(gen_e_list arg1)
+  {
+    proj_pat pat;
+    gen_e_list_scanner scan;
+    gen_e temp;
+
+    gen_e_list_scan(arg1,&scan);
+    while (gen_e_list_next(&scan,&temp))
+      {
+        pat = (proj_pat)temp;
+        if ( pat_match(pat->type,c->type) && i == pat->i )
+          return pat->exp;
+      }
+    return NULL;
+  }
+
+  if (e->sort != setst_sort)
+    {
+      failure("Sort check : setst_proj\n");
+    }
+
+  else if (i < 0 || i >= c->arity)
+    {
+      failure("Signature mismatch\n");
+    }
+
+  else if (setst_is_zero(e))
+    return get_sort_zero(c->sig[i].sort);
+
+  else if ( ((setst_term)e)->type == c->type )
+    {
+      cons_expr constructed = (cons_expr)e;
+      return constructed->exps[i];
+    }
+
+  else if (setst_is_var(e))
+    {
+      v = (setst_var)e;
+      if ( (proj = st_get_ub_proj(v,nonspec_get_proj)) )
+        {
+          return proj;
+        }
+      else
+        {
+          gen_e pat;
+          gen_e_list_scanner scan;
+          gen_e lb;
+          proj_var = get_proj_var(c->sig[i].sort,FALSE);
+          pat = setst_proj_pat(c,i,proj_var);
+          st_add_ub_proj(v,pat);
+
+          gen_e_list_scan(st_get_lbs(v),&scan);
+          while (gen_e_list_next(&scan,&lb))
+            {
+              call_setst_inclusion(lb,pat);
+            }
+          return proj_var;
+        }
+    }
+
+  else if (setst_is_union(e))
+    {
+      if( (proj = nonspec_get_proj(setst_get_proj_cache(e))) )
+        return proj;
+      else
+        {
+          gen_e pat;
+          proj_var = get_proj_var(c->sig[i].sort,FALSE);
+          pat = setst_proj_pat(c,i,proj_var);
+
+          setst_set_proj_cache(e,pat);
+
+          call_setst_inclusion(e,pat);
+          return proj_var;
+        }
+    }
+  else
+    {
+      gen_e pat;
+      proj_var = get_proj_var(c->sig[i].sort,FALSE);
+      pat = setst_proj_pat(c,i,proj_var);
+      call_setst_inclusion(e,pat);
+      return proj_var;
+    }
+}
+
+static void setif_con_match(gen_e e1, gen_e e2)
+{
+  // Case where e1 is a constructor expression and e2 is a proj_pat
+  if (pat_match(((setif_term)e2)->type,((setif_term)e1)->type))
+    {
+      cons_expr c = (cons_expr)e1;
+      proj_pat p = (proj_pat)e2;
+      int i = p->i;
+
+      if (c->sig[i].variance == vnc_pos)
+        call_inclusion_fn(c->exps[i],p->exp);
+      else if (c->sig[i].variance == vnc_neg)
+        call_inclusion_fn(p->exp,c->exps[i]);
+      else
+        call_unify_fn(c->exps[i],p->exp);
+    }
+  else if (setif_is_pat(e2))
+    {
+      return;
+    }
+
+  // Case where e1 and e2 are constructor expressions
+  else
+    {
+      cons_expr c1 = (cons_expr)e1,
+        c2 = (cons_expr)e2;
+
+      if (c1->type != c2->type)
+        failure("Constructor mismatch\n");
+      else
+        {
+          int i;
+          for (i = 0; i < c1->arity; i++)
+            {
+              if (c1->sig[i].variance == vnc_pos)
+                call_inclusion_fn(c1->exps[i],c2->exps[i]);
+              else if (c1->sig[i].variance == vnc_neg)
+                call_inclusion_fn(c2->exps[i],c1->exps[i]);
+              else
+                call_unify_fn(c1->exps[i],c2->exps[i]);
+            }
+
+        }
+    }
+}
+
+static void setst_con_match(gen_e e1, gen_e e2)
+{
+  // Case where e1 is a constructor expression and e2 is a proj_pat
+  if (pat_match(((setst_term)e2)->type,((setst_term)e1)->type))
+    {
+      cons_expr c = (cons_expr)e1;
+      proj_pat p = (proj_pat)e2;
+      int i = p->i;
+
+      if (c->sig[i].variance == vnc_pos)
+        call_inclusion_fn(c->exps[i],p->exp);
+      else if (c->sig[i].variance == vnc_neg)
+        call_inclusion_fn(p->exp,c->exps[i]);
+      else
+        call_unify_fn(c->exps[i],p->exp);
+    }
+  else if (setst_is_pat(e2))
+    {
+      return;
+    }
+
+  // Case where e1 and e2 are constructor expressions
+  else
+    {
+      cons_expr c1 = (cons_expr)e1,
+        c2 = (cons_expr)e2;
+
+      if (c1->type != c2->type)
+        failure("Constructor mismatch\n");
+      else
+        {
+          int i;
+          for (i = 0; i < c1->arity; i++)
+            {
+              if (c1->sig[i].variance == vnc_pos)
+                call_inclusion_fn(c1->exps[i],c2->exps[i]);
+              else if (c1->sig[i].variance == vnc_neg)
+                call_inclusion_fn(c2->exps[i],c1->exps[i]);
+              else
+                call_unify_fn(c1->exps[i],c2->exps[i]);
+            }
+
+        }
+    }
+}
+
+// given x <= proj(c,i,e)
+// proj_merge(region,e,get_proj_i_arg,fresh_large_fn_ptr,
+//            sort_inclusion_fn_ptr,set_inclusion)
+static bool nonspec_res_proj(setif_var v1,gen_e e2)
+{
+  proj_pat projection_pat = (proj_pat)e2;
+
+  gen_e setif_get_proj(gen_e_list arg1)
+  {
+    gen_e_list_scanner scan;
+    gen_e temp;
+    proj_pat pat;
+
+    gen_e_list_scan(arg1,&scan);
+    while(gen_e_list_next(&scan,&temp))
+      {
+        pat = (proj_pat)temp;
+        if ( pat->type == ((setif_term)e2)->type &&
+             pat->i == ((proj_pat)e2)->i)
+          return pat->exp;
+      }
+    return NULL;
+  }
+
+  gen_e fresh_large(const char *name)
+  {
+    return get_proj_var( ((proj_pat)e2)->exp->sort,TRUE);
+  }
+
+  bool sort_inclusion(gen_e e1, gen_e e2)
+  {
+    if ( projection_pat->variance == vnc_pos )
+      call_inclusion_fn(e1,e2);
+    else if ( projection_pat->variance == vnc_neg)
+      call_inclusion_fn(e2,e1);
+    else
+      call_unify_fn(e1,e2);
+    return TRUE;
+  }
+
+  gen_e proj_con(gen_e e)
+  {
+    return make_proj_pat( ((proj_pat)e2)->c, ((proj_pat)e2)->i,e);
+  }
+
+  return setif_proj_merge(v1,((proj_pat)e2)->exp,
+                          setif_get_proj,proj_con,
+                          fresh_large,sort_inclusion,
+                          call_setif_inclusion);
+}
+
+void call_setif_inclusion(gen_e e1,gen_e e2)
+{
+  setif_inclusion(setif_con_match,nonspec_res_proj,e1,e2);
+}
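The con_match functions above decompose a constraint between two terms of the same constructor field-by-field according to the declared variance, while the unify entry points below simply assert inclusion in both directions. For a hypothetical constructor with one covariant and one contravariant field, asserting ref(a,b) <= ref(a2,b2) amounts to a <= a2 and b2 <= b — all names here are invented:

    /* Hypothetical sketch of variance-directed decomposition. */
    sig_elt ref_sig[2] = { {vnc_pos, setif_sort}, {vnc_neg, setif_sort} };
    constructor ref = make_constructor("ref", setif_sort, ref_sig, 2);
    gen_e l[2] = { setif_fresh("a"),  setif_fresh("b")  };
    gen_e r[2] = { setif_fresh("a2"), setif_fresh("b2") };

    /* Decomposes into a <= a2 (covariant) and b2 <= b (contravariant). */
    call_setif_inclusion(constructor_expr(ref, l, 2),
                         constructor_expr(ref, r, 2));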
+void call_setif_unify(gen_e e1, gen_e e2)
+{
+  setif_inclusion(setif_con_match,nonspec_res_proj,e1,e2);
+  setif_inclusion(setif_con_match,nonspec_res_proj,e2,e1);
+}
+
+void call_setst_inclusion(gen_e e1, gen_e e2)
+{
+  setst_inclusion(setst_con_match,e1,e2);
+}
+
+void call_setst_unify(gen_e e1, gen_e e2)
+{
+  setst_inclusion(setst_con_match,e1,e2);
+  setst_inclusion(setst_con_match,e2,e1);
+}
+
+void call_flowrow_inclusion(gen_e e1,gen_e e2)
+{
+  if ( (e1->sort != flowrow_sort) || (e2->sort != flowrow_sort) )
+    failure("Constraint system is not well-sorted\n");
+
+  if ( flowrow_base_sort(e1) != flowrow_base_sort(e2))
+    failure("Constraint system is not well-sorted\n");
+
+  flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,e1,e2);
+}
+
+void call_flowrow_unify(gen_e e1, gen_e e2)
+{
+  flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,e1,e2);
+  flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,e2,e1);
+}
+
+static void term_con_match(gen_e e1, gen_e e2)
+{
+  cons_expr c1 = (cons_expr)e1,
+    c2 = (cons_expr)e2;
+
+  if (c1->type != c2->type)
+    failure("Constructor mismatch\n");
+  else
+    {
+      int i;
+      for (i = 0; i < c1->arity; i++)
+        {
+          call_unify_fn(c1->exps[i],c2->exps[i]);
+        }
+
+    }
+}
+
+static bool term_occurs(term_var v, gen_e e)
+{
+  gen_e ecr = term_get_ecr(e);
+
+  if (((gen_term)ecr)->type == VAR_TYPE)
+    return ( term_get_stamp((gen_e)v) == term_get_stamp(e) );
+
+  else if (((gen_term)ecr)->type >= 10)
+    {
+      cons_expr c_e = (cons_expr) ecr;
+      int i;
+      for (i = 0; i < c_e->arity; i++)
+        {
+          if (term_occurs(v,c_e->exps[i]))
+            return TRUE;
+        }
+    }
+
+  return FALSE;
+}
+
+void call_term_unify(gen_e e1, gen_e e2)
+{
+  term_unify(term_con_match,term_occurs,e1,e2);
+}
+
+void call_term_cunify(gen_e e1, gen_e e2)
+{
+  term_cunify(term_con_match,term_occurs,e1,e2);
+}
+
+static void call_inclusion_fn(gen_e e1, gen_e e2)
+{
+  switch (e1->sort)
+    {
+    case setif_sort:
+      {
+        setif_inclusion(setif_con_match,nonspec_res_proj,e1,e2);
+      }
+      break;
+    case setst_sort:
+      {
+        setst_inclusion(setst_con_match,e1,e2);
+      }
+      break;
+    case term_sort:
+      {
+        term_unify(term_con_match,term_occurs,e1,e2);
+      }
+      break;
+    case flowrow_sort:
+      {
+        /* TODO */
+        flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,e1,e2);
+      }
+      break;
+    default:
+      fail("Unmatched sort in call_inclusion_fn\n");
+    }
+}
+
+static void call_unify_fn(gen_e e1, gen_e e2)
+{
+  switch (e1->sort)
+    {
+    case setif_sort:
+      {
+        setif_inclusion(setif_con_match,nonspec_res_proj,e1,e2);
+        setif_inclusion(setif_con_match,nonspec_res_proj,e2,e1);
+      }
+      break;
+    case setst_sort:
+      {
+        setst_inclusion(setst_con_match,e1,e2);
+        setst_inclusion(setst_con_match,e2,e1);
+      }
+      break;
+    case term_sort:
+      {
+        term_unify(term_con_match,term_occurs,e1,e2);
+      }
+      break;
+    case flowrow_sort:
+      {
+        /* TODO */
+        flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,e1,e2);
+        flowrow_inclusion(fresh,get_stamp,field_incl,zero_elem,e2,e1);
+      }
+      break;
+    default:
+      fail("Unmatched sort in call_unify_fn\n");
+    }
+}
+
+void nonspec_init(void)
+{
+  banshee_init();
+  setif_init();
+  setst_init();
+  flowrow_init();
+}
+
+void nonspec_reset(void)
+{
+  flowrow_reset();
+  setst_reset();
+  setif_reset();
+  banshee_reset();
+}
+
+void expr_print(FILE *f,gen_e e)
+{
+
+}
diff --git a/libbanshee/engine/nonspec.h b/libbanshee/engine/nonspec.h
new file mode 100644
index 00000000000..bfa3b9d8b90
--- /dev/null
+++ b/libbanshee/engine/nonspec.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2000-2001
+ *     The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef NONSPEC_H
+#define NONSPEC_H
+
+#include <stdio.h>
+#include "banshee.h"  /* for gen_e, sort_kind and the EXTERN_C macros */
+
+EXTERN_C_BEGIN
+
+typedef enum
+{
+  vnc_pos,
+  vnc_neg,
+  vnc_non
+} vnc_kind;
+
+struct sig_elt
+{
+  vnc_kind variance;
+  sort_kind sort;
+};
+
+typedef struct sig_elt sig_elt;
+typedef struct constructor *constructor;
+
+struct flowrow_field
+{
+  char *label;
+  gen_e expr;
+};
+
+typedef struct flowrow_field flowrow_field;
+
+DECLARE_LIST(flowrow_map,flowrow_field)
+
+/*
+  Flags
+*/
+extern bool flag_merge_projections;
+extern bool flag_eliminate_cycles;
+extern bool flag_occurs_check;
+
+/*
+  Operations for building terms
+*/
+
+/* Defines a new constructor for sort s with the given signature */
+constructor make_constructor(const char *name,sort_kind sort, sig_elt[],
+                             int arity);
+
+/* Build the term c(exps[0]....exps[n]) */
+gen_e constructor_expr(constructor c, gen_e exps[], int arity);
+
+/* Make a constant of sort s */
+gen_e setif_constant(const char *name);
+
+gen_e setst_constant(const char *name);
+
+gen_e term_constant(const char *name);
+
+/* Creates a projection pattern projpat(c,i,e) */
+gen_e setif_proj_pat(constructor c,int i,gen_e e);
+
+gen_e setst_proj_pat(constructor c, int i, gen_e e);
+
+/* Adds a constraint e <= projpat(c,i,fv) where fv is a fresh variable */
+gen_e setif_proj(constructor c, int i, gen_e e);
+
+gen_e setst_proj(constructor c, int i, gen_e e);
+
+/* Make a new variable of sort s */
+gen_e setif_fresh(const char *name);
+
+gen_e term_fresh(const char *name);
+
+gen_e flowrow_fresh(const char *name);
+
+gen_e setst_fresh(const char *name);
+
+/* Operations for unions */
+
+gen_e setif_union(gen_e exps[]);
+
+gen_e setif_inter(gen_e exps[]);
+
+gen_e setst_union(gen_e exps[]);
+
+gen_e setst_inter(gen_e exps[]);
+
+/* Empty set of sort s */
+gen_e setif_zero(void);
+
+gen_e setst_zero(void);
+
+gen_e flowrow_zero(sort_kind base_sort);
+
+gen_e term_zero(void);
+
+/* Universal set of sort s */
+gen_e setif_one(void);
+
+gen_e setst_one(void);
+
+gen_e flowrow_one(sort_kind base_sort);
+
+gen_e term_one(void);
+
+/*
+  Operations for building flowrows
+*/
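A sketch of how the flowrow operations declared just below combine. The flowrow_map list constructors are assumed to follow the DECLARE_LIST conventions used elsewhere in this engine (new_<list>(region) and <list>_cons(elem, list)); those names and all others here are assumptions, not part of the commit:

    /* Hypothetical sketch: builds the open row <l : e | rest>. */
    region r = newregion();
    flowrow_map fields = new_flowrow_map(r);
    struct flowrow_field f = { "l", setif_fresh("e") };
    flowrow_map_cons(f, fields);
    gen_e row = flowrow_row(fields, flowrow_fresh("rest"));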
+/* Closed flowrow of base sort s */
+gen_e flowrow_abs(sort_kind base_sort);
+
+/* Wild flowrow of base sort s */
+gen_e flowrow_wild(sort_kind base_sort);
+
+/* Build a flowrow of <l : e>_fields o <rest> */
+gen_e flowrow_row(flowrow_map fields, gen_e rest);
+
+/*
+  Inclusion functions
+*/
+void call_setif_inclusion(gen_e e1,gen_e e2);
+void call_setif_unify(gen_e e1, gen_e e2);
+
+void call_setst_inclusion(gen_e e1, gen_e e2);
+void call_setst_unify(gen_e e1, gen_e e2);
+
+void call_flowrow_inclusion(gen_e e1,gen_e e2);
+void call_flowrow_unify(gen_e e1, gen_e e2);
+
+void call_term_unify(gen_e e1, gen_e e2);
+void call_term_cunify(gen_e e1, gen_e e2);
+
+/*
+  Extracting solutions
+ */
+struct decon
+{
+  int arity;
+  gen_e exps[1];
+};
+struct decon deconstruct_expr(constructor c,gen_e e);
+
+gen_e_list setif_tlb(gen_e e);
+
+gen_e_list setst_tlb(gen_e e);
+
+gen_e term_get_ecr(gen_e e);
+
+gen_e flowrow_extract_field(const char *label,gen_e row);
+flowrow_map flowrow_extract_fields(gen_e row);
+gen_e flowrow_extract_rest(gen_e row);
+
+void nonspec_init(void);
+void nonspec_reset(void);
+
+void expr_print(FILE *f,gen_e e);
+
+EXTERN_C_END
+
+#endif /* NONSPEC_H */
diff --git a/libbanshee/engine/setif-sort.c b/libbanshee/engine/setif-sort.c
new file mode 100644
index 00000000000..0350018dfcd
--- /dev/null
+++ b/libbanshee/engine/setif-sort.c
@@ -0,0 +1,1141 @@
+/*
+ * Copyright (c) 2000-2004
+ *     The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * + */ + +#include <assert.h> +#include <setjmp.h> +#include "regions.h" +#include "bounds.h" +#include "jcollection.h" +#include "setif-sort.h" +#include "util.h" + +bool flag_eliminate_cycles = TRUE; +bool flag_merge_projections = TRUE; + +struct setif_union_ /* extends gen_e */ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + stamp st; + gen_e_list exprs; + gen_e_list proj_cache; +}; + +struct setif_inter_ /* extends gen_e */ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + stamp st; + gen_e_list exprs; +}; + +struct setif_constant_ /* extends gen_e */ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + stamp st; + char *name; +}; + +typedef struct setif_inter_ *setif_inter_; +typedef struct setif_union_ *setif_union_; +typedef struct setif_constant_ *setif_constant_; + +static setif_var_list setif_vars; +static region tlb_cache_region; +static setif_var_list tlb_var_cache; +static jcoll_dict tlb_dict; + + +region setif_region; +term_hash setif_hash; +struct setif_stats setif_stats; + +stamp setif_get_stamp(gen_e e) +{ +#ifdef NONSPEC + assert(e->sort == setif_sort); +#endif + + if ( ((setif_term)e)->type == VAR_TYPE) + return sv_get_stamp( (setif_var)e ); + + else + return ((setif_term)e)->st; +} + +static void tlv_lower_aux(jmp_buf buf,stamp st, gen_e e) +{ + if ( setif_is_var(e) && (setif_get_stamp(e) > st) ) + longjmp(buf,1); + + else if (setif_is_union(e)) + { + gen_e temp; + gen_e_list exprs = ((setif_union_)e)->exprs; + gen_e_list_scanner scan; + + gen_e_list_scan(exprs,&scan); + while (gen_e_list_next(&scan,&temp)) + tlv_lower_aux(buf,st,temp); + } +} + +static bool tlv_lower(stamp st, gen_e e) +{ + jmp_buf buf; + int higher; + + higher = setjmp(buf); + if (higher) + return FALSE; + + tlv_lower_aux(buf,st,e); + + return TRUE; +} + +static void invalidate_tlb_cache(void) deletes +{ + assert(tlb_cache_region); + + setif_var_list_app(tlb_var_cache,sv_clear_tlb_cache); + jcoll_delete_dict(tlb_dict); + deleteregion_ptr(&tlb_cache_region); + + tlb_cache_region = newregion(); + tlb_dict = jcoll_create_dict(tlb_cache_region,setif_get_stamp); + tlb_var_cache = new_setif_var_list(tlb_cache_region); +} + +static void set_tlb_cache(setif_var v, jcoll j) +{ + setif_var_list_cons(v,tlb_var_cache); + sv_set_tlb_cache(v,j); +} + +/* + A constraint e1 <= e2 is l-inductive iff e2 is a variable x and + for each y in tlv(e1), stamp(y) < stamp(x) +*/ +static bool l_inductive(gen_e e1, gen_e e2) +{ + if (setif_is_var(e2) && tlv_lower(setif_get_stamp(e2), e1)) + return TRUE; + + else return FALSE; +} + +/* + A constraint e1 <= e2 is r-inductive iff e1 is a variable x and + for each y in tlv(e2), stamp(y) < stamp(x) +*/ +static bool r_inductive(gen_e e1, gen_e e2) +{ + if (setif_is_var(e1) && tlv_lower(setif_get_stamp(e1), e2)) + return TRUE; + + else return FALSE; +} + +static bool eq(gen_e e1, gen_e e2) +{ + return ( setif_get_stamp(e1) == setif_get_stamp(e2) ); +} + +gen_e_list setif_get_union(gen_e e) +{ + assert ( ((setif_term)e)->type == UNION_TYPE); + + return ( (setif_union_) e)->exprs; +} + +gen_e_list setif_get_inter(gen_e e) +{ + assert ( ((setif_term)e)->type == INTER_TYPE); + + return ( (setif_inter_) e)->exprs; +} + +static setif_var_list search_ubs(region r, setif_var v1, setif_var goal) +{ + bool found = FALSE; + setif_var_list cycle; + + static void search_ubs_aux(setif_var v) + { + assert(! 
found); + + if (sv_eq (v, goal)) + { + found = TRUE; + return; + } + else if (sv_lt(v,goal)) + { + return; + } + else + { + gen_e_list_scanner scan; + gen_e ub; + gen_e_list ubs = sv_get_ubs(v); + + gen_e_list_scan(ubs,&scan); + while (gen_e_list_next(&scan,&ub)) + { + if (setif_is_var(ub)) + { + search_ubs_aux((setif_var)ub); + if (found) + { + setif_var_list_cons(v,cycle); + return; + } + } + } + } + } + + found = FALSE; + cycle = new_setif_var_list(r); + search_ubs_aux(v1); + + return cycle; +} + +static setif_var_list search_lbs(region r, setif_var v1, setif_var goal) +{ + bool found; + setif_var_list cycle; + + static void search_lbs_aux(setif_var v) + { + assert (! found); + if (sv_eq(v,goal)) + { + found = TRUE; + return; + } + else if (sv_lt(v,goal)) + { + return; + } + else + { + gen_e_list_scanner scan; + gen_e lb; + gen_e_list lbs = sv_get_lbs(v); + + gen_e_list_scan(lbs,&scan); + while (gen_e_list_next(&scan,&lb)) + { + if (setif_is_var(lb)) + { + search_lbs_aux((setif_var)lb); + if (found) + { + setif_var_list_cons(v,cycle); + return; + } + } + } + } + + } + + found = FALSE; + cycle = new_setif_var_list(r); + search_lbs_aux(v1); + + return cycle; +} + +static setif_var_list cycle_detect(region r,setif_var v1,setif_var v2) +{ + if (sv_union_component(v1,v2)) + return new_setif_var_list(r); + + else + { + setif_stats.cycles_searched_forward++; + return search_ubs(r, v2, v1); + } +} + + +static setif_var_list cycle_detect_rev(region r, setif_var v1, setif_var v2) +{ + if (sv_union_component(v1,v2)) + return new_setif_var_list(r); + + else + { + setif_stats.cycles_searched_backward++; + return search_lbs(r, v1, v2); + } +} + +void setif_inclusion(con_match_fn_ptr con_match, res_proj_fn_ptr res_proj, + gen_e e1, gen_e e2) deletes +{ + + static void collapse_cycle_lower(region r, setif_var witness, + setif_var_list cycle) deletes + { + gen_e lb; + gen_e_list_scanner scan_bounds; + setif_var_list_scanner scan_cycle; + setif_var var; + +#ifndef NDEBUG + stamp lowest = sv_get_stamp(witness); +#endif + bounds b = bounds_create(r); + + /* Collect all lower bounds in the cycle, and add transitive edges */ + setif_var_list_scan(cycle,&scan_cycle); + while(setif_var_list_next(&scan_cycle,&var)) + { + assert( sv_get_stamp(var) > lowest); + gen_e_list_scan(sv_get_lbs(var),&scan_bounds); + while(gen_e_list_next(&scan_bounds,&lb)) + bounds_add(b,lb,setif_get_stamp(lb)); + } + + sv_unify(witness,cycle); + assert(sv_get_stamp(witness) == lowest); + + gen_e_list_scan(bounds_exprs(b),&scan_bounds); + while (gen_e_list_next(&scan_bounds,&lb)) + setif_inclusion(con_match,res_proj,lb, (gen_e) witness); + + bounds_delete(b); + invalidate_tlb_cache(); + + setif_stats.cycles_collapsed_backward++; + setif_stats.cycles_length_backward += setif_var_list_length(cycle); + } + + static void collapse_cycle_upper(region r, setif_var witness, + setif_var_list cycle) deletes + { + gen_e ub; + gen_e_list_scanner scan_bounds; + setif_var_list_scanner scan_cycle; + setif_var var; + +#ifndef NDEBUG + stamp lowest = sv_get_stamp(witness); +#endif + bounds b = bounds_create(r); + + /* Collect all upper bounds in the cycle, and add transitive edges */ + setif_var_list_scan(cycle,&scan_cycle); + while(setif_var_list_next(&scan_cycle,&var)) + { + assert( sv_get_stamp(var) > lowest); + + gen_e_list_scan(sv_get_ubs(var),&scan_bounds); + while(gen_e_list_next(&scan_bounds,&ub)) + bounds_add(b,ub,setif_get_stamp(ub)); + + gen_e_list_scan(sv_get_ub_projs(var),&scan_bounds); + while(gen_e_list_next(&scan_bounds,&ub)) + 
bounds_add(b,ub,setif_get_stamp(ub));
+      }
+
+    sv_unify(witness,cycle);
+    assert(sv_get_stamp(witness) == lowest);
+
+    gen_e_list_scan(bounds_exprs(b),&scan_bounds);
+    while (gen_e_list_next(&scan_bounds,&ub))
+      setif_inclusion(con_match,res_proj,(gen_e) witness, ub);
+
+    bounds_delete(b);
+    invalidate_tlb_cache();
+
+    setif_stats.cycles_collapsed_forward++;
+    setif_stats.cycles_length_forward += setif_var_list_length(cycle);
+  }
+
+  static void update_lower_bound(setif_var v, gen_e e) deletes
+  {
+    if (sv_add_lb(v,e,setif_get_stamp(e)))
+      {
+	if (setif_is_var(e))
+	  setif_stats.redundant_succ++;
+
+	else
+	  setif_stats.redundant_source++;
+      }
+
+    else
+      {
+	gen_e_list_scanner scan;
+	gen_e ub;
+
+	if (setif_is_var(e))
+	  setif_stats.added_succ++;
+	else
+	  setif_stats.added_source++;
+
+	invalidate_tlb_cache();
+
+	gen_e_list_scan(sv_get_ubs(v),&scan);
+	while(gen_e_list_next(&scan,&ub))
+	  setif_inclusion(con_match,res_proj,e,ub);
+
+	gen_e_list_scan(sv_get_ub_projs(v),&scan);
+	while (gen_e_list_next(&scan,&ub))
+	  setif_inclusion(con_match,res_proj,e,ub);
+
+      }
+
+  }
+
+  static void update_upper_bound(setif_var v, gen_e e) deletes
+  {
+    if (sv_add_ub(v,e,setif_get_stamp(e)))
+      {
+	if (setif_is_var(e))
+	  setif_stats.redundant_pred++;
+
+	else
+	  setif_stats.redundant_sink++;
+      }
+
+    else
+      {
+	gen_e_list_scanner scan;
+	gen_e lb;
+
+	if (setif_is_var(e))
+	  setif_stats.added_pred++;
+	else
+	  setif_stats.added_sink++;
+
+	invalidate_tlb_cache();
+
+	gen_e_list_scan(sv_get_lbs(v),&scan);
+	while (gen_e_list_next(&scan,&lb))
+	  setif_inclusion(con_match,res_proj,lb,e);
+
+      }
+
+  }
+
+  if (eq(e1,e2))
+    return;
+
+  else if ( setif_is_zero(e1) || setif_is_one(e2) )
+    return;
+
+  /* c <= d */
+  else if ( setif_is_constant(e1) && setif_is_constant(e2) )
+    {
+      failure("Inconsistent system of constraints\n");
+      return;
+    }
+
+  else if ( setif_is_union(e1) )
+    {
+      gen_e_list_scanner scan;
+      gen_e temp;
+
+      gen_e_list exprs = setif_get_union(e1);
+
+      gen_e_list_scan(exprs,&scan);
+      while (gen_e_list_next(&scan,&temp))
+	{
+	  setif_inclusion(con_match,res_proj,temp,e2);
+	}
+
+      return;
+    }
+
+  else if ( setif_is_inter(e2) )
+    {
+      gen_e_list_scanner scan;
+      gen_e temp;
+
+      gen_e_list exprs = setif_get_inter(e2);
+
+      gen_e_list_scan(exprs,&scan);
+      while (gen_e_list_next(&scan,&temp))
+	{
+	  setif_inclusion(con_match,res_proj,e1,temp);
+	}
+
+      return;
+    }
+
+  else if ( l_inductive(e1,e2) ) /* _ <= 'x */
+    {
+      setif_var v2 = ((setif_var)e2);
+
+      if (setif_is_var(e1))
+	{
+	  setif_var v1 = ((setif_var)e1);
+
+	  if (flag_eliminate_cycles)
+	    {
+	      region scratch = newregion();
+	      setif_var_list cycle = cycle_detect(scratch,v1,v2);
+
+	      if (! setif_var_list_empty(cycle))
+		collapse_cycle_upper(scratch,v1,cycle);
+	      else
+		update_lower_bound(v2,e1);
+
+	      deleteregion(scratch);
+	    }
+
+	  else
+	    update_lower_bound(v2,e1);
+	}
+      else /* e1 is a source */
+	update_lower_bound(v2,e1);
+    }
+
+  else if ( r_inductive(e1,e2) ) /* 'x <= _ */
+    {
+      setif_var v1 = ((setif_var)e1);
+
+      if (setif_is_var(e2))
+	{
+	  setif_var v2 = ((setif_var)e2);
+
+	  if (flag_eliminate_cycles)
+	    {
+	      region scratch = newregion();
+	      setif_var_list cycle = cycle_detect_rev(scratch,v1,v2);
+
+	      if (! setif_var_list_empty(cycle))
+		collapse_cycle_lower(scratch,v2,cycle);
+	      else
+		update_upper_bound(v1,e2);
+
+	      deleteregion(scratch);
+	    }
+
+	  else
+	    update_upper_bound(v1,e2);
+	}
+      else /* e2 is a sink */
+	{
+	  if (flag_merge_projections && res_proj(v1,e2))
+	    return;
+	  else
+	    update_upper_bound(v1,e2);
+	}
+    }
+
+  else /* c(...) <= c(...) or c(...)
<= projpat(c,i,e) */ + { + con_match(e1,e2); + return; + } + +} + +#ifdef NONSPEC +static struct setif_term zero = {setif_sort,ZERO_TYPE,ZERO_TYPE}; +static struct setif_term one = {setif_sort,ONE_TYPE,ONE_TYPE}; +#else +static struct setif_term zero = {ZERO_TYPE,ZERO_TYPE}; +static struct setif_term one = {ONE_TYPE,ONE_TYPE}; +#endif /* NONSPEC */ + +gen_e setif_zero(void) +{ + return (gen_e)&zero; +} + +gen_e setif_one(void) +{ + return (gen_e)&one; +} + +gen_e setif_fresh(const char *name) +{ + setif_var result = sv_fresh(setif_region,name); + setif_var_list_cons(result,setif_vars); + + setif_stats.fresh++; + return (gen_e)result; +} + +gen_e setif_fresh_large(const char *name) +{ + setif_var result = sv_fresh_large(setif_region,name); + setif_var_list_cons(result,setif_vars); + + setif_stats.fresh_large++; + return (gen_e)result; +} + +gen_e setif_fresh_small(const char *name) +{ + setif_var result = sv_fresh_small(setif_region,name); + setif_var_list_cons(result,setif_vars); + + setif_stats.fresh_small++; + return (gen_e)result; +} + +gen_e setif_constant(const char *str) deletes +{ + stamp st[2]; + gen_e result; + char *name = rstrdup(setif_region,str); + + assert (str != NULL); + + st[0] = CONSTANT_TYPE; + st[1] = stamp_string(name); + + if ( (result = term_hash_find(setif_hash,st,2)) == NULL) + { + setif_constant_ c = ralloc(setif_region, struct setif_constant_); +#ifdef NONSPEC + c->sort = setif_sort; +#endif + c->type = CONSTANT_TYPE; + c->st = stamp_fresh(); + c->name = name; + + result = (gen_e) c; + term_hash_insert(setif_hash,result,st,2); + + setif_stats.distinct_constants++; + + return result; + } + + else + { + setif_stats.hashed_constants++; + return result; + } +} + +static bool filter_zero(const gen_e e) +{ + return (!setif_is_zero(e)); +} + + +static bool filter_one(const gen_e e) +{ + return (!setif_is_one(e)); +} + +gen_e setif_union(gen_e_list exprs) deletes +{ + gen_e_list filtered = gen_e_list_filter(setif_region,exprs,filter_zero); + + if ( gen_e_list_empty(filtered) ) + { + setif_stats.filtered_unions++; + return setif_zero(); + } + else if (gen_e_list_length(filtered) == 1) + { + setif_stats.filtered_unions++; + return gen_e_list_head(filtered); + } + + else + { + int i = 0; + gen_e temp,result; + gen_e_list_scanner scan; + stamp st[ gen_e_list_length(filtered) + 1 ]; + + st[0] = UNION_TYPE; + + gen_e_list_scan(filtered,&scan); + while (gen_e_list_next(&scan,&temp)) + { + st[++i] = setif_get_stamp(temp); + } + + if ( (result = + term_hash_find(setif_hash,st,gen_e_list_length(filtered)+1)) + == NULL ) + { + struct setif_union_ *u = ralloc(setif_region,struct setif_union_); + + u->type = UNION_TYPE; + u->st = stamp_fresh(); + u->proj_cache = new_gen_e_list(setif_region); + u->exprs = filtered; + + result = (gen_e)u; + term_hash_insert(setif_hash,result,st,gen_e_list_length(filtered)+1); + + setif_stats.distinct_unions++; + return result; + } + else + { + setif_stats.hashed_unions++; + return result; + } + } +} + +gen_e setif_inter(gen_e_list exprs) deletes +{ + gen_e_list filtered = gen_e_list_filter(setif_region,exprs,filter_one); + + if ( gen_e_list_empty(filtered) ) + { + setif_stats.filtered_intersections++; + return setif_one(); + } + else if (gen_e_list_length(filtered) == 1) + { + setif_stats.filtered_intersections++; + return gen_e_list_head(filtered); + } + + else + { + int i = 0; + gen_e temp,result; + gen_e_list_scanner scan; + stamp st[ gen_e_list_length(filtered) + 1 ]; + + st[0] = INTER_TYPE; + + gen_e_list_scan(filtered,&scan); + while 
(gen_e_list_next(&scan,&temp))
+	{
+	  st[++i] = setif_get_stamp(temp);
+	}
+
+      if ( (result =
+	    term_hash_find(setif_hash,st,gen_e_list_length(filtered)+1))
+	   == NULL )
+	{
+	  struct setif_inter_ *u = ralloc(setif_region,struct setif_inter_);
+
+	  u->type = INTER_TYPE;
+	  u->st = stamp_fresh();
+	  u->exprs = filtered;
+
+	  result = (gen_e)u;
+	  term_hash_insert(setif_hash,result,st,gen_e_list_length(filtered)+1);
+
+	  setif_stats.distinct_intersections++;
+
+	  return result;
+	}
+      else
+	{
+	  setif_stats.hashed_intersections++;
+	  return result;
+	}
+    }
+}
+
+bool setif_is_zero(gen_e e)
+{
+  return ((setif_term)e)->type == ZERO_TYPE;
+}
+
+bool setif_is_one(gen_e e)
+{
+  return ((setif_term)e)->type == ONE_TYPE;
+}
+
+bool setif_is_var(gen_e e)
+{
+  return ((setif_term)e)->type == VAR_TYPE;
+}
+
+bool setif_is_union(gen_e e)
+{
+  return ((setif_term)e)->type == UNION_TYPE;
+}
+
+bool setif_is_inter(gen_e e)
+{
+  return ((setif_term)e)->type == INTER_TYPE;
+}
+
+bool setif_is_constant(gen_e e)
+{
+  return ((setif_term)e)->type == CONSTANT_TYPE;
+}
+
+char *setif_get_constant_name(gen_e e)
+{
+  assert( ((setif_term)e)->type == CONSTANT_TYPE );
+
+  return ((setif_constant_)e)->name;
+}
+
+void setif_init(void)
+{
+  setif_region = newregion();
+  tlb_cache_region = newregion();
+  setif_vars = new_setif_var_list(setif_region);
+  tlb_var_cache = new_setif_var_list(tlb_cache_region);
+  setif_hash = make_term_hash(setif_region);
+  tlb_dict = jcoll_create_dict(tlb_cache_region,setif_get_stamp);
+}
+
+static void setif_reset_stats(void)
+{
+  setif_stats.fresh = 0;
+  setif_stats.fresh_small = 0;
+  setif_stats.fresh_large = 0;
+
+  setif_stats.distinct_constructors = 0;
+  setif_stats.hashed_constructors = 0;
+  setif_stats.distinct_constants = 0;
+  setif_stats.hashed_constants = 0;
+  setif_stats.distinct_unions = 0;
+  setif_stats.filtered_unions = 0;
+  setif_stats.hashed_unions = 0;
+  setif_stats.distinct_intersections = 0;
+  setif_stats.filtered_intersections = 0;
+  setif_stats.hashed_intersections = 0;
+
+  setif_stats.redundant_pred = 0;
+  setif_stats.redundant_succ = 0;
+  setif_stats.redundant_source = 0;
+  setif_stats.redundant_sink = 0;
+
+  setif_stats.added_pred = 0;
+  setif_stats.added_succ = 0;
+  setif_stats.added_source = 0;
+  setif_stats.added_sink = 0;
+
+  setif_stats.cycles_searched_forward = 0;
+  setif_stats.cycles_searched_backward = 0;
+
+  setif_stats.cycles_collapsed_forward = 0;
+  setif_stats.cycles_collapsed_backward = 0;
+
+  setif_stats.cycles_length_forward = 0;
+  setif_stats.cycles_length_backward = 0;
+}
+
+void setif_reset(void) deletes
+{
+  term_hash_delete(setif_hash);
+  invalidate_tlb_cache();
+  deleteregion_ptr(&setif_region);
+  deleteregion_ptr(&tlb_cache_region);
+
+  setif_reset_stats();
+
+  setif_region = newregion();
+  tlb_cache_region = newregion();
+  setif_vars = new_setif_var_list(setif_region);
+  tlb_var_cache = new_setif_var_list(tlb_cache_region);
+  setif_hash = make_term_hash(setif_region);
+  tlb_dict = jcoll_create_dict(tlb_cache_region,setif_get_stamp);
+}
+
+static jcoll tlb_aux(gen_e e)
+{
+  if (setif_is_var(e))
+    {
+      setif_var v = (setif_var)e;
+
+      if ( sv_get_tlb_cache(v) != NULL)
+	return sv_get_tlb_cache(v);
+
+      else
+	{
+	  jcoll result;
+	  gen_e_list sources = new_gen_e_list(tlb_cache_region);
+	  jcoll_list jvars = new_jcoll_list(tlb_cache_region);
+	  gen_e_list_scanner scan;
+	  gen_e lb;
+
+	  gen_e_list_scan(sv_get_lbs(v),&scan);
+	  while (gen_e_list_next(&scan,&lb))
+	    {
+	      if (setif_is_var(lb))
+		jcoll_list_cons(tlb_aux(lb),jvars);
+	      else
+		gen_e_list_cons(lb,sources);
+	      /* jsources = jcoll_jcons(tlb_cache_region,lb,jsources); */
+	    }
+
+ if (! gen_e_list_empty(sources)) + jcoll_list_cons(jcoll_create_chain(tlb_dict,sources),jvars); + + result = + jcoll_jjoin(tlb_dict,jvars); + + set_tlb_cache(v,result); + return result; + } + } + else if (setif_is_union(e)) + { + gen_e_list_scanner scan; + gen_e temp; + jcoll_list jexprs = new_jcoll_list(tlb_cache_region); + + gen_e_list_scan(setif_get_union(e),&scan); + while (gen_e_list_next(&scan,&temp)) + { + jcoll_list_cons(tlb_aux(temp),jexprs); + } + + return jcoll_jjoin(tlb_dict,jexprs); + + } + else + { + failure("Unmatched case in setif tlb computation\n"); + return NULL; + } +} + +gen_e_list setif_tlb(gen_e e) deletes +{ + return jcoll_flatten(tlb_dict,tlb_aux(e)); +} + +void setif_set_proj_cache(gen_e e,gen_e elem) +{ + if (setif_is_union(e)) + { + setif_union_ u = (setif_union_)e; + gen_e_list_cons(elem,u->proj_cache); + } +} + +gen_e_list setif_get_proj_cache(gen_e e) +{ + if (setif_is_union(e)) + { + setif_union_ u = (setif_union_)e; + return u->proj_cache; + } + else + { + failure("Term does not cache projections\n"); + return NULL; + } +} + + +bool setif_proj_merge(setif_var v, gen_e se, get_proj_fn_ptr get_proj, + proj_con_fn_ptr proj_con,fresh_large_fn_ptr fresh_large, + incl_fn_ptr sort_incl, incl_fn_ptr set_incl) deletes +{ + gen_e proj; + + if ((proj = sv_get_ub_proj(v,get_proj)) != NULL) + { + sort_incl(proj, se); + return TRUE; + } + + else + { + gen_e_list_scanner scan; + gen_e lb; + + gen_e proj_var; + gen_e proj_cons; + + /* create a projection variable for this projection */ + proj_var = fresh_large(NULL); + + assert(setif_is_var(proj_var)); + + proj_cons = proj_con(proj_var); + + sv_add_ub_proj(v, proj_cons); + + /* apply the transitive rule to each of v's lower bounds */ + gen_e_list_scan(sv_get_lbs(v),&scan); + while (gen_e_list_next(&scan,&lb)) + { + set_incl(lb,proj_cons); + } + + sort_incl(proj_var, se); + return TRUE; + } + +} + + +void setif_print_stats(FILE *f) +{ + fprintf(f,"\n========== SetIF Var Stats ==========\n"); + fprintf(f,"Fresh : %d\n",setif_stats.fresh); + fprintf(f,"Fresh Small : %d\n",setif_stats.fresh_small); + fprintf(f,"Fresh Large : %d\n",setif_stats.fresh_large); + fprintf(f,"Total : %d\n",setif_stats.fresh + setif_stats.fresh_small + + setif_stats.fresh_large); + fprintf(f,"\n========== SetIF Sort Stats ==========\n"); + fprintf(f,"\n"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Additions"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Pred: %d\n",setif_stats.added_pred); + fprintf(f,"Succ: %d\n",setif_stats.added_succ); + fprintf(f,"Source: %d\n",setif_stats.added_source); + fprintf(f,"Sink: %d",setif_stats.added_sink); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Total: %d",setif_stats.added_pred + setif_stats.added_succ + + setif_stats.added_source + setif_stats.added_sink); + fprintf(f,"\n"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Redundant"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Pred: %d\n",setif_stats.redundant_pred); + fprintf(f,"Succ: %d\n",setif_stats.redundant_succ); + fprintf(f,"Source: %d\n",setif_stats.redundant_source); + fprintf(f,"Sink: %d",setif_stats.redundant_sink); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Total: %d\n", + setif_stats.redundant_pred+setif_stats.redundant_succ+setif_stats.redundant_source+setif_stats.redundant_sink); + + fprintf(f,"\n"); + + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Forward Cycles"); + 
fprintf(f,"\n------------------------------\n"); + fprintf(f,"Collapsed: %d\n",setif_stats.cycles_collapsed_forward); + fprintf(f,"Searched: %d\n",setif_stats.cycles_searched_forward); + fprintf(f,"Hit rate: %f\n", + ((float)setif_stats.cycles_collapsed_forward)/((float)setif_stats.cycles_searched_forward)); + fprintf(f,"Average Length: %f\n", + 1+((float)setif_stats.cycles_length_forward) / ((float)setif_stats.cycles_collapsed_forward)); + fprintf(f,"\n"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Reverse Cycles"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Collapsed: %d\n",setif_stats.cycles_collapsed_backward); + fprintf(f,"Searched: %d\n",setif_stats.cycles_searched_backward); + fprintf(f,"Hit rate: %f\n", + ((float)setif_stats.cycles_collapsed_backward)/((float)setif_stats.cycles_searched_backward)); + fprintf(f,"Average Length: %f\n", + 1+((float)setif_stats.cycles_length_backward) / ((float)setif_stats.cycles_collapsed_backward)); + fprintf(f,"=====================================\n"); +} + +/* + for now, print stamps and types for sources and sinks. + must eventually rely on specialized code +*/ +void setif_print_constraint_graph(FILE *f) +{ + setif_var_list_scanner scan; + gen_e_list_scanner scan_edges; + gen_e edge; + setif_var v; + dot_node n1,n2; + char temp_str[512]; + + graph_attr graph_style[3] = {{g_size,"\"8.5,11\""}, + {g_center,"true"}, + {g_orientation,"portrait"}}; + edge_attr succ_edge[1] = {{e_style,"solid"}}; + edge_attr pred_edge[1] = {{e_style,"dotted"}}; + + dot_start(f,"setif",TRUE,TRUE); + dot_global_graph_style(graph_style,3); + + setif_var_list_scan(setif_vars,&scan); + while(setif_var_list_next(&scan,&v)) + { + snprintf(temp_str,512,"%s:%ld",sv_get_name(v),sv_get_stamp(v)); + n1 = dot_get_node(temp_str); + gen_e_list_scan(sv_get_lbs(v),&scan_edges); + while(gen_e_list_next(&scan_edges,&edge)) + { + if (setif_is_var(edge)) + { + snprintf(temp_str,512,"%s:%ld",sv_get_name((setif_var)edge), + setif_get_stamp(edge)); + n2 = dot_get_node(temp_str); + } + else + { + snprintf(temp_str,512,"source:%ld",setif_get_stamp(edge)); + n2 = dot_get_node(temp_str); + } + dot_styled_edge(n2,n1,pred_edge,1); + } + + gen_e_list_scan(sv_get_ubs(v),&scan_edges); + while(gen_e_list_next(&scan_edges,&edge)) + { + if (setif_is_var(edge)) + { + snprintf(temp_str,512,"%s:%ld",sv_get_name((setif_var)edge), + setif_get_stamp(edge)); + n2 = dot_get_node(temp_str); + } + else + { + snprintf(temp_str,512,"sink:%ld",setif_get_stamp(edge)); + n2 = dot_get_node(temp_str); + } + dot_styled_edge(n1,n2,succ_edge,1); + } + + gen_e_list_scan(sv_get_ub_projs(v),&scan_edges); + while(gen_e_list_next(&scan_edges,&edge)) + { + snprintf(temp_str,512,"projpat:%ld",setif_get_stamp(edge)); + n2 = dot_get_node(temp_str); + dot_styled_edge(n1,n2,succ_edge,1); + } + + } + + dot_end(); +} + diff --git a/libbanshee/engine/setif-sort.h b/libbanshee/engine/setif-sort.h new file mode 100644 index 00000000000..7cfabc81752 --- /dev/null +++ b/libbanshee/engine/setif-sort.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef SETIF_SORT_H +#define SETIF_SORT_H + +#include <stdio.h> +#include "banshee.h" +#include "termhash.h" +#include "setif-var.h" + + +struct setif_term /* extends gen_e */ +{ +#ifdef NONSPEC + const sort_kind sort; +#endif + const int type; + const stamp st; +}; + +typedef struct setif_term *setif_term; + +extern bool flag_merge_projections; +extern bool flag_eliminate_cycles; + +extern region setif_region; +extern term_hash setif_hash; + +typedef bool (*res_proj_fn_ptr) (setif_var v,gen_e proj) deletes; +typedef gen_e (*proj_con_fn_ptr) (gen_e) deletes; + +stamp setif_get_stamp(gen_e e); +void setif_inclusion(con_match_fn_ptr,res_proj_fn_ptr, gen_e, gen_e) deletes; + +bool setif_proj_merge(setif_var v, gen_e se, get_proj_fn_ptr get_proj, + proj_con_fn_ptr make_proj,fresh_large_fn_ptr fresh_var, + incl_fn_ptr sort_incl, incl_fn_ptr set_incl) deletes; + +gen_e setif_zero(void); +gen_e setif_one(void); +gen_e setif_fresh(const char *name); +gen_e setif_fresh_large(const char *name); +gen_e setif_fresh_small(const char *name); +gen_e setif_constant(const char *name) deletes; +gen_e setif_union(gen_e_list exprs) deletes; +gen_e setif_inter(gen_e_list exprs) deletes; +bool setif_is_zero(gen_e e); +bool setif_is_one(gen_e e); +bool setif_is_var(gen_e e); +bool setif_is_union(gen_e e); +bool setif_is_inter(gen_e e); +bool setif_is_constant(gen_e e); +char *setif_get_constant_name(gen_e e); + +gen_e_list setif_get_union(gen_e e); +gen_e_list setif_get_inter(gen_e e); + +gen_e_list setif_tlb(gen_e e) deletes; + +void setif_set_proj_cache(gen_e e, gen_e elem); +gen_e_list setif_get_proj_cache(gen_e e); + +void setif_init(void); +void setif_reset(void) deletes; +void setif_print_stats(FILE *f); +void setif_print_constraint_graph(FILE *f); + +extern struct setif_stats setif_stats; +struct setif_stats +{ + int fresh; + int fresh_small; + int fresh_large; + + int distinct_constructors; + int hashed_constructors; + int distinct_constants; + int hashed_constants; + int distinct_unions; + int filtered_unions; + int hashed_unions; + int distinct_intersections; + int filtered_intersections; + int hashed_intersections; + + int redundant_pred; + int redundant_succ; + int redundant_source; + int redundant_sink; + + int added_pred; + int added_succ; + int added_source; + int added_sink; + + int cycles_searched_forward; + 
int cycles_searched_backward; + + int cycles_collapsed_forward; + int cycles_collapsed_backward; + + int cycles_length_forward; + int cycles_length_backward; +}; + +#endif /* SETIF_SORT_H */ diff --git a/libbanshee/engine/setif-var.c b/libbanshee/engine/setif-var.c new file mode 100644 index 00000000000..21941021284 --- /dev/null +++ b/libbanshee/engine/setif-var.c @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <stdio.h> +#include <assert.h> +#include <regions.h> +#include "setif-var.h" +#include "ufind.h" +#include "bounds.h" + +struct sv_info +{ + stamp st; + bounds sameregion lbs; + bounds sameregion ubs; + jcoll tlb_cache; + gen_e_list ub_projs; + const char *name; + uf_element component; +}; + +typedef struct sv_info *sv_info; + +DECLARE_UFIND(sv_elt,sv_info) + +DEFINE_UFIND(sv_elt,sv_info) + +DEFINE_LIST(setif_var_list,setif_var) + +#define get_info(v) (sv_elt_get_info((v)->elt)) + +struct setif_var /* extends gen_e */ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + sv_elt elt; +}; + +bool sv_lt(setif_var v1, setif_var v2) +{ + return ( sv_get_stamp(v1) < sv_get_stamp(v2) ); +} + +bool sv_eq(setif_var v1, setif_var v2) +{ + return ( sv_get_stamp(v1) == sv_get_stamp(v2) ); +} + +static setif_var make_var(region r, const char *name, stamp st) +{ + setif_var result = ralloc(r, struct setif_var); + sv_info info = ralloc(r, struct sv_info); + + info->st = st; + info->lbs = bounds_create(r); + info->ubs = bounds_create(r); + info->tlb_cache = NULL; + info->ub_projs = new_gen_e_list(r); + info->name = name ? 
rstrdup(r,name) : "fv"; + info->component = new_uf_element(r, NULL); + + result->type = VAR_TYPE; + result->elt = new_sv_elt(r,info); + +#ifdef NONSPEC + result->sort = setif_sort; +#endif + + return result; +} + +setif_var sv_fresh(region r, const char *name) +{ + return make_var(r,name,stamp_fresh()); +} + +setif_var sv_fresh_large(region r, const char *name) +{ + return make_var(r,name,stamp_fresh_large()); +} + +setif_var sv_fresh_small(region r, const char *name) +{ + return make_var(r,name,stamp_fresh_small()); +} + +stamp sv_get_stamp(setif_var v) +{ + return get_info(v)->st; +} + +const char *sv_get_name(setif_var v) +{ + return get_info(v)->name; +} + + +static sv_info combine(sv_info i1, sv_info i2) +{ + if (i1->st < i2->st) + return i1; + else return i2; +} + +void sv_unify(setif_var v,setif_var_list vars) +{ + setif_var temp; + setif_var_list_scanner scan; + + setif_var_list_scan(vars,&scan); + + while (setif_var_list_next(&scan,&temp)) + { + sv_elt_unify(combine,v->elt,temp->elt); + } +} + +gen_e_list sv_get_lbs(setif_var v) +{ + return bounds_exprs(get_info(v)->lbs); +} + +gen_e_list sv_get_ubs(setif_var v) +{ + return bounds_exprs(get_info(v)->ubs); +} + +bool sv_add_ub(setif_var v, gen_e e, stamp s) +{ + return bounds_add(get_info(v)->ubs,e,s); +} + +bool sv_add_lb(setif_var v, gen_e e, stamp s) +{ + return bounds_add(get_info(v)->lbs,e,s); +} + +bool sv_is_ub(setif_var v, stamp s) +{ + bool self_edge = sv_get_stamp(v) == s, + in_bounds = bounds_query(get_info(v)->ubs,s); + + return (self_edge || in_bounds); +} + +bool sv_is_lb(setif_var v, stamp s) +{ + + bool self_edge = sv_get_stamp(v) == s, + in_bounds = bounds_query(get_info(v)->lbs,s); + + return (self_edge || in_bounds); + +} + +void sv_add_ub_proj(setif_var v, gen_e e) +{ + gen_e_list_cons(e,get_info(v)->ub_projs); +} + +gen_e sv_get_ub_proj(setif_var v, get_proj_fn_ptr get_proj) +{ + return get_proj(get_info(v)->ub_projs); +} + +gen_e_list sv_get_ub_projs(setif_var v) +{ + return get_info(v)->ub_projs; +} + + +bool sv_union_component(setif_var v1, setif_var v2) +{ + if (uf_eq(get_info(v1)->component,get_info(v2)->component)) + return FALSE; + + else + { + uf_union(get_info(v1)->component,get_info(v2)->component); + return TRUE; + } +} + +void sv_set_tlb_cache(setif_var v, jcoll j) +{ + get_info(v)->tlb_cache = j; +} + +jcoll sv_get_tlb_cache(setif_var v) +{ + return get_info(v)->tlb_cache; +} + +void sv_clear_tlb_cache(setif_var v) +{ + get_info(v)->tlb_cache = NULL; +} diff --git a/libbanshee/engine/setif-var.h b/libbanshee/engine/setif-var.h new file mode 100644 index 00000000000..6ded7363ae2 --- /dev/null +++ b/libbanshee/engine/setif-var.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef SETIF_VAR_H +#define SETIF_VAR_H + +#include "linkage.h" +#include "banshee.h" +#include "jcollection.h" + +EXTERN_C_BEGIN + +typedef struct setif_var *setif_var; + +DECLARE_LIST(setif_var_list,setif_var) + +bool sv_lt(setif_var v1, setif_var v2); +bool sv_eq(setif_var v1, setif_var v2); +setif_var sv_fresh(region r, const char *name); +setif_var sv_fresh_large(region r, const char *name); +setif_var sv_fresh_small(region r, const char *name); +stamp sv_get_stamp(setif_var v); +const char *sv_get_name(setif_var v); +void sv_unify(setif_var v,setif_var_list vars); +gen_e_list sv_get_lbs(setif_var v); +gen_e_list sv_get_ubs(setif_var v); +bool sv_add_ub(setif_var v, gen_e e, stamp st); +bool sv_add_lb(setif_var v, gen_e e, stamp st); +bool sv_is_ub(setif_var v, stamp st); +bool sv_is_lb(setif_var v, stamp st); +void sv_set_tlb_cache(setif_var v, jcoll j); +jcoll sv_get_tlb_cache(setif_var v); +void sv_clear_tlb_cache(setif_var v); +void sv_add_ub_proj(setif_var v, gen_e e); + +gen_e sv_get_ub_proj(setif_var v, get_proj_fn_ptr get_proj); +gen_e_list sv_get_ub_projs(setif_var v); + +bool sv_union_component(setif_var v1, setif_var v2); + +EXTERN_C_END + + +#endif /* SETIF_VAR_H */ + diff --git a/libbanshee/engine/setst-sort.c b/libbanshee/engine/setst-sort.c new file mode 100644 index 00000000000..5d89817ed00 --- /dev/null +++ b/libbanshee/engine/setst-sort.c @@ -0,0 +1,907 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <regions.h> +#include <assert.h> +#include <stdio.h> +#include "bounds.h" +#include "setst-sort.h" + + +struct setst_union_ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + stamp st; + gen_e_list exprs; + gen_e_list proj_cache; +}; + +struct setst_inter_ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + stamp st; + gen_e_list exprs; +}; + +struct setst_constant_ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + stamp st; + char *name; +}; + +typedef struct setst_inter_ *setst_inter_; +typedef struct setst_union_ *setst_union_; +typedef struct setst_constant_ *setst_constant_; + +static region tlb_cache_region; +static jcoll_dict tlb_dict; +static setst_var_list setst_vars; +static bool setst_changed = FALSE; + +region setst_region; +term_hash setst_hash; +struct setst_stats setst_stats; + +stamp setst_get_stamp(gen_e e) +{ +#ifdef NONSPEC + assert(e->sort == setst_sort); +#endif + + if ( ((setst_term)e)->type == VAR_TYPE) + return st_get_stamp( (setst_var)e ); + + else + return ((setst_term)e)->st; +} + +static bool eq(gen_e e1, gen_e e2) +{ + return ( setst_get_stamp(e1) == setst_get_stamp(e2) ); +} + +static gen_e_list get_union(gen_e e) +{ + assert ( ((setst_term)e)->type == UNION_TYPE); + + return ( (setst_union_) e)->exprs; +} + +static gen_e_list get_inter(gen_e e) +{ + assert ( ((setst_term)e)->type == INTER_TYPE); + + return ( (setst_inter_) e)->exprs; +} + +static void update_lower_bound(setst_var v, gen_e e) +{ + if (setst_is_var(e)) + { + if (st_add_lb(v,(setst_var)e)) + { + setst_stats.redundant_var++; + } + else + { + setst_stats.added_var++; + setst_changed = TRUE; + } + } + else + { + if (st_add_source(v, e,setst_get_stamp(e))) + { + setst_stats.redundant_source++; + } + else + { + setst_stats.added_source++; + setst_changed = TRUE; + } + } + +} + +static void update_upper_bound(setst_var v, gen_e e) +{ + assert(! 
setst_is_var(e)); + + if (st_add_sink(v,e,setst_get_stamp(e))) + { + setst_stats.redundant_sink++; + } + else + { + setst_stats.added_sink++; + setst_changed = TRUE; + } +} + + +void setst_inclusion(con_match_fn_ptr con_match,gen_e e1, gen_e e2) +{ + if (eq(e1,e2)) + return; + + else if ( setst_is_zero(e1) || setst_is_one(e2) ) + return; + + else if (setst_is_union(e1)) + { + gen_e_list_scanner scan; + gen_e temp; + + gen_e_list exprs = get_union(e1); + + gen_e_list_scan(exprs,&scan); + while (gen_e_list_next(&scan,&temp)) + { + setst_inclusion(con_match,temp,e2); + } + + return; + } + + else if (setst_is_inter(e2)) + { + gen_e_list_scanner scan; + gen_e temp; + + gen_e_list exprs = get_inter(e2); + + gen_e_list_scan(exprs,&scan); + while (gen_e_list_next(&scan,&temp)) + { + setst_inclusion(con_match,e1,temp); + } + + return; + } + + else if (setst_is_var(e2)) + { + setst_var v = (setst_var)e2; + + update_lower_bound(v,e1); + } + + else if (setst_is_var(e1)) + { + setst_var v = (setst_var)e1; + + update_upper_bound(v,e2); + } + + else con_match(e1,e2); +} + +#ifdef NONSPEC +static struct setst_term zero = {setst_sort,ZERO_TYPE,ZERO_TYPE}; +static struct setst_term one = {setst_sort,ONE_TYPE,ONE_TYPE}; +#else +static struct setst_term zero = {ZERO_TYPE,ZERO_TYPE}; +static struct setst_term one = {ONE_TYPE,ONE_TYPE}; +#endif /* NONSPEC */ + +gen_e setst_zero(void) +{ + return (gen_e)&zero; +} + +gen_e setst_one(void) +{ + return (gen_e)&one; +} + +gen_e setst_fresh(const char *name) +{ + setst_var v = st_fresh(setst_region,name); + setst_var_list_cons(v,setst_vars); + return (gen_e)v; +} + +gen_e setst_fresh_large(const char *name) +{ + setst_var v = st_fresh_large(setst_region,name); + setst_var_list_cons(v,setst_vars); + return (gen_e)v; +} + +gen_e setst_fresh_small(const char *name) +{ + setst_var v = st_fresh_small(setst_region,name); + setst_var_list_cons(v,setst_vars); + return (gen_e)v; +} + +gen_e setst_constant(const char *str) deletes +{ + stamp st[2]; + gen_e result; + char *name = rstrdup(setst_region,str); + + assert (str != NULL); + + st[0] = CONSTANT_TYPE; + st[1] = stamp_string(name); + + if ( (result = term_hash_find(setst_hash,st,2)) == NULL) + { + setst_constant_ c = ralloc(setst_region, struct setst_constant_); + c->type = CONSTANT_TYPE; + c->st = stamp_fresh(); + c->name = name; + + result = (gen_e) c; + term_hash_insert(setst_hash,result,st,2); + + setst_stats.distinct_constants++; + + return result; + } + + else + { + setst_stats.hashed_constants++; + return result; + } +} + +static bool filter_zero(const gen_e e) +{ + return (!setst_is_zero(e)); +} + + +static bool filter_one(const gen_e e) +{ + return (!setst_is_one(e)); +} + +gen_e setst_union(gen_e_list exprs) deletes +{ + gen_e_list filtered = gen_e_list_filter(setst_region,exprs,filter_zero); + + if ( gen_e_list_empty(filtered) ) + { + setst_stats.filtered_unions++; + return setst_zero(); + } + else if (gen_e_list_length(filtered) == 1) + { + setst_stats.filtered_unions++; + return gen_e_list_head(filtered); + } + + else + { + int i = 0; + gen_e temp,result; + gen_e_list_scanner scan; + stamp st[ gen_e_list_length(filtered) + 1 ]; + + st[0] = UNION_TYPE; + + gen_e_list_scan(filtered,&scan); + while (gen_e_list_next(&scan,&temp)) + { + st[++i] = setst_get_stamp(temp); + } + + if ( (result = + term_hash_find(setst_hash,st,gen_e_list_length(filtered)+1)) + == NULL ) + { + struct setst_union_ *u = ralloc(setst_region,struct setst_union_); + + u->type = UNION_TYPE; + u->st = stamp_fresh(); + u->proj_cache = 
new_gen_e_list(setst_region); + u->exprs = filtered; + + result = (gen_e)u; + term_hash_insert(setst_hash,result,st,gen_e_list_length(filtered)+1); + + setst_stats.distinct_unions++; + return result; + } + else + { + setst_stats.hashed_unions++; + return result; + } + } +} + +gen_e setst_inter(gen_e_list exprs) deletes +{ + gen_e_list filtered = gen_e_list_filter(setst_region,exprs,filter_one); + + if ( gen_e_list_empty(filtered) ) + { + setst_stats.filtered_intersections++; + return setst_one(); + } + else if (gen_e_list_length(filtered) == 1) + { + setst_stats.filtered_intersections++; + return gen_e_list_head(filtered); + } + + else + { + int i = 0; + gen_e temp,result; + gen_e_list_scanner scan; + stamp st[ gen_e_list_length(filtered) + 1 ]; + + st[0] = INTER_TYPE; + + gen_e_list_scan(filtered,&scan); + while (gen_e_list_next(&scan,&temp)) + { + st[++i] = setst_get_stamp(temp); + } + + if ( (result = + term_hash_find(setst_hash,st,gen_e_list_length(filtered)+1)) + == NULL ) + { + struct setst_inter_ *u = ralloc(setst_region,struct setst_inter_); + + u->type = INTER_TYPE; + u->st = stamp_fresh(); + u->exprs = filtered; + + result = (gen_e)u; + term_hash_insert(setst_hash,result,st,gen_e_list_length(filtered)+1); + + setst_stats.distinct_intersections++; + + return result; + } + else + { + setst_stats.hashed_intersections++; + return result; + } + } +} + + +gen_e_list setst_get_union(gen_e e) +{ + assert (((setst_term)e)->type == UNION_TYPE); + + return ((setst_union_)e)->exprs; +} + + +gen_e_list setst_get_inter(gen_e e) +{ + assert (((setst_term)e)->type == INTER_TYPE); + + return ((setst_inter_)e)->exprs; +} + +static void invalidate_tlb_cache(void) +{ + assert(tlb_cache_region); + + jcoll_delete_dict(tlb_dict); + setst_var_list_app(setst_vars,st_clear_tlb_cache); + deleteregion_ptr(&tlb_cache_region); + + tlb_cache_region = newregion(); + tlb_dict = jcoll_create_dict(tlb_cache_region,setst_get_stamp); +} + +static void set_tlb_cache(setst_var v,jcoll j) +{ + st_set_tlb_cache(v,j); +} + +static void collect_sinks(bounds b,setst_var v) +{ + gen_e sink; + gen_e_list_scanner scan; + + gen_e_list_scan(st_get_sinks(v),&scan); + + while (gen_e_list_next(&scan,&sink)) + { + bounds_add(b,sink,setst_get_stamp(sink)); + } +} + +static void collect_sources(bounds b, setst_var v) +{ + gen_e source; + gen_e_list_scanner scan; + + gen_e_list_scan(st_get_sources(v),&scan); + + while (gen_e_list_next(&scan,&source)) + { + bounds_add(b,source,setst_get_stamp(source)); + } +} + +static void collect_lower_bounds(bounds b, setst_var v) +{ + setst_var lb; + setst_var_list_scanner scan; + + setst_var_list_scan(st_get_lbs(v),&scan); + + while (setst_var_list_next(&scan,&lb)) + { + bounds_add(b,(gen_e)lb,st_get_stamp(lb)); + } +} + +static void apply_sources(setst_var witness, bounds sources) +{ + gen_e source; + gen_e_list_scanner scan; + + gen_e_list_scan(bounds_exprs(sources),&scan); + + while (gen_e_list_next(&scan,&source)) + { + if ( st_add_source(witness,source,setst_get_stamp(source))) + setst_stats.redundant_source++; + + else + setst_stats.added_source++; + } +} + +static void apply_sinks(setst_var witness, bounds sinks) +{ + gen_e sink; + gen_e_list_scanner scan; + + gen_e_list_scan(bounds_exprs(sinks),&scan); + + while (gen_e_list_next(&scan,&sink)) + { + if (st_add_sink(witness,sink,setst_get_stamp(sink))) + setst_stats.redundant_sink++; + + else + setst_stats.added_sink++; + } +} + +static void apply_lower_bounds(setst_var witness,bounds lower) +{ + gen_e lb; + gen_e_list_scanner scan; + + 
gen_e_list_scan(bounds_exprs(lower),&scan); + + while (gen_e_list_next(&scan,&lb)) + { + if (st_add_lb(witness,(setst_var)lb)) + setst_stats.redundant_var++; + else + setst_stats.added_var++; + } +} + +static void collapse_cycle(setst_var witness, setst_var_list cycle) deletes +{ + setst_var_list_scanner var_scan; + setst_var temp; + region scratch_rgn = newregion(); + + bounds sources = bounds_create(scratch_rgn); + bounds sinks = bounds_create(scratch_rgn); + bounds lower = bounds_create(scratch_rgn); + + + setst_stats.cycles_collapsed++; + + /* force at least another iteration */ + setst_changed = TRUE; + + /* collect all bounds */ + setst_var_list_scan(cycle,&var_scan); + while (setst_var_list_next(&var_scan,&temp)) + { + collect_sources(sources,temp); + collect_sinks(sinks,temp); + collect_lower_bounds(lower,temp); + } + + /* unify all vars */ + st_unify(witness,cycle); + + /* add all bounds back */ + apply_sources(witness,sources); + apply_sinks(witness,sinks); + apply_lower_bounds(witness,lower); + + /* cleanup */ + bounds_delete(sources); + bounds_delete(sinks); + bounds_delete(lower); + deleteregion(scratch_rgn); + + /* remove self edges */ + st_repair_bounds(witness); +} +/* +static bool cycle_detect(setst_var goal, setst_var_list path, + setst_var_list *result) +{ + int pos = st_get_path_pos(goal); + setst_stats.cycles_searched++; + + if (pos) + { + setst_var_list_scanner scan; + setst_var temp; + setst_var_list cycle = new_setst_var_list(tlb_cache_region); + + setst_var_list_scan(path,&scan); + while(setst_var_list_next(&scan,&temp)) + { + if (st_get_path_pos(temp) >= pos) + setst_var_list_cons(temp,cycle); + } + + *result = cycle; + return TRUE; + } + + else + return FALSE; +} + +*/ +static bool cycle_detect(setst_var goal, setst_var_list path, + setst_var_list *result) +{ + setst_var_list cycle = + setst_var_list_reverse(setst_var_list_copy(tlb_cache_region,path)); + + setst_stats.cycles_searched++; + + while (!setst_var_list_empty(cycle) && + !eq((gen_e)setst_var_list_head(cycle),(gen_e)goal)) + { + setst_var_list_tail(cycle); + } + + if (setst_var_list_empty(cycle)) + { + return FALSE; + } + else + { + *result = cycle; + return TRUE; + } +} + +static jcoll tlb_aux(gen_e e,int path_len,setst_var_list path) deletes +{ + if (setst_is_var(e)) + { + setst_var_list cycle; + setst_var v = (setst_var)e; + if ( cycle_detect(v,path,&cycle) ) + { + setst_stats.cycles_length += setst_var_list_length(cycle); + collapse_cycle(v,cycle); + return NULL; + } + else + { + if (st_get_tlb_cache(v) != NULL) + return st_get_tlb_cache(v); + else + { + jcoll result; + setst_var_list_scanner scan; + setst_var lb; + jcoll_list jvars = new_jcoll_list(tlb_cache_region); + + gen_e_list sources = gen_e_list_copy(tlb_cache_region, + st_get_sources(v)); + + st_set_path_pos(v,path_len); + setst_var_list_scan(st_get_lbs(v),&scan); + while (setst_var_list_next(&scan,&lb)) + { + setst_var_list_cons(v,path); + jcoll_list_cons(tlb_aux((gen_e)lb,++path_len,path), + jvars); + setst_var_list_tail(path); + } + + if (! 
gen_e_list_empty(sources)) + jcoll_list_cons(jcoll_create_chain(tlb_dict,sources), + jvars); + result = jcoll_jjoin(tlb_dict,jvars); + set_tlb_cache(v,result); + st_set_path_pos(v,0); + return result; + } + + } + } + else if (setst_is_union(e)) + { + gen_e_list_scanner scan; + gen_e temp; + jcoll_list jexprs = new_jcoll_list(tlb_cache_region); + + gen_e_list_scan(setst_get_union(e),&scan); + while (gen_e_list_next(&scan,&temp)) + { + jcoll_list_cons(tlb_aux(temp,++path_len,path),jexprs); + } + + return jcoll_jjoin(tlb_dict,jexprs); + } + else + { + fail("Unmatched case in setst tlb computation\n"); + return NULL; + } +} +static gen_e_list tlb(gen_e e) +{ + return jcoll_flatten(tlb_dict, + tlb_aux(e,1,new_setst_var_list(tlb_cache_region)) ); +} +static void match_sinks(incl_fn_ptr setst_incl) +{ + gen_e_list_scanner tlb_scanner, sink_scanner; + setst_var_list_scanner var_scanner; + setst_var v; + gen_e lb, sink; + + setst_var_list_scan(setst_vars,&var_scanner); + + while (setst_var_list_next(&var_scanner,&v)) + { + gen_e_list tlbs = tlb((gen_e)v); + gen_e_list snks = st_get_sinks(v); + + + if(gen_e_list_empty(st_get_sinks(v))) + { + setst_stats.no_sinks++; + continue; + } + else if(st_get_seen(v)) + { + setst_stats.incycle_vars++; + continue; + } + else if (gen_e_list_length(tlbs) == st_get_src_sz(v) + && gen_e_list_length(snks) == st_get_snk_sz(v) ) + { + setst_stats.unchanged_vars++; + continue; + } + st_set_seen(v,TRUE); + + st_set_src_sz(v,gen_e_list_length(tlbs)); + st_set_snk_sz(v,gen_e_list_length(snks)); + + gen_e_list_scan(tlbs,&tlb_scanner); + + while (gen_e_list_next(&tlb_scanner,&lb)) + { + gen_e_list_scan(snks,&sink_scanner); + + while (gen_e_list_next(&sink_scanner,&sink)) + setst_incl(lb,sink); + } + } +} +static void iterate(incl_fn_ptr setst_incl) +{ + setst_var_list_scanner var_scanner; + setst_var v; + /* static int iterations = 0; */ + setst_changed = FALSE; + + setst_var_list_scan(setst_vars,&var_scanner); + while (setst_var_list_next(&var_scanner,&v)) + { + st_set_seen(v,FALSE); + } + + invalidate_tlb_cache(); + match_sinks(setst_incl); + + /* fprintf(stderr,"Iterations : %d\n",++iterations); */ + + if (setst_changed) + iterate(setst_incl); +} +gen_e_list setst_tlb(gen_e e,incl_fn_ptr setst_incl) deletes +{ + if (! 
setst_changed) + { + return tlb(e); + } + else + { + iterate(setst_incl); + return tlb(e); + } + +} + +void setst_set_proj_cache(gen_e e, gen_e elem) +{ + if (setst_is_union(e)) + { + setst_union_ u = (setst_union_)e; + gen_e_list_cons(elem,u->proj_cache); + } +} + +gen_e_list setst_get_proj_cache(gen_e e) +{ + + if (setst_is_union(e)) + { + setst_union_ u = (setst_union_)e; + return u->proj_cache; + } + else + { + fail("Term does not cache projections\n"); + return NULL; + } +} + +void setst_init(void) +{ + setst_region = newregion(); + tlb_cache_region = newregion(); + setst_hash = make_term_hash(setst_region); + setst_vars = new_setst_var_list(setst_region); + tlb_dict = jcoll_create_dict(tlb_cache_region,setst_get_stamp); +} + +void setst_reset(void) deletes +{ + term_hash_delete(setst_hash); + deleteregion_ptr(&setst_region); + + setst_region = newregion(); + setst_hash = make_term_hash(setst_region); + setst_vars = new_setst_var_list(setst_region); + invalidate_tlb_cache(); + setst_changed = FALSE; +} + +bool setst_is_zero(gen_e e) +{ + return ((setst_term)e)->type == ZERO_TYPE; +} + +bool setst_is_one(gen_e e) +{ + return ((setst_term)e)->type == ONE_TYPE; +} + +bool setst_is_var(gen_e e) +{ + return ((setst_term)e)->type == VAR_TYPE; +} + +bool setst_is_union(gen_e e) +{ + return ((setst_term)e)->type == UNION_TYPE; +} + +bool setst_is_inter(gen_e e) +{ + return ((setst_term)e)->type == INTER_TYPE; +} + +char *setst_get_constant_name(gen_e e) +{ + assert( ((setst_term)e)->type == CONSTANT_TYPE ); + + return ((setst_constant_)e)->name; +} + +void setst_print_stats(FILE *f) +{ + fprintf(f,"\n========== SetST Var Stats ==========\n"); + fprintf(f,"Fresh : %d\n",setst_stats.fresh); + fprintf(f,"Fresh Small : %d\n",setst_stats.fresh_small); + fprintf(f,"Fresh Large : %d\n",setst_stats.fresh_large); + fprintf(f,"Total : %d\n",setst_stats.fresh + setst_stats.fresh_small + + setst_stats.fresh_large); + fprintf(f,"\n========== SetST Sort Stats ==========\n"); + fprintf(f,"\n"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Additions"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Var: %d\n",setst_stats.added_var); + fprintf(f,"Source: %d\n",setst_stats.added_source); + fprintf(f,"Sink: %d",setst_stats.added_sink); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Total: %d",setst_stats.added_var + setst_stats.added_source + + setst_stats.added_sink); + fprintf(f,"\n"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Redundant"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Var: %d\n",setst_stats.redundant_var); + fprintf(f,"Source: %d\n",setst_stats.redundant_source); + fprintf(f,"Sink: %d",setst_stats.redundant_sink); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Total: %d\n", + setst_stats.redundant_var + setst_stats.redundant_source + + setst_stats.redundant_sink); + + fprintf(f,"\n"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Iteration Optimizations"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Skipped vars: %d\n",setst_stats.incycle_vars); + fprintf(f,"Unchanged vars: %d\n",setst_stats.unchanged_vars); + fprintf(f,"Vars w/o sinks: %d\n",setst_stats.no_sinks); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Cycles"); + fprintf(f,"\n------------------------------\n"); + fprintf(f,"Collapsed: %d\n",setst_stats.cycles_collapsed); + fprintf(f,"Searched: %d\n",setst_stats.cycles_searched); + fprintf(f,"Hit rate: %f\n", + 
((float)setst_stats.cycles_collapsed)/((float)setst_stats.cycles_searched)); + fprintf(f,"Average Length: %f\n", + ((float)setst_stats.cycles_length) / ((float)setst_stats.cycles_collapsed)); + fprintf(f,"=====================================\n"); +} + diff --git a/libbanshee/engine/setst-sort.h b/libbanshee/engine/setst-sort.h new file mode 100644 index 00000000000..5aafde398cf --- /dev/null +++ b/libbanshee/engine/setst-sort.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + +#ifndef SETST_SORT_H +#define SETST_SORT_H + +#include "banshee.h" +#include "termhash.h" +#include "setst-var.h" + +extern region setst_region; +extern term_hash setst_hash; + +struct setst_term /* extends gen_e */ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + stamp st; +}; + +typedef struct setst_term *setst_term; + +stamp setst_get_stamp(gen_e e); +void setst_inclusion(con_match_fn_ptr,gen_e, gen_e); + +gen_e setst_zero(void); +gen_e setst_one(void); +gen_e setst_fresh(const char *name); +gen_e setst_fresh_large(const char *name); +gen_e setst_fresh_small(const char *name); +gen_e setst_constant(const char *name) deletes; +gen_e setst_union(gen_e_list exprs) deletes; +gen_e setst_inter(gen_e_list exprs) deletes; +bool setst_is_zero(gen_e e); +bool setst_is_one(gen_e e); +bool setst_is_var(gen_e e); +bool setst_is_union(gen_e e); +bool setst_is_inter(gen_e e); +bool setst_is_constant(gen_e e); + +char *setst_get_constant_name(gen_e e); +gen_e_list setst_get_union(gen_e e); +gen_e_list setst_get_inter(gen_e e); + +gen_e_list setst_tlb(gen_e e,incl_fn_ptr setst_incl) deletes; +void setst_set_proj_cache(gen_e e, gen_e elem); +gen_e_list setst_get_proj_cache(gen_e e); + + +void setst_init(void); +void setst_reset(void) deletes; +void setst_print_stats(FILE *f); + +extern struct setst_stats setst_stats; + +struct setst_stats +{ + int fresh; + int fresh_large; + int fresh_small; + + int distinct_constructors; + int hashed_constructors; + int distinct_constants; + int hashed_constants; + int distinct_unions; + int filtered_unions; + int hashed_unions; + int distinct_intersections; + int filtered_intersections; + int hashed_intersections; + + int redundant_var; + int redundant_source; + int redundant_sink; + + int added_var; + int added_source; + int added_sink; + + int incycle_vars; + int unchanged_vars; + int no_sinks; + + int cycles_searched; + int cycles_collapsed; + int cycles_length; +}; + + +#endif /* SETST_SORT_H */ + diff --git a/libbanshee/engine/setst-var.c b/libbanshee/engine/setst-var.c new file mode 100644 index 00000000000..ba4c59eb5de --- /dev/null +++ b/libbanshee/engine/setst-var.c @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <stdio.h> +#include <assert.h> +#include <regions.h> +#include "setst-var.h" +#include "jcollection.h" +#include "ufind.h" +#include "bounds.h" + +struct st_info +{ + stamp st; + bounds lbs; + bounds sources; + bounds sinks; + jcoll tlb_cache; + const char *name; + bool seen; + int path_pos; + int src_sz; + int snk_sz; +}; + +typedef struct st_info *st_info; + +DECLARE_UFIND(st_elt,st_info) + +DEFINE_UFIND(st_elt,st_info) + +DEFINE_LIST(setst_var_list,setst_var) + +#define get_info(v) (st_elt_get_info((v)->elt)) + +struct setst_var /* extends gen_e */ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + st_elt elt; +}; + +bool st_eq(setst_var v1, setst_var v2) +{ + return (st_get_stamp(v1) == st_get_stamp(v2)); +} + +static setst_var make_var(region r, const char *name, stamp st) +{ + setst_var result = ralloc(r,struct setst_var); + st_info info = ralloc(r, struct st_info); + + info->st = st; + info->lbs = bounds_create(r); + info->sources = bounds_create(r); + info->sinks = bounds_create(r); + info->tlb_cache = NULL; + info->name = name ? rstrdup(r,name) : "fv"; + info->seen = FALSE; + info->path_pos = 0; + info->src_sz = 0; + info->snk_sz = 0; + + result->type = VAR_TYPE; + result->elt = new_st_elt(r,info); + + +#ifdef NONSPEC + result->sort = setst_sort; +#endif + + return result; +} + +setst_var st_fresh(region r, const char *name) +{ + return make_var(r,name,stamp_fresh()); +} + +setst_var st_fresh_large(region r, const char *name) +{ + return make_var(r,name,stamp_fresh_large()); +} + +setst_var st_fresh_small(region r, const char *name) +{ + return make_var(r,name,stamp_fresh_small()); +} + +stamp st_get_stamp(setst_var v) +{ + return get_info(v)->st; +} + +const char *st_get_name(setst_var v) +{ + return get_info(v)->name; +} + +void st_unify(setst_var v,setst_var_list vars) +{ + setst_var temp; + setst_var_list_scanner scan; + + setst_var_list_scan(vars,&scan); + + while (setst_var_list_next(&scan,&temp)) + { + st_elt_union(v->elt,temp->elt); + } +} + +setst_var_list st_get_lbs(setst_var v) +{ + return (setst_var_list)bounds_exprs(get_info(v)->lbs); +} + +gen_e_list st_get_sources(setst_var v) +{ + return bounds_exprs(get_info(v)->sources); +} + +gen_e_list st_get_sinks(setst_var v) +{ + return bounds_exprs(get_info(v)->sinks); +} + +bool st_add_lb(setst_var v, setst_var lb) +{ + return bounds_add(get_info(v)->lbs,(gen_e)lb,st_get_stamp(lb)); +} + +bool st_add_source(setst_var v, gen_e source, stamp s) +{ + return bounds_add(get_info(v)->sources,source,s); +} + +bool st_add_sink(setst_var v, gen_e sink, stamp s) +{ + return bounds_add(get_info(v)->sinks,sink,s); +} + +jcoll st_get_tlb_cache(setst_var v) +{ + return get_info(v)->tlb_cache; +} + +void st_set_tlb_cache(setst_var v, jcoll j) +{ + get_info(v)->tlb_cache = j; +} + +void st_clear_tlb_cache(setst_var v) +{ + get_info(v)->tlb_cache = NULL; +} + +gen_e st_get_ub_proj(setst_var v, get_proj_fn_ptr get_proj) +{ + return get_proj(st_get_sinks(v)); +} +static setst_var neq_temp; 
+static bool neq (const setst_var v2) +{ + return (!(st_get_stamp (neq_temp) == st_get_stamp (v2))); +} +void st_repair_bounds(setst_var v1) +{ + setst_var_list lbs; + neq_temp = v1; + lbs = setst_var_list_filter2(st_get_lbs(v1),neq); + + bounds_set(get_info(v1)->lbs,(gen_e_list)lbs); +} + +void st_set_path_pos(setst_var v, int pos) +{ + get_info(v)->path_pos = pos; +} + +int st_get_path_pos(setst_var v) +{ + return get_info(v)->path_pos; +} + +void st_set_seen(setst_var v, bool b) +{ + get_info(v)->seen = b; +} + +bool st_get_seen(setst_var v) +{ + return get_info(v)->seen; +} + +void st_set_src_sz(setst_var v, int size) +{ + get_info(v)->src_sz = size; +} + +int st_get_src_sz(setst_var v) +{ + return get_info(v)->src_sz; +} + +void st_set_snk_sz(setst_var v, int size) +{ + get_info(v)->snk_sz = size; +} + +int st_get_snk_sz(setst_var v) +{ + return get_info(v)->snk_sz; +} + + + + + + diff --git a/libbanshee/engine/setst-var.h b/libbanshee/engine/setst-var.h new file mode 100644 index 00000000000..6ef2f84a005 --- /dev/null +++ b/libbanshee/engine/setst-var.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + +#ifndef SETST_VAR_H +#define SETST_VAR_H + +#include "linkage.h" +#include "banshee.h" +#include "jcollection.h" + +EXTERN_C_BEGIN + +typedef struct setst_var *setst_var; + +DECLARE_LIST(setst_var_list,setst_var) + +bool st_eq(setst_var v1, setst_var v2); +setst_var st_fresh(region r, const char *name); +setst_var st_fresh_large(region r, const char *name); +setst_var st_fresh_small(region r, const char *name); +stamp st_get_stamp(setst_var v); +const char *st_get_name(setst_var v); +void st_unify(setst_var v,setst_var_list vars); +setst_var_list st_get_lbs(setst_var v); +gen_e_list st_get_sources(setst_var v); +gen_e_list st_get_sinks(setst_var v); +gen_e st_get_ub_proj(setst_var v, get_proj_fn_ptr get_proj); +bool st_add_lb(setst_var v, setst_var lb); +bool st_add_source(setst_var v, gen_e source, stamp s); +bool st_add_sink(setst_var v, gen_e sink, stamp s); + +void st_set_path_pos(setst_var v, int pos); +int st_get_path_pos(setst_var v); +void st_set_seen(setst_var v, bool b); +bool st_get_seen(setst_var v); +void st_set_src_sz(setst_var v, int size); +int st_get_src_sz(setst_var v); +void st_set_snk_sz(setst_var v, int size); +int st_get_snk_sz(setst_var v); + +jcoll st_get_tlb_cache(setst_var v); +void st_set_tlb_cache(setst_var v, jcoll j); +void st_clear_tlb_cache(setst_var v); + +void st_repair_bounds(setst_var v); + +EXTERN_C_END + +#endif /* SETST_VAR_H */ + diff --git a/libbanshee/engine/stamp.c b/libbanshee/engine/stamp.c new file mode 100644 index 00000000000..8a1f5a95faf --- /dev/null +++ b/libbanshee/engine/stamp.c @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + +#include <stdio.h> +#include "stamp.h" +#include "util.h" +#include "hash.h" +#include "list.h" +#define INITIAL_SIZE 32 +#define INITIAL1 0 +#define INITIAL2 2000 +#define INITIAL3 536870911 +#define MIN -1073741824 /* -2^30 */ +#define MAX 1073741824 /* 2^30 */ + +static hash_table str_hash; +static region str_hash_rgn; + +static int count1 = INITIAL1, count2 = INITIAL2, count3 = INITIAL3; +static int bounds1 = MIN, bounds2 = 536870911, bounds3 = MAX; + +static inline stamp check1(int i) +{ + if (i <= bounds1) + fail ("Unable to create stamp with small index\n"); + return i; +} + +static inline stamp check2(int i) +{ + if (i > bounds2) + fail ("Unable to create a stamp with regular index\n"); + return i; +} + +static inline stamp check3(int i) +{ + if (i >= bounds3) + fail ("Unable to create a stamp with large index\n"); + return i; +} + +stamp stamp_fresh(void) +{ + return (check2(++count2)); +} + +stamp stamp_fresh_small(void) +{ + return (check1(--count1)); +} + +stamp stamp_fresh_large(void) +{ + return (check3(++count3)); +} + +stamp stamp_string(const char *str) deletes +{ + long st; + assert(str_hash != NULL); + + if (! hash_table_lookup(str_hash,(hash_key)str, (void *)(char *) &st)) + { + st = stamp_fresh(); + (void)hash_table_insert(str_hash,(hash_key)str,(hash_data) st); + } + return st; +} + +void stamp_reset(void) deletes +{ + count1 = INITIAL1; + count2 = INITIAL2; + count3 = INITIAL3; + hash_table_reset(str_hash); + deleteregion_ptr(&str_hash_rgn); +} + + + +void stamp_init(void) +{ + str_hash_rgn = newregion(); + str_hash = make_string_hash_table(str_hash_rgn,INITIAL_SIZE,FALSE); + +} +#if 0 +const char *stamp_to_str(region r,stamp st) +{ + return inttostr(r,st); +} +#endif diff --git a/libbanshee/engine/stamp.h b/libbanshee/engine/stamp.h new file mode 100644 index 00000000000..52c55598327 --- /dev/null +++ b/libbanshee/engine/stamp.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + +#ifndef STAMP_H +#define STAMP_H + +#include <regions.h> +#include "linkage.h" + +EXTERN_C_BEGIN + +typedef long stamp; + +stamp stamp_fresh(void); +stamp stamp_fresh_small(void); +stamp stamp_fresh_large(void); + +stamp stamp_string(const char *) deletes; + +const char *stamp_to_str(region r,stamp st); + +void stamp_reset(void) deletes; +void stamp_init(void); + +EXTERN_C_END + +#endif /* STAMP_H */ + + + diff --git a/libbanshee/engine/term-sort.c b/libbanshee/engine/term-sort.c new file mode 100644 index 00000000000..7503f296338 --- /dev/null +++ b/libbanshee/engine/term-sort.c @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + +#include <regions.h> +#include <assert.h> +#include <ansidecl.h> +#include "term-sort.h" + +struct term_constant_ /* extends gen_e */ +{ +#ifdef NONSPEC + sort_kind sort; +#endif + int type; + stamp st; + char *name; +}; + +typedef struct term_constant_ *term_constant_; + +region term_sort_region; +term_hash term_sort_hash; +bool flag_occurs_check = FALSE; + +struct term_stats term_stats; + +stamp term_get_stamp(gen_e e) +{ + if ( ((gen_term)e)->type == VAR_TYPE ) + return ((gen_term)term_get_ecr(e))->st; + else + return ((gen_term)e)->st; +} + +gen_e term_fresh(const char *name) +{ + term_stats.fresh++; + return (gen_e)tv_fresh(term_sort_region,name); +} + +gen_e term_fresh_large(const char *name) +{ + term_stats.fresh_large++; + return (gen_e)tv_fresh_large(term_sort_region,name); +} + +gen_e term_fresh_small(const char *name) +{ + term_stats.fresh_small++; + return (gen_e)tv_fresh_small(term_sort_region,name); +} + + +#ifdef NONSPEC +static struct gen_term zero = {term_sort,ZERO_TYPE,ZERO_TYPE}; +static struct gen_term one = {term_sort,ONE_TYPE,ONE_TYPE}; +#else +static struct gen_term zero = {ZERO_TYPE,ZERO_TYPE}; +static struct gen_term one = {ONE_TYPE,ONE_TYPE}; +#endif /* NONSPEC */ + +gen_e term_zero(void) +{ + return (gen_e)&zero; +} + +gen_e term_one(void) +{ + return (gen_e)&one; +} + + +gen_e term_constant(const char *str) +{ + stamp st[2]; + gen_e result; + char *name = rstrdup(term_sort_region,str); + + assert (str != NULL); + + st[0] = CONSTANT_TYPE; + st[1] = stamp_string(name); + + if ( (result = term_hash_find(term_sort_hash,st,2)) == NULL) + { + term_constant_ c = ralloc(term_sort_region, struct term_constant_); + c->type = CONSTANT_TYPE; + c->st = stamp_fresh(); + c->name = name; + + result = (gen_e) c; + term_hash_insert(term_sort_hash,result,st,2); + + return result; + } + + else + { + return result; + } + +} + +static bool term_is_bottom(gen_e e) +{ + return (term_is_zero(e) || term_is_var(e)); +} + +bool term_is_zero(gen_e e) +{ + return ( ((gen_term)term_get_ecr(e))->type == ZERO_TYPE); +} + +bool term_is_one(gen_e e) +{ + return ( ((gen_term)term_get_ecr(e))->type == ONE_TYPE); +} + +bool term_is_var(gen_e e) +{ + return ( ((gen_term)term_get_ecr(e))->type == VAR_TYPE); +} + +bool term_is_constant(gen_e e) +{ + return ( ((gen_term)term_get_ecr(e))->type == CONSTANT_TYPE); +} + +char *term_get_constant_name(gen_e e) +{ + gen_e ecr = term_get_ecr(e); + if(! term_is_constant(ecr)) + return NULL; + else + return ((term_constant_)ecr)->name; +} + +gen_e term_get_ecr(gen_e e) +{ + if (((gen_term)e)->type == VAR_TYPE) + return tv_get_ecr((term_var)e); + else return e; +} + +static void fire_pending(term_var v, gen_e e, + con_match_fn_ptr con_match, + occurs_check_fn_ptr occurs) +{ + gen_e_list_scanner scan; + gen_e temp; + + gen_e_list_scan(tv_get_pending(v),&scan); + while (gen_e_list_next(&scan,&temp)) + { + term_unify(con_match,occurs,temp,e); + } +} + +static bool eq(gen_e e1, gen_e e2) +{ + return term_get_ecr(e1) == term_get_ecr(e2); +} + +void term_unify(con_match_fn_ptr con_match, occurs_check_fn_ptr occurs, + gen_e a, gen_e b) +{ + gen_e e1 = term_get_ecr(a), + e2 = term_get_ecr(b); + + if ( eq(e1,e2) ) + { + return; + } + if (term_is_constant(e1) && term_is_constant(e2)) + { + failure("Inconsistent system of constraints\n"); + } + else if (term_is_var(e1)) + { + term_var v = (term_var)e1; + + + if (! 
term_is_bottom(e2)) + fire_pending(v,e2,con_match,occurs); + + if (term_is_var(e2)) + tv_unify_vars(v,(term_var)e2); + else /* v = e2, e2 is not a var */ + { + if (occurs(v,e2)) + failure("Unify terms: occurs check failed\n"); + tv_unify(v,e2); + } + } + else if (term_is_var(e2)) + { + term_var v = (term_var)e2; + + if (! term_is_bottom(e1)) + fire_pending(v,e1,con_match,occurs); + + /* v = e1, e1 is not a var */ + if (occurs(v,e1)) + failure("Unify terms: occurs check failed\n"); + tv_unify(v,e1); + + } + else con_match(e1,e2); +} + +void term_cunify(con_match_fn_ptr con_match, occurs_check_fn_ptr occurs, + gen_e e1, gen_e e2) +{ + if (term_is_bottom(e1) && term_is_var(e1)) + { + term_var v1 = (term_var)e1; + tv_add_pending(v1,e2); + } + else + { + term_unify(con_match,occurs,e1,e2); + } +} + +static void term_reset_stats(void) +{ + term_stats.fresh = 0; + term_stats.fresh_small = 0; + term_stats.fresh_large = 0; +} + +void term_print_stats(FILE *f) +{ + fprintf(f,"\n========== Term Var Stats ==========\n"); + fprintf(f,"Fresh : %d\n",term_stats.fresh); + fprintf(f,"Fresh Small : %d\n",term_stats.fresh_small); + fprintf(f,"Fresh Large : %d\n",term_stats.fresh_large); + fprintf(f,"=====================================\n"); +} + +/* TODO */ +void term_print_constraint_graph(FILE *f ATTRIBUTE_UNUSED) +{ +} + +void term_init(void) +{ + term_sort_region = newregion(); + term_sort_hash = make_term_hash(term_sort_region); +} + +void term_reset(void) +{ + term_hash_delete(term_sort_hash); + deleteregion_ptr(&term_sort_region); + + term_reset_stats(); + + term_sort_region = newregion(); + term_sort_hash = make_term_hash(term_sort_region); +} + + + diff --git a/libbanshee/engine/term-sort.h b/libbanshee/engine/term-sort.h new file mode 100644 index 00000000000..21a5dc5cff1 --- /dev/null +++ b/libbanshee/engine/term-sort.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
diff --git a/libbanshee/engine/term-sort.h b/libbanshee/engine/term-sort.h
new file mode 100644
index 00000000000..21a5dc5cff1
--- /dev/null
+++ b/libbanshee/engine/term-sort.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2000-2001
+ *    The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef TERM_SORT_H
+#define TERM_SORT_H
+
+#include <stdio.h>
+#include "banshee.h"
+#include "termhash.h"
+#include "term-var.h"
+
+EXTERN_C_BEGIN
+
+extern bool flag_occurs_check;
+extern region term_sort_region;
+extern term_hash term_sort_hash;
+
+struct gen_term /* extends gen_e */
+{
+#ifdef NONSPEC
+  const sort_kind sort;
+#endif
+  const int type;
+  const stamp st;
+};
+
+typedef struct gen_term *gen_term;
+
+/* return TRUE if v occurs in e, FALSE otherwise */
+typedef bool (* occurs_check_fn_ptr) (term_var v, gen_e e);
+
+stamp term_get_stamp(gen_e e);
+
+gen_e term_fresh(const char *name);
+gen_e term_fresh_large(const char *name);
+gen_e term_fresh_small(const char *name);
+gen_e term_zero(void);
+gen_e term_one(void);
+gen_e term_constant(const char *name);
+
+bool term_is_zero(gen_e e);
+bool term_is_one(gen_e e);
+bool term_is_var(gen_e e);
+bool term_is_constant(gen_e e);
+
+char *term_get_constant_name(gen_e e);
+gen_e term_get_ecr(gen_e e);
+
+void term_unify(con_match_fn_ptr con_match, occurs_check_fn_ptr occurs,
+		gen_e e1, gen_e e2);
+void term_cunify(con_match_fn_ptr con_match, occurs_check_fn_ptr occurs,
+		 gen_e e1, gen_e e2);
+
+void term_print_stats(FILE *f);
+void term_print_constraint_graph(FILE *f);
+
+void term_init(void);
+void term_reset(void);
+
+extern struct term_stats term_stats;
+
+struct term_stats
+{
+  int fresh;
+  int fresh_small;
+  int fresh_large;
+};
+
+EXTERN_C_END
+
+#endif /* TERM_SORT_H */
+
+
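(A note on the casts this header licenses: every expression of the sort --
gen_term here, term_constant_ and term_var in the .c files -- begins with
the same leading fields, sort under NONSPEC followed by type and st, so the
engine can read any expression's tag through a gen_term cast.  An
illustrative reduction of the idiom, with hypothetical struct names:)

struct base    { int type; stamp st; };                /* cf. gen_term   */
struct derived { int type; stamp st; char *payload; }; /* cf. a constant */

/* Because the leading members agree, ((struct base *)d)->type is a
   well-defined read for any struct derived *d. */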
diff --git a/libbanshee/engine/term-var.c b/libbanshee/engine/term-var.c
new file mode 100644
index 00000000000..9b8e2059871
--- /dev/null
+++ b/libbanshee/engine/term-var.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2000-2001
+ *    The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <stdio.h>
+#include <regions.h>
+#include <assert.h>
+#include "ufind.h"
+#include "term-var.h"
+
+DECLARE_UFIND(tv_elt,gen_e)
+
+DEFINE_UFIND(tv_elt,gen_e)
+
+DEFINE_LIST(term_var_list,term_var)
+
+struct term_var
+{
+#ifdef NONSPEC
+  sort_kind sort;
+#endif
+  int type;
+  stamp st;
+  gen_e_list pending;
+  const char *name;
+  tv_elt elt;
+};
+
+static term_var make_var(region r, const char *name, stamp st)
+{
+  term_var result = ralloc(r, struct term_var);
+  gen_e info = (gen_e) result;
+
+  result->type = VAR_TYPE;
+  result->st = st;
+  result->pending = new_gen_e_list(r);
+  result->name = name ? rstrdup(r,name) : "fv";
+  result->elt = new_tv_elt(r,info);
+
+  return result;
+}
+
+term_var tv_fresh(region r, const char *name)
+{
+  return make_var(r,name,stamp_fresh());
+}
+
+term_var tv_fresh_small(region r, const char *name)
+{
+  return make_var(r,name,stamp_fresh_small());
+}
+
+term_var tv_fresh_large(region r, const char *name)
+{
+  return make_var(r,name,stamp_fresh_large());
+}
+
+static term_var tv_get_v_ecr(term_var v)
+{
+  term_var ecr = (term_var)tv_get_ecr(v);
+
+  /* The cast above is only safe while the ecr's info is still a
+     variable; the assertion catches any other use.  */
+  assert (ecr->type == VAR_TYPE);
+
+  return ecr;
+}
+
+const char *tv_get_name(term_var v)
+{
+  return tv_get_v_ecr(v)->name;
+}
+
+gen_e_list tv_get_pending(term_var v)
+{
+  return tv_get_v_ecr(v)->pending;
+}
+
+void tv_add_pending(term_var v,gen_e e)
+{
+  gen_e_list_cons(e,tv_get_v_ecr(v)->pending);
+}
+
+void tv_unify(term_var v, gen_e e)
+{
+  tv_elt_update(v->elt,e);
+
+  assert(tv_get_ecr(v) == e);
+}
+
+static gen_e tv_combine(gen_e e1, gen_e e2)
+{
+  term_var v1 = (term_var)e1,
+           v2 = (term_var)e2;
+
+  if (v1 != v2)
+    gen_e_list_append(tv_get_pending(v1), tv_get_pending(v2));
+
+  return e1;
+}
+
+void tv_unify_vars(term_var v1, term_var v2)
+{
+  tv_elt_unify(tv_combine,v1->elt, v2->elt);
+}
+
+gen_e tv_get_ecr(term_var v)
+{
+  return tv_elt_get_info(v->elt);
+}
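(term-var.c gets its equivalence classes from the DECLARE_UFIND/DEFINE_UFIND
macros, defined in ufind.h later in this patch: each term_var owns a tv_elt
whose info initially points back at the variable, so tv_get_ecr is a single
find on the union-find forest.  A usage sketch, not part of the patch; e and
e2 stand for arbitrary expressions of the sort:)

static void pending_sketch(gen_e e, gen_e e2)
{
  region r = newregion();
  term_var v = tv_fresh(r, "x");

  tv_add_pending(v, e);   /* defer a constraint on the bare variable  */
  tv_unify(v, e2);        /* bind v to e2; note it is term_unify, not */
                          /* tv_unify, that replays the pending list  */
}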
diff --git a/libbanshee/engine/term-var.h b/libbanshee/engine/term-var.h
new file mode 100644
index 00000000000..1ca035ff8c6
--- /dev/null
+++ b/libbanshee/engine/term-var.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2000-2001
+ *    The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef TERM_VAR_H
+#define TERM_VAR_H
+
+#include "linkage.h"
+#include "banshee.h"
+
+EXTERN_C_BEGIN
+
+typedef struct term_var *term_var;
+
+DECLARE_LIST(term_var_list,term_var)
+
+term_var tv_fresh(region r, const char *name);
+term_var tv_fresh_small(region r, const char *name);
+term_var tv_fresh_large(region r, const char *name);
+
+const char *tv_get_name(term_var v);
+
+gen_e_list tv_get_pending(term_var v);
+void tv_add_pending(term_var v,gen_e e);
+
+void tv_unify(term_var v, gen_e e);
+void tv_unify_vars(term_var v1, term_var v2);
+
+gen_e tv_get_ecr(term_var v);
+
+
+EXTERN_C_END
+
+#endif /* TERM_VAR_H */
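(The next file implements the hash table that term-sort.c used for
hash-consing.  A term's identity is its stamp signature: an array whose
first slot is the constructor tag and whose remaining slots are the stamps
of the arguments.  A condensed sketch of the find-or-insert pattern from
term_constant above; build_constant is a hypothetical constructor:)

static gen_e constant_sketch(const char *name)
{
  stamp st[2];
  gen_e e;

  st[0] = CONSTANT_TYPE;        /* constructor tag  */
  st[1] = stamp_string(name);   /* argument's stamp */

  if ((e = term_hash_find(term_sort_hash, st, 2)) != NULL)
    return e;                   /* already built: share it */

  e = build_constant(name);     /* hypothetical */
  term_hash_insert(term_sort_hash, e, st, 2);
  return e;
}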
diff --git a/libbanshee/engine/termhash.c b/libbanshee/engine/termhash.c
new file mode 100644
index 00000000000..b42f9ba7304
--- /dev/null
+++ b/libbanshee/engine/termhash.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2000-2001
+ *    The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+#include "termhash.h"
+#include "hash.h"
+#include "util.h"
+
+#define UB(n) ((1<<(n))-1)
+#define CAP(n) (1<<(n))
+#define INITIAL_TABLE_SIZE 8 /* initial capacity is 2^INITIAL_TABLE_SIZE buckets */
+
+/* An individual entry in the table consists of an array of stamps */
+/* (same arity as the expr's constructor) in addition to the expr  */
+/* itself. */
+typedef struct hash_entry *hash_entry;
+
+/* A term_bucket is a list of entries (a list of exprs that have */
+/* collided after hashing) */
+typedef struct term_bucket *term_bucket;
+
+struct hash_entry
+{
+  int length;
+  stamp *stamps;
+  gen_e e;
+};
+
+struct term_bucket
+{
+  hash_entry entry;
+  struct term_bucket *next;
+};
+
+#define scan_term_bucket(b,var) for(var = b; var; var = var->next)
+
+/* size: initial_table_size + number of rehashes */
+/* capacity: 2^size (for array size) */
+/* ub: 2^size-1 (for array indexing) */
+/* inserts: num of elements inserted into the array */
+struct term_hash
+{
+  term_bucket *term_buckets;
+  region rgn;
+  int ub;
+  int size;
+  int capacity;
+  int inserts;
+};
+
+static int hash(int ub, stamp stamps[], int len);
+static void post_insert(term_hash tab) deletes;
+static void rehash(term_hash tab) deletes;
+static void reinsert(term_hash tab, term_bucket b);
+static void insert(term_hash tab, gen_e e, stamp *stamps, int len);
+static void insert_entry(term_hash tab, struct hash_entry *entry);
+static gen_e walk(term_bucket b, stamp *stamps, int len);
+
+static const int primes[] =
+  { 83, 1789, 5189, 5449, 5659, 6703, 7517, 7699, 8287, 8807, 9067, 9587,
+    10627, 10939, 11239};
+
+static const int initial_table_size = INITIAL_TABLE_SIZE;
+
+term_hash make_term_hash(region rgn)
+{
+  int ub, n;
+  int i;
+
+  region r;
+
+  term_hash tab = ralloc(rgn, struct term_hash);
+
+  r = newregion();
+  ub = UB(initial_table_size);
+  n = CAP(initial_table_size);
+
+  tab->term_buckets = rarrayalloc(r, n, term_bucket);
+
+  for (i = 0; i < n; i++)
+    {
+      tab->term_buckets[i] = NULL;
+    }
+
+  tab->rgn = r;
+  tab->ub = ub;
+  tab->size = initial_table_size;
+  tab->capacity = n;
+  tab->inserts = 0;
+  return tab;
+}
+
+void term_hash_delete(term_hash tab) deletes
+{
+  deleteregion(tab->rgn);
+}
+
+gen_e term_hash_find(term_hash tab, stamp stamps[], int len)
+{
+  int hash_val;
+
+  term_bucket b;
+  hash_val = hash(tab->ub, stamps, len);
+  b = tab->term_buckets[hash_val];
+  return walk(b, stamps, len);
+}
+
+static gen_e walk(term_bucket b, stamp stamps[], int len)
+{
+  term_bucket cur;
+  scan_term_bucket(b,cur)
+    {
+      if (len == cur->entry->length
+	  && (memcmp(stamps, cur->entry->stamps, sizeof(int)*len) == 0) )
+	return cur->entry->e;
+    }
+  return NULL;
+}
+
+/* Calls term_hash_find to see if a gen_e with the given stamp      */
+/* signature is already in the table.  If so, returns TRUE and does */
+/* nothing; otherwise inserts the new entry and returns FALSE.      */
+bool term_hash_insert(term_hash tab, gen_e e, stamp *stamps, int len) deletes
+{
+  if (term_hash_find(tab, stamps, len) != NULL)
+    {
+      return TRUE;
+    }
+  insert(tab, e, stamps, len);
+  post_insert(tab);
+  return FALSE;
+}
+
+
+/* Insert an expression e represented by the given stamp array into */
+/* the hash table. 
*/ +static void insert(term_hash tab, gen_e e, stamp stamps[], int len) +{ + hash_entry entry; + stamp * stamp_cpy; + int i; + + + entry = ralloc(tab->rgn, struct hash_entry); + + stamp_cpy = rarrayalloc(tab->rgn, len, stamp); + for (i = 0; i < len; i++) + { + stamp_cpy[i] = stamps[i]; + } + + entry->length = len; + entry->stamps = stamp_cpy; + entry->e = e; + insert_entry(tab, entry); +} + +static void insert_entry(term_hash tab, hash_entry entry) +{ + int hash_val; + + term_bucket b, new_term_bucket; + hash_val = hash(tab->ub, entry->stamps, entry->length); + b = tab->term_buckets[hash_val]; + new_term_bucket = ralloc(tab->rgn, struct term_bucket); + + new_term_bucket->entry = entry; + new_term_bucket->next = b; + tab->term_buckets[hash_val] = new_term_bucket; +} + +static void post_insert(term_hash tab) deletes +{ + if (tab->capacity == ++tab->inserts) + { + rehash(tab); + } +} + +/* Double the size of the hash table and reinsert all of the elements. */ +static void rehash(term_hash tab) deletes +{ + region old_rgn; + term_bucket * old_term_buckets; + int i; + int old_table_size = tab->capacity; + + old_term_buckets = tab->term_buckets; + tab->capacity *= 2; + tab->ub = UB(++tab->size); + old_rgn = tab->rgn; + tab->rgn = newregion(); + + + tab->term_buckets = rarrayalloc(tab->rgn, tab->capacity, term_bucket); + for (i = 0; i < old_table_size; i++) + { + if (old_term_buckets[i] != NULL && old_term_buckets[i]->entry != NULL) + reinsert(tab, old_term_buckets[i]); + } + + deleteregion(old_rgn); + + +} + +static void reinsert(term_hash tab, term_bucket b) +{ + term_bucket cur; + scan_term_bucket(b,cur) + insert(tab, cur->entry->e, cur->entry->stamps, cur->entry->length); +} + +static int hash(int ub, stamp stamps[], int len) +{ + int i, n; + + n = 0; + for (i = 0; i < len; i++) + { + n = (n + (primes[i % 15] * abs(stamps[i]))) & ub; + } + return n; +} + + + + + + diff --git a/libbanshee/engine/termhash.h b/libbanshee/engine/termhash.h new file mode 100644 index 00000000000..777adc3fd16 --- /dev/null +++ b/libbanshee/engine/termhash.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef TERMHASH_H +#define TERMHASH_H + +#include "banshee.h" +#include "stamp.h" +#include "bool.h" + +typedef struct term_hash *term_hash; + +term_hash make_term_hash(region r); + +gen_e term_hash_find(term_hash h, stamp *st,int length); + +bool term_hash_insert(term_hash h, gen_e e, stamp *st, int length) deletes; + +void term_hash_delete(term_hash h) deletes; + +#endif /* TERMHASH_H */ diff --git a/libbanshee/engine/ufind.c b/libbanshee/engine/ufind.c new file mode 100644 index 00000000000..545171812bb --- /dev/null +++ b/libbanshee/engine/ufind.c @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + +#include <stdio.h> +#include "ufind.h" +#include "assert.h" + + +enum uf_type {uf_ecr,uf_link}; +typedef enum uf_type uf_type; + +struct uf_element +{ + uf_type type; + int rank; + void *info; + struct uf_element *link; +}; + +struct uf_element *new_uf_element(region r, void *info) +{ + struct uf_element *result; + + result = ralloc(r, struct uf_element); + + result->type = uf_ecr; + result->rank = 0; + result->info = info; + result->link = NULL; + + return result; +} + +static struct uf_element *find(struct uf_element *e) +{ + + if (e->type == uf_ecr) + return e; + + else if (e->link->type == uf_link) + { + struct uf_element *temp = e->link; + + e->link = e->link->link; + + return find(temp); + } + + else + return e->link; +} + +bool uf_union(struct uf_element *a, struct uf_element *b) +{ + struct uf_element *e1 = find(a); + struct uf_element *e2 = find(b); + + if ( e1 == e2 ) + return FALSE; + + else if (e1->rank < e2->rank) + { + e1->type = uf_link; + e1->link = e2; + + return TRUE; + } + + else if (e1->rank > e2->rank) + { + e2->type = uf_link; + e2->link = e1; + + return TRUE; + } + + else + { + e2->rank++; + + e1->type = uf_link; + e1->link = e2; + + return TRUE; + } + +} + +bool uf_unify(combine_fn_ptr combine, + struct uf_element *a, struct uf_element *b) +{ + struct uf_element *e1 = find(a); + struct uf_element *e2 = find(b); + + if ( e1 == e2 ) + return FALSE; + + else if (e1->rank < e2->rank) + { + e2->info = combine(e2->info,e1->info); + e1->type = uf_link; + e1->link = e2; + + return TRUE; + } + + else if (e1->rank > e2->rank) + { + e1->info = combine(e1->info,e2->info); + e2->type = uf_link; + e2->link = e1; + + return TRUE; + } + + else + { + e2->info = combine(e2->info, e1->info); + + e2->rank++; + e1->type = uf_link; + e1->link = e2; + + return TRUE; + } +} + + + +void *uf_get_info(struct uf_element *e) +{ + return find(e)->info; +} + + +bool uf_eq(struct uf_element *e1,struct uf_element *e2) +{ + return (find(e1) == find(e2)); +} + +void uf_update(struct uf_element *e,uf_info i) +{ + find(e)->info = i; +} + + + + + + diff --git a/libbanshee/engine/ufind.h b/libbanshee/engine/ufind.h new file mode 100644 index 00000000000..5068530114e --- /dev/null +++ b/libbanshee/engine/ufind.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef UFIND_H +#define UFIND_H + +#include <regions.h> +#include "linkage.h" +#include "bool.h" + +EXTERN_C_BEGIN + +struct uf_element; + +typedef struct uf_element *uf_element; +typedef void *uf_info; + +typedef uf_info (*combine_fn_ptr)(uf_info,uf_info); + +struct uf_element *new_uf_element(region r,uf_info i); +uf_info uf_get_info(struct uf_element *); +bool uf_unify(combine_fn_ptr,struct uf_element *,struct uf_element *); +bool uf_union(struct uf_element *,struct uf_element *); +bool uf_eq(struct uf_element *,struct uf_element *); +void uf_update(struct uf_element *,uf_info i); + +#define DECLARE_UFIND(name,type) \ +typedef struct name *name; \ +typedef type (* name ## _combine_fn_ptr)(type info1,type info2); \ +name new_ ## name(region r, type info); \ +type name ## _get_info(name); \ +bool name ## _unify(name ## _combine_fn_ptr,name e1, name e2); \ +bool name ## _union(name e1, name e2); \ +bool name ## _eq(name e1, name e2); \ +void name ## _update(name e1, type info); + +#define DEFINE_UFIND(name,type) \ +name new_ ## name(region r, type info) \ +{ \ + return (name)new_uf_element(r,info);\ +}\ +type name ## _get_info(name elem) \ +{ \ + return (type)uf_get_info((struct uf_element *)elem);\ +} \ +bool name ## _unify(name ## _combine_fn_ptr cmb,name e1, name e2) \ +{ \ + return uf_unify((combine_fn_ptr)cmb,(struct uf_element *)e1,(struct uf_element *)e2); \ +} \ +bool name ## _union(name e1, name e2) \ +{ \ + return uf_union((struct uf_element *)e1,(struct uf_element *)e2); \ +}\ +bool name ## _eq(name e1, name e2) \ +{ \ + return uf_eq((struct uf_element *)e1,(struct uf_element *)e2); \ +} \ +void name ##_update(name e1, type info) \ +{ \ + uf_update((struct uf_element *)e1,(uf_info)info); \ +} \ + +EXTERN_C_END + +#endif /* UFIND_H */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/libbanshee/engine/util.c b/libbanshee/engine/util.c new file mode 100644 index 00000000000..8989956b70a --- /dev/null +++ b/libbanshee/engine/util.c @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2000-2001 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <ctype.h>
+#include <math.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include "buffer.h"
+#include "util.h"
+
+/* Panic with a message */
+static void vfail(const char *fmt, va_list args) __attribute__((__noreturn__));
+
+static void vfail(const char *fmt, va_list args)
+{
+  vfprintf(stderr, fmt, args);
+  /* Flush the output streams only; flushing stdin is undefined.  */
+  fflush(stderr);
+  fflush(stdout);
+  sync();
+  fsync(STDERR_FILENO);
+  fsync(STDOUT_FILENO);
+  abort();
+  while (1); /* Work around stupid gcc-2.96-85 bug */
+}
+
+/* Panic with a nice message */
+void __fail(const char *file, unsigned int line,
+	    const char *func __attribute__((unused)),
+	    const char *fmt, ...)
+{
+  va_list args;
+  va_start(args, fmt);
+  fprintf(stderr, "\n%s:%u ", file, line);
+  vfail(fmt, args);
+}
+
+#ifndef HAVE_VARIADIC_MACROS
+/* Panic with a not-quite-as-nice message */
+void fail(const char *fmt, ...)
+{
+  va_list args;
+  va_start(args, fmt);
+  vfail(fmt, args);
+}
+#endif
+
+void failure(const char *message)
+{
+  /* Print the message verbatim; never treat it as a format string.  */
+  fprintf(stderr, "%s", message);
+  exit(1);
+}
+
+/* Concatenate 2 strings, allocating space in r for the result */
+char *rstrcat(region r, const char *s1, const char *s2)
+{
+  char *result = rarrayalloc(r, strlen(s1)+strlen(s2)+1, char);
+  result[0] = '\0';
+  strcat(result, s1);
+  strcat(result, s2);
+  return result;
+}
+
+/* Concatenate n strings, allocating space in r for the result.  The
+   last argument should be a null pointer. */
+char *rstrscat(region r, ...)
+{
+  char *result;
+  int len = 0;
+  const char *s;
+  va_list args;
+
+  va_start(args, r);
+  while ((s = va_arg(args, const char *)))
+    len += strlen(s);
+  va_end(args);
+
+  result = rarrayalloc(r, len+1, char);
+  result[0] = '\0';
+
+  va_start(args, r);
+  while ((s = va_arg(args, const char *)))
+    strcat(result, s);
+  va_end(args);
+
+  return result;
+}
+
+#if 0
+/* Convert an integer to a string, storing the result in r */
+const char *inttostr(region r, int i)
+{
+  char *result;
+  int width;
+
+  if (i == 0)
+    width = 1;
+  else
+    width = (int) (floor(log10(abs((double) i))) + 1);
+  if (i<0) width++;
+
+  printf("i=%d, width=%d\n", i, width);
+  assert(width >0);
+
+  result = rarrayalloc(r, width + 1, char);
+  if (snprintf(result, width + 1, "%d", i) == -1) {
+    printf("i=%d, width=%d\n", i, width);
+    fail ("inttostr width wrong\n");
+  }
+  return result;
+}
+#endif
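(A usage sketch for the string helpers above; rsprintf is declared in
util.h and defined just below.  Nothing is freed by hand because the
region owns every allocation:)

region r = newregion();
char *s1 = rstrcat(r, "lib", "banshee");            /* "libbanshee"  */
char *s2 = rstrscat(r, "term", "-", "sort", NULL);  /* "term-sort"   */
char *s3 = rsprintf(r, "%d buckets", 256);          /* "256 buckets" */
deleteregion(r);   /* releases s1, s2 and s3 at once */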
+
+/* sprintf a string, allocating space in r for the result */
+char *rsprintf(region r, const char *fmt, ...)
+{
+  va_list args;
+  char *result;
+
+  va_start(args, fmt);
+  result = rvsprintf(r, fmt, args);
+  va_end(args);
+  return result;
+}
+
+char *rvsprintf(region r, const char *fmt, va_list args)
+{
+  growbuf buf = growbuf_new(r, 100);
+  gvprintf(buf, fmt, args);
+  return growbuf_contents(buf);
+}
+
+/* Space for the ASCII representation of a pointer -- 2 hex chars per
+   byte, plus 3 chars for 0x prefix and trailing \0 */
+#define PTR_ASCII_SIZE ((int) (3 + sizeof(void *)*2))
+
+/* Convert a pointer to an ascii string with leading 0x.  Re-uses
+   internal buffer. */
+char *ptr_to_ascii(void *ptr) {
+  static char addr[PTR_ASCII_SIZE];
+  int nchars;
+
+  nchars = snprintf(addr, PTR_ASCII_SIZE, "%p", ptr);
+  if (nchars == -1 || nchars >= PTR_ASCII_SIZE)
+    fail("Unable to convert ptr to ascii (need %d bytes, have %d)\n",
+	 nchars, PTR_ASCII_SIZE);
+  return addr;
+}
+
+/* Convert a pointer to an integer */
+long ptr_hash(void *ptr)
+{
+  return (long) ptr;
+}
+
+/* Return TRUE iff ptr1 == ptr2 */
+bool ptr_eq(void *ptr1, void *ptr2)
+{
+  return ptr1 == ptr2;
+}
+
+/* Return TRUE iff s1 == s2 */
+bool str_eq(const char *s1, const char *s2)
+{
+  return (strcmp(s1, s2) == 0);
+}
+
+/* A total ordering on pointers.  Returns 0 if ptr1 = ptr2, a value <0
+   if ptr1 < ptr2, or a value >0 if ptr1 > ptr2. */
+int ptr_cmp(const void *ptr1, const void *ptr2)
+{
+  return (char *) ptr1 - (char *) ptr2;
+}
+
+int min(int a, int b) { if (a < b) return a; else return b; }
+int max(int a, int b) { if (a < b) return b; else return a; }
+/* int abs(int a) { if (a < 0) return -a; else return a; } */
diff --git a/libbanshee/engine/util.h b/libbanshee/engine/util.h
new file mode 100644
index 00000000000..1d13147da08
--- /dev/null
+++ b/libbanshee/engine/util.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2000-2001
+ *    The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * + */ + +#ifndef UTIL_H +#define UTIL_H + +#include <assert.h> +#include <stdarg.h> +#include <stdlib.h> +#include <regions.h> +#include "compiler.h" +#include "linkage.h" +#include "bool.h" + + +EXTERN_C_BEGIN + +#ifdef HAVE_VARIADIC_MACROS +#define fail(args...) __fail(__FILE__, __LINE__, __FUNCTION__, args) +#else +void fail(const char *fmt, ...); +#endif + +void __fail(const char *file, unsigned int line, + const char *func, const char *fmt, ...) __attribute__ ((__noreturn__)); + + +/* insist(action) is like assert(action), but action may have + side-effects */ +#ifdef NDEBUG +# define insist(action) (action) +#else +# define insist assert +#endif + +#ifdef NDEBUG +# define insistnot(action) (action) +#else +# define insistnot(action) assert(!(action)) +#endif + +void failure(const char *message); + +/* Concatenate 2 strings, allocating space in r for the result */ +char *rstrcat(region, const char *, const char *); + +/* Concatenate n strings, allocating space in r for the result. The + last argument should be a null pointer. */ +char *rstrscat(region, ...); + +/* Convert an integer to a string, storing the result in r */ +const char *inttostr(region r, int); + +/* sprintf a string, allocating space in r for the result */ +char *rsprintf(region r, const char *fmt, ...); +char *rvsprintf(region r, const char *fmt, va_list args); + +/* Convert a pointer to an ascii string with leading 0x. Re-uses + internal buffer. */ +char *ptr_to_ascii(void *ptr); + +/* Convert a pointer to an integer */ +long ptr_hash(void *ptr); + +/* Return TRUE iff ptr1 == ptr2 */ +bool ptr_eq(void *ptr1, void *ptr2); + +/* Return TRUE iff s1 == s2 */ +bool str_eq(const char *s1, const char *s2); + +/* A total ordering on pointers. Returns 0 if ptr1 = ptr2, a value <0 + if ptr1 < ptr2, or a value >0 if ptr1 > ptr2. */ +int ptr_cmp(const void *ptr1, const void *ptr2); + +extern inline int min(int, int); +extern inline int max(int, int); +extern inline int min(int a, int b) { if (a < b) return a; else return b; } +extern inline int max(int a, int b) { if (a < b) return b; else return a; } +EXTERN_C_END + +#endif |
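(A closing note on the min/max pair that ends util.h: the header relies on
the GNU89 inline model, where "extern inline" provides an inline-only
definition and the single out-of-line copy is the plain definition at the
bottom of util.c.  Under C99 inline semantics the same spelling emits an
external definition in every translation unit and would link with duplicate
symbols; a port to a compiler using C99 inline would need a guard along
these lines, where BANSHEE_INLINE is a hypothetical name:)

#if defined(__GNUC__) && defined(__GNUC_STDC_INLINE__)
/* GCC in C99-inline mode: ask for the old GNU89 behavior explicitly. */
# define BANSHEE_INLINE extern __inline__ __attribute__((__gnu_inline__))
#else
# define BANSHEE_INLINE extern inline
#endif

BANSHEE_INLINE int min(int a, int b) { if (a < b) return a; else return b; }
BANSHEE_INLINE int max(int a, int b) { if (a < b) return b; else return a; }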